diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 0000000..d70a8a3 --- /dev/null +++ b/.bazelrc @@ -0,0 +1 @@ +build --workspace_status_command=hack/bazel_workspace.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..54adef3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,31 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# Test binary, build with `go test -c` +*.test +*.coverprofile + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# version.go should be generated at release time. Since it depends on `git +# describe` it would vary with every CR. +oracle/version.go + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +bazel-* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000..012c605 --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,27 @@ +load("@bazel_gazelle//:def.bzl", "gazelle") + +# gazelle:prefix github.com/GoogleCloudPlatform/elcarro-oracle-operator +# gazelle:exclude **/*.pb.go +# gazelle:resolve go github.com/godror/godror @com_github_godror_godror//:godror +gazelle(name = "gazelle") + +# To use gazelle you need to use `fix` to fix BUILD rules, and `update-repos` to sync deps.bzl with go.mod +# bazel run //:gazelle -- fix +# bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%go_dependencies + +# tools.go tools can be used via these aliases. +alias( + name = "kustomize", + actual = "@io_k8s_sigs_kustomize_kustomize_v4//:v4", +) + +alias( + name = "controller-gen", + actual = "@io_k8s_sigs_controller_tools//cmd/controller-gen", +) + +# cc tools from WORKSPACE +alias( + name = "protoc", + actual = "@com_google_protobuf//:protoc", +) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..ad65a3b --- /dev/null +++ b/README.md @@ -0,0 +1,131 @@ +

+# El Carro: The Oracle Operator for Kubernetes

+ +[![Go Report Card](https://goreportcard.com/badge/github.com/GoogleCloudPlatform/elcarro-oracle-operator)](https://goreportcard.com/report/github.com/GoogleCloudPlatform/elcarro-oracle-operator) + +# Run Oracle on Kubernetes with El Carro + +El Carro is a new project that offers a way to run Oracle databases in +Kubernetes, a portable, open source, community-driven container orchestration +system, with no vendor lock-in. El Carro provides a powerful declarative API for +comprehensive and consistent configuration and deployment as well as for +real-time operations and monitoring. + +## High Level Overview + +El Carro helps you with the deployment and management of Oracle database +software in Kubernetes. You must have appropriate licensing rights to that +Oracle database software to use it with El Carro (bring your own license, BYOL). + +With the current release, you download the El Carro installation bundle, stage +the Oracle installation software, create a containerized database image (with or +without a seed database), and then create an Instance (known as a CDB in Oracle +parlance) and add one or more Databases (known as PDBs). + +After the El Carro Instance and Database(s) are created, you can take +snapshot-based or RMAN-based backups and get basic monitoring and logging +information. Additional database services will be added in future releases. + +### License Notice + +You can use El Carro to automatically provision and manage Oracle Database +Express Edition (XE) or Oracle Database Enterprise Edition (EE). In each case, +it is your responsibility to ensure that you have appropriate licenses to use +any such Oracle software with El Carro. + +Please also note that each El Carro “database” will create a pluggable database, +which may require licensing of the Oracle Multitenant option. + +Oracle and Java are registered trademarks of Oracle and/or its affiliates. Other +names may be trademarks of their respective owners. + +### Quickstart + +We recommend starting with the quickstart. As you become more familiar +with El Carro, consider trying more advanced features by following the user +guides linked below. + +If you have a valid license for Oracle 12c EE and would like to get your Oracle +database up and running on Kubernetes, you can follow this +[quickstart guide](docs/content/quickstart.md). + +As an alternative to Oracle 12c EE, you can use +[Oracle 18c XE](https://www.oracle.com/database/technologies/appdev/xe.html) +which is free to use by following the +[quickstart guide for Oracle 18c XE](docs/content/quickstart-18c-xe.md) instead. + +If you prefer to run El Carro locally on your personal computer, you can follow +the [user guide for Oracle on minikube](docs/content/minikube.md). + +### Preparation + +To prepare the El Carro download and deployment, follow +[this guide](docs/content/preparation.md). + +### Provisioning + +El Carro helps you to easily create, scale, and delete Oracle databases. + +First, you need to +[create a containerized database image](docs/content/provision/image.md). + +You can optionally create a default Config to set namespace-wide defaults for +configuring your databases, following +[this guide](docs/content/provision/config.md). + +Then you can create Instances (known as CDBs in Oracle parlance), following +[this guide](docs/content/provision/instance.md). Afterward, create Databases +(known as PDBs) and users, following +[this guide](docs/content/provision/database.md). 
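+For reference, the resources above are backed by the Go API types under
+`common/api/v1alpha1` in this repository. Below is a minimal, illustrative
+sketch of how they fit together; the names "mydb", "pdb1", "scott" and the
+secret coordinates are hypothetical placeholders, not values from the guides:
+
+```go
+package main
+
+import (
+	commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1"
+)
+
+func main() {
+	// A Database (PDB) belongs to an Instance (CDB) by name.
+	db := commonv1alpha1.DatabaseSpec{
+		Instance: "mydb",
+		Name:     "pdb1",
+	}
+
+	// A user carries an inlined CredentialSpec; exactly one of its three
+	// options (plaintext, k8s Secret reference, GSM secret reference)
+	// should be set.
+	user := commonv1alpha1.UserSpec{
+		Name: "scott",
+		CredentialSpec: commonv1alpha1.CredentialSpec{
+			GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+				ProjectId: "my-project",
+				SecretId:  "scott-password",
+				Version:   "latest",
+			},
+		},
+	}
+	_, _ = db, user
+}
+```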
+ +### Backup and Recovery + +El Carro provides both storage-snapshot-based and Oracle-native RMAN-based +backup/restore features to support your database backup and recovery +strategy. + +After the El Carro Instance and Database(s) are created, you can create +storage-snapshot-based backups, following +[this guide](docs/content/backup-restore/snapshot-backups). + +You can also create Oracle-native RMAN-based backups, following +[this guide](docs/content/backup-restore/rman-backups). + +To restore from a backup, follow +[this guide](docs/content/backup-restore/restore-from-backups). + +### Data Import & Export + +El Carro provides data import/export features based on Oracle Data Pump. + +To import data into your El Carro database, follow +[this guide](docs/content/data-pump/import.md). + +To export data from your El Carro database, follow +[this guide](docs/content/data-pump/export.md). + +### What's More? + +There are more features supported by El Carro and more to be added soon! For +more information, check [logging](docs/content/monitoring/logging), +[monitoring](docs/content/monitoring/monitoring.md), +[connectivity](docs/content/monitoring/connectivity.md), +[UI](docs/content/monitoring/ui.md), etc. + +## Contributing + +You're very welcome to contribute to the El Carro Project! + +We've put together a set of contributing and development guidelines that you can +review here: + +* [Contributing Guidelines](docs/content/contributing/guidelines.md) +* [Development Guidance](docs/content/contributing/development.md) + +## Support + +To report a bug or log a feature request, please open a GitHub issue and follow +the guidelines for submitting a bug. + +For general questions or community support, we welcome you to join the +[El Carro community mailing list](https://groups.google.com/forum/#!forum/el-carro) +and ask your question there. 
diff --git a/WORKSPACE b/WORKSPACE new file mode 100644 index 0000000..01ea84b --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,112 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +# Golang +http_archive( + name = "io_bazel_rules_go", + sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz", + "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz", + ], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") + +# Gazelle +http_archive( + name = "bazel_gazelle", + sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz", + ], +) + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") + +# Download the rules_docker repository at release v0.14.4 +http_archive( + name = "io_bazel_rules_docker", + sha256 = "4521794f0fba2e20f3bf15846ab5e01d5332e587e9ce81629c7f96c793bb7036", + strip_prefix = "rules_docker-0.14.4", + urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.14.4/rules_docker-v0.14.4.tar.gz"], +) + +# Protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "512e5a674bf31f8b7928a64d8adf73ee67b8fe88339ad29adaa3b84dbaa570d8", + strip_prefix = "protobuf-3.12.4", + urls = ["https://github.com/protocolbuffers/protobuf/archive/refs/tags/v3.12.4.tar.gz"], +) + +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +# skylib for go_gencode +http_archive( + name = "bazel_skylib", + sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", + urls = [ + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", + ], +) + +http_archive( + name = "com_github_godror_godror", + patch_args = ["-p1"], + patches = ["@//:hack/0001-Patch-to-add-bazel-support.patch"], + sha256 = "ac45b8ea0d8bdb828b4862011ee1b7dc8384231a6ee887bcebbb97ffdb339109", + strip_prefix = "godror-0.20.1", + urls = [ + "https://github.com/godror/godror/archive/v0.20.1.tar.gz", + ], + # version = "v0.21.1" +) + +load("//:deps.bzl", "go_dependencies") + +# gazelle:repository_macro deps.bzl%go_dependencies +go_dependencies() + +# Initialize after loading everything +go_rules_dependencies() + +go_register_toolchains(version = "1.16") + +gazelle_dependencies() + +protobuf_deps() + +load( + "@io_bazel_rules_docker//repositories:repositories.bzl", + container_repositories = "repositories", +) + +container_repositories() + +load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps") + +container_deps() + +load("@io_bazel_rules_docker//repositories:pip_repositories.bzl", container_pip_deps = "pip_deps") + +container_pip_deps() + +# Containers to load from external repositories. This must go in WORKSPACE. +load("@io_bazel_rules_docker//container:container.bzl", "container_pull") + +container_pull( + name = "busybox", + digest = "sha256:c9249fdf56138f0d929e2080ae98ee9cb2946f71498fc1484288e6a935b5e5bc", # unclear how long these images last, it may expire and we need to grab latest again. 
+ registry = "docker.io", + repository = "library/busybox", + # tag = "latest", +) + +container_pull( + name = "distroless", + registry = "gcr.io", + repository = "distroless/cc", # /base is also an option for glibc+openssl, see https://github.com/GoogleContainerTools/distroless + tag = "nonroot", +) diff --git a/common/api/v1alpha1/BUILD.bazel b/common/api/v1alpha1/BUILD.bazel new file mode 100644 index 0000000..8ebb57b --- /dev/null +++ b/common/api/v1alpha1/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_library( + name = "v1alpha1", + srcs = [ + "backup.go", + "credential.go", + "database.go", + "disk.go", + "instance.go", + "maintenancewindow.go", + "phase.go", + "user.go", + "zz_generated.deepcopy.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + ], +) diff --git a/common/api/v1alpha1/backup.go b/common/api/v1alpha1/backup.go new file mode 100644 index 0000000..b09bb35 --- /dev/null +++ b/common/api/v1alpha1/backup.go @@ -0,0 +1,76 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//+kubebuilder:object:generate=true + +// BackupSpec defines the desired state of a backup. +type BackupSpec struct { + + // Instance is the name of the instance to take a backup of. + // +required + Instance string `json:"instance,omitempty"` + + // Type describes the type of backup to take. Immutable. + // Available options are: + // - Snapshot: storage level disk snapshot. + // - Physical: database engine specific backup that relies on a redo stream / + // continuous archiving (WAL) and may allow a PITR. + // Examples include pg_backup, pgBackRest, mysqlbackup. + // A Physical backup may be file based or database block based + // (e.g. Oracle RMAN). + // - Logical: database engine specific backup that relies on running SQL + // statements, e.g. mysqldump, pg_dump, expdp. + // If not specified, the default of Snapshot is assumed. + // +kubebuilder:validation:Enum=Snapshot;Physical;Logical + // +optional + Type BackupType `json:"type,omitempty"` + + // KeepDataOnDeletion defines whether to keep backup data + // when the backup resource is removed. The default value is false. + // +optional + KeepDataOnDeletion bool `json:"keepDataOnDeletion,omitempty"` +} + +// BackupType is presently defined as a free-form string.
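+// Note, however, that values of Backup.Spec.Type are restricted to the +// Snapshot, Physical and Logical constants below by the kubebuilder enum +// validation marker on that field.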
+type BackupType string + +const ( + // See Backup.Spec.Type definition above for explanation + // on what Snapshot, Physical and Logical backups are. + BackupTypePhysical BackupType = "Physical" + BackupTypeLogical BackupType = "Logical" + BackupTypeSnapshot BackupType = "Snapshot" +) + +//+kubebuilder:object:generate=true + +// BackupStatus defines the observed state of a backup. +type BackupStatus struct { + // Phase is a summary of the current state of the Backup. + // +optional + Phase BackupPhase `json:"phase,omitempty"` + + // Conditions represents the latest available observations + // of the backup's current state. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} diff --git a/common/api/v1alpha1/credential.go b/common/api/v1alpha1/credential.go new file mode 100644 index 0000000..50e9c1c --- /dev/null +++ b/common/api/v1alpha1/credential.go @@ -0,0 +1,59 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +//+kubebuilder:object:generate=true + +// CredentialSpec defines the desired state of user credentials. +// The credential can be expressed in one of the following three ways: +// 1) A plaintext password; +// 2) A reference to a k8s secret; +// 3) A reference to a remote GSM secret (note that it only works for GKE). +type CredentialSpec struct { + // Plaintext password. + // +optional + Password string `json:"password,omitempty"` + + // A reference to a k8s secret. + // +optional + SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` + + // A reference to a GSM secret. + // +optional + GsmSecretRef *GsmSecretReference `json:"gsmSecretRef,omitempty"` +} + +//+kubebuilder:object:generate=true + +// GsmSecretReference represents a Google Secret Manager Secret (GSM) Reference. +// It has enough information to retrieve a secret from Google Secret Manager. +type GsmSecretReference struct { + // ProjectId identifies the project where the secret resource resides. + // +required + ProjectId string `json:"projectId,omitempty"` + + // SecretId identifies the secret. + // +required + SecretId string `json:"secretId,omitempty"` + + // Version is the version of the secret. + // If "latest" is specified, the latest version of the secret is used. + // +required + Version string `json:"version,omitempty"` +} diff --git a/common/api/v1alpha1/database.go b/common/api/v1alpha1/database.go new file mode 100644 index 0000000..dd7e855 --- /dev/null +++ b/common/api/v1alpha1/database.go @@ -0,0 +1,46 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +//+kubebuilder:object:generate=true + +// DatabaseSpec defines the desired state of Database. +type DatabaseSpec struct { + // Name of the instance that the database belongs to. + // +required + Instance string `json:"instance,omitempty"` + + // Name of the database. + // +required + Name string `json:"name,omitempty"` +} + +//+kubebuilder:object:generate=true + +// DatabaseStatus defines the observed state of Database. +type DatabaseStatus struct { + // Phase is a summary of the current state of the Database. + // +optional + Phase DatabasePhase `json:"phase,omitempty"` + + // Conditions represents the latest available observations of the + // Database's current state. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} diff --git a/common/api/v1alpha1/disk.go b/common/api/v1alpha1/disk.go new file mode 100644 index 0000000..1dd68fb --- /dev/null +++ b/common/api/v1alpha1/disk.go @@ -0,0 +1,49 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import "k8s.io/apimachinery/pkg/api/resource" + +//+kubebuilder:object:generate=true + +// DiskSpec defines the desired state of a disk. +// (The structure is deliberately designed to be flexible, as a slice, +// so that if the disk layout changes for different hosting platforms, +// the model can be adjusted to reflect that.) +type DiskSpec struct { + // Name of a disk. + // Allowed values are: DataDisk, LogDisk, BackupDisk. + // +required + // +kubebuilder:validation:Enum=DataDisk;LogDisk;BackupDisk + Name string `json:"name"` + + // Disk type. + // Depending on the deployment platform, DiskType may take different values. + // On GCP, "HDD" and "SSD" are supported. Defaults to "HDD" if not specified. + // +optional + Type *DiskType `json:"type,omitempty"` + + // Disk size. If not specified, the defaults are: DataDisk: "100Gi", LogDisk: "150Gi", BackupDisk: "100Gi". + // +optional + Size resource.Quantity `json:"size,omitempty"` + + // StorageClass points to a particular CSI driver and is used + // for disk provisioning.
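+ // For example, "csi-gce-pd" (illustrative; any storage class backed by an + // installed CSI driver can be named here).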
+ // +optional + StorageClass string `json:"storageClass,omitempty"` +} + +// DiskType represents the type of a disk (e.g. HDD or SSD). +type DiskType string diff --git a/common/api/v1alpha1/instance.go b/common/api/v1alpha1/instance.go new file mode 100644 index 0000000..90236b4 --- /dev/null +++ b/common/api/v1alpha1/instance.go @@ -0,0 +1,174 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Service is a service provided by the operator. +type Service string + +// InstanceMode describes how an instance will be managed by the operator. +type InstanceMode string + +const ( + // Monitoring service provides the ability to collect + // monitoring data from the database and the cluster. + Monitoring Service = "Monitoring" + + // BackupAndRestore service provides database backup and restore functionality. + BackupAndRestore Service = "Backup" + + // Security service + Security Service = "Security" + + // Logging service + Logging Service = "Logging" + + // Patching service provides software and database patching. + Patching Service = "Patching" + + // ManuallySetUpStandby means that the operator will skip DB creation during + // provisioning; the instance will be ready for users to manually set up a standby. + ManuallySetUpStandby InstanceMode = "ManuallySetUpStandby" +) + +//+kubebuilder:object:generate=true + +// GenericInstanceSpec represents the database-engine-agnostic +// part of the spec describing the desired state of an Instance. +type GenericInstanceSpec struct { + // Type of a database engine. + // +required + // +kubebuilder:validation:Enum=Oracle + Type string `json:"type,omitempty"` + + // HostingType conveys whether an Instance is meant to be hosted on a cloud + // (single or multiple), on-prem, on Bare Metal, etc. + // It is meant to be used as a filter and aggregation dimension. + // +optional + // +kubebuilder:validation:Enum="";Cloud;MultiCloud;Hybrid;BareMetal;OnPrem + HostingType string `json:"hostingType,omitempty"` + + // DeploymentType reflects a fully managed (DBaaS) vs. semi-managed database. + // +optional + // +kubebuilder:validation:Enum="";InCluster;CloudSQL;RDS + DeploymentType string `json:"deploymentType,omitempty"` + + // CloudProvider is only relevant if the hosting type is Cloud, + // MultiCloud, Hybrid or Bare Metal. + // +optional + // +kubebuilder:validation:Enum=GCP;AWS;Azure;OCI + CloudProvider string `json:"cloudProvider,omitempty"` + + // Version of a database. + // +required + Version string `json:"version,omitempty"` + + // Edition of a database. + // +required + Edition string `json:"edition,omitempty"` + + // Disks slice describes at minimum two disks: + // data and log (archive log), and optionally a backup disk. + Disks []DiskSpec `json:"disks,omitempty"` + + // Service agent and other data plane GCR images.
+ // This is an optional map that allows a customer to specify GCR images + // different from those chosen/provided. + // +optional + Images map[string]string `json:"images,omitempty"` + + // Source IP CIDR ranges allowed for a client. + // +optional + SourceCidrRanges []string `json:"sourceCidrRanges,omitempty"` + + // Parameters contains the database flags as a map. + // +optional + Parameters map[string]string `json:"parameters,omitempty"` + + // Patching contains all the patching-related attributes, like patch version and image. + // +optional + Patching *PatchingSpec `json:"patching,omitempty"` + + // Services lists the optional semi-managed services that + // customers can choose from. + Services map[Service]bool `json:"services,omitempty"` + + // MinMemoryForDBContainer overrides the default safe limit for + // scheduling the db container without crashes due to memory pressure. + // +optional + MinMemoryForDBContainer string `json:"minMemoryForDBContainer,omitempty"` + + // MaintenanceWindow specifies the time windows during which database downtimes are allowed for maintenance. + // +optional + MaintenanceWindow *MaintenanceWindowSpec `json:"maintenanceWindow,omitempty"` + + // Mode specifies how this instance will be managed by the operator. + // +optional + // +kubebuilder:validation:Enum=ManuallySetUpStandby + Mode InstanceMode `json:"mode,omitempty"` +} + +// PatchingSpec contains the patching-related details. +type PatchingSpec struct { + // Patch version. + PatchVersion string `json:"patchVersion,omitempty"` + // GCR link to the patched service image. + PatchedServiceImage string `json:"patchedServiceImage,omitempty"` +} + +//+kubebuilder:object:generate=true + +// GenericInstanceStatus defines the observed state of an Instance. +type GenericInstanceStatus struct { + // Conditions represents the latest available observations + // of the GenericInstance's current state. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // Endpoint is presently expressed in the format of <instanceName>-svc.<ns>. + Endpoint string `json:"endpoint,omitempty"` + + // URL represents the IP address and port number needed to + // establish a database connection from outside the cluster. + URL string `json:"url,omitempty"` + + // Description is for human consumption. + // E.g. when an Instance is restored from a backup, + // this field is populated with the human-readable + // restore details. + Description string `json:"description,omitempty"` + + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // IsChangeApplied indicates whether instance changes have been applied. + // +optional + IsChangeApplied metav1.ConditionStatus `json:"isChangeApplied,omitempty"` +} + +// GenericInstance represents the contract that Anthos DB Operator compliant +// database operator providers are expected to abide by.
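+// Illustrative usage, assuming a concrete engine type (the hypothetical +// oracleInstance below) satisfies the contract: +// +// var gi GenericInstance = &oracleInstance{} +// engineAgnosticSpec := gi.GenericInstanceSpec()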
+type GenericInstance interface { + runtime.Object + GenericInstanceSpec() GenericInstanceSpec + GenericInstanceStatus() GenericInstanceStatus +} diff --git a/common/api/v1alpha1/maintenancewindow.go b/common/api/v1alpha1/maintenancewindow.go new file mode 100644 index 0000000..77e5d8c --- /dev/null +++ b/common/api/v1alpha1/maintenancewindow.go @@ -0,0 +1,39 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TimeRange defines a window of time. +// Both start time and duration are required. +//+kubebuilder:object:generate=true +type TimeRange struct { + // Start time. + // +required + Start *metav1.Time `json:"start,omitempty"` + + // Duration of the maintenance window + // +required + Duration *metav1.Duration `json:"duration,omitempty"` +} + +// MaintenanceWindowSpec defines the time ranges during which maintenance may be started on a database. +//+kubebuilder:object:generate=true +type MaintenanceWindowSpec struct { + // Maintenance time ranges. + TimeRanges []TimeRange `json:"timeRanges,omitempty"` +} diff --git a/common/api/v1alpha1/phase.go b/common/api/v1alpha1/phase.go new file mode 100644 index 0000000..f31208a --- /dev/null +++ b/common/api/v1alpha1/phase.go @@ -0,0 +1,44 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +type InstancePhase string + +const ( + InstanceCreating InstancePhase = "Creating" + InstanceUpdating InstancePhase = "Updating" + InstanceRestoring InstancePhase = "Restoring" + InstanceDeleting InstancePhase = "Deleting" + InstanceReady InstancePhase = "Ready" +) + +type DatabasePhase string + +const ( + DatabasePending DatabasePhase = "Pending" + DatabaseCreating DatabasePhase = "Creating" + DatabaseUpdating DatabasePhase = "Updating" + DatabaseDeleting DatabasePhase = "Deleting" + DatabaseReady DatabasePhase = "Ready" +) + +type BackupPhase string + +const ( + BackupPending BackupPhase = "Pending" + BackupInProgress BackupPhase = "InProgress" + BackupFailed BackupPhase = "Failed" + BackupSucceeded BackupPhase = "Succeeded" +) diff --git a/common/api/v1alpha1/user.go b/common/api/v1alpha1/user.go new file mode 100644 index 0000000..0034653 --- /dev/null +++ b/common/api/v1alpha1/user.go @@ -0,0 +1,28 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +//+kubebuilder:object:generate=true + +// UserSpec defines the common desired state of User. +type UserSpec struct { + // Name of the User. + // +required + Name string `json:"name,omitempty"` + + // Credential of the User. See definition for 'CredentialSpec'. + // +required + CredentialSpec `json:",inline"` +} diff --git a/common/api/v1alpha1/zz_generated.deepcopy.go b/common/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..e7b4cd2 --- /dev/null +++ b/common/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,303 @@ +// +build !ignore_autogenerated + +/* +Copyright 2021 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialSpec) DeepCopyInto(out *CredentialSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(corev1.SecretReference) + **out = **in + } + if in.GsmSecretRef != nil { + in, out := &in.GsmSecretRef, &out.GsmSecretRef + *out = new(GsmSecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSpec. 
+func (in *CredentialSpec) DeepCopy() *CredentialSpec { + if in == nil { + return nil + } + out := new(CredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. +func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { + if in == nil { + return nil + } + out := new(DatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. +func (in *DatabaseStatus) DeepCopy() *DatabaseStatus { + if in == nil { + return nil + } + out := new(DatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSpec) DeepCopyInto(out *DiskSpec) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(DiskType) + **out = **in + } + out.Size = in.Size.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSpec. +func (in *DiskSpec) DeepCopy() *DiskSpec { + if in == nil { + return nil + } + out := new(DiskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericInstanceSpec) DeepCopyInto(out *GenericInstanceSpec) { + *out = *in + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]DiskSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SourceCidrRanges != nil { + in, out := &in.SourceCidrRanges, &out.SourceCidrRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Patching != nil { + in, out := &in.Patching, &out.Patching + *out = new(PatchingSpec) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make(map[Service]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericInstanceSpec. +func (in *GenericInstanceSpec) DeepCopy() *GenericInstanceSpec { + if in == nil { + return nil + } + out := new(GenericInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GenericInstanceStatus) DeepCopyInto(out *GenericInstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericInstanceStatus. +func (in *GenericInstanceStatus) DeepCopy() *GenericInstanceStatus { + if in == nil { + return nil + } + out := new(GenericInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmSecretReference) DeepCopyInto(out *GsmSecretReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmSecretReference. +func (in *GsmSecretReference) DeepCopy() *GsmSecretReference { + if in == nil { + return nil + } + out := new(GsmSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowSpec) DeepCopyInto(out *MaintenanceWindowSpec) { + *out = *in + if in.TimeRanges != nil { + in, out := &in.TimeRanges, &out.TimeRanges + *out = make([]TimeRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowSpec. +func (in *MaintenanceWindowSpec) DeepCopy() *MaintenanceWindowSpec { + if in == nil { + return nil + } + out := new(MaintenanceWindowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeRange) DeepCopyInto(out *TimeRange) { + *out = *in + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = (*in).DeepCopy() + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeRange. +func (in *TimeRange) DeepCopy() *TimeRange { + if in == nil { + return nil + } + out := new(TimeRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.CredentialSpec.DeepCopyInto(&out.CredentialSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. 
+func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} diff --git a/common/pkg/maintenance/BUILD.bazel b/common/pkg/maintenance/BUILD.bazel new file mode 100644 index 0000000..1c738ea --- /dev/null +++ b/common/pkg/maintenance/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "maintenance", + srcs = ["windows.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/pkg/maintenance", + visibility = ["//visibility:public"], + deps = ["//common/api/v1alpha1"], +) + +go_test( + name = "maintenance_test", + srcs = ["windows_test.go"], + embed = [":maintenance"], + deps = [ + "//common/api/v1alpha1", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + ], +) diff --git a/common/pkg/maintenance/windows.go b/common/pkg/maintenance/windows.go new file mode 100644 index 0000000..a7a8a49 --- /dev/null +++ b/common/pkg/maintenance/windows.go @@ -0,0 +1,106 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package maintenance + +import ( + "errors" + "time" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// timeRangeInRange returns true iff the specified time lies in the range. +// The range check is inclusive for start-time and exclusive for end-time. +func timeRangeInRange(tr *commonv1alpha1.TimeRange, t time.Time) bool { + if !timeRangeIsValid(tr) { + return false + } + + start := tr.Start.Rfc3339Copy().Time + if t.Before(start) { + return false + } + + end := start.Add(tr.Duration.Duration) + + return end.After(t) +} + +// timeRangeIsValid verifies if fields on TimeRange are correctly set. +// In particular, Start and Duration fields should be set. +func timeRangeIsValid(tr *commonv1alpha1.TimeRange) bool { + return tr != nil && tr.Start != nil && tr.Duration != nil +} + +// HasValidTimeRanges validates that at least one time range is specified and that all specified time ranges are valid. +func HasValidTimeRanges(mw *commonv1alpha1.MaintenanceWindowSpec) bool { + if mw == nil || len(mw.TimeRanges) == 0 { + return false + } + + for _, tr := range mw.TimeRanges { + if !timeRangeIsValid(&tr) { + return false + } + } + + return true +} + +// InRange returns true iff the specified time is in any one of the time ranges. +func InRange(mw *commonv1alpha1.MaintenanceWindowSpec, t time.Time) bool { + for _, tr := range mw.TimeRanges { + if timeRangeInRange(&tr, t) { + return true + } + } + + return false +} + +// NoFutureWindows error can be used by a caller to detect that +// there are no future maintenance windows available. +var NoFutureWindows = errors.New("no future windows") + +// NextWindow returns the start time of the next maintenance window that +// begins after t, coupled with the duration of that window. +// If no future windows are available, the NoFutureWindows error is returned.
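+// Illustrative usage (mw being a *commonv1alpha1.MaintenanceWindowSpec): +// +// start, d, err := NextWindow(mw, time.Now()) +// if errors.Is(err, NoFutureWindows) { +// // nothing to schedule +// }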
+func NextWindow(mw *commonv1alpha1.MaintenanceWindowSpec, t time.Time) (*time.Time, *time.Duration, error) { + var min *time.Time + var d *time.Duration + for _, tr := range mw.TimeRanges { + if !timeRangeIsValid(&tr) { + continue + } + + trStart := tr.Start.Rfc3339Copy().Time + if t.Before(trStart) { + if min == nil { + min = &trStart + d = &tr.Duration.Duration + } + if min.After(trStart) { + min = &trStart + d = &tr.Duration.Duration + } + } + } + + if min != nil { + return min, d, nil + } + + return nil, nil, NoFutureWindows +} diff --git a/common/pkg/maintenance/windows_test.go b/common/pkg/maintenance/windows_test.go new file mode 100644 index 0000000..2e57f4b --- /dev/null +++ b/common/pkg/maintenance/windows_test.go @@ -0,0 +1,319 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package maintenance + +import ( + "fmt" + "testing" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +func TestTimeRangeIsValid(t *testing.T) { + var tests = []struct { + name string + tr commonv1alpha1.TimeRange + want bool + }{ + { + name: "valid values", + tr: commonv1alpha1.TimeRange{ + Start: &v1.Time{Time: time.Now()}, + Duration: &v1.Duration{Duration: time.Hour}, + }, + want: true, + }, + { + name: "missing Duration", + tr: commonv1alpha1.TimeRange{ + Start: &v1.Time{Time: time.Now()}, + }, + want: false, + }, + { + name: "missing Start", + tr: commonv1alpha1.TimeRange{ + Duration: &v1.Duration{Duration: time.Hour}, + }, + want: false, + }, + } + + for _, tt := range tests { + testname := fmt.Sprintf("TestTimeRangeIsValid %s", tt.name) + t.Run(testname, func(t *testing.T) { + act := timeRangeIsValid(&tt.tr) + if act != tt.want { + t.Errorf("got %v, want %v", act, tt.want) + } + }) + } +} + +func TestTimeRangeInRange(t *testing.T) { + now := time.Now() + startTime := now.Add(-time.Minute) + duration := 2 * time.Minute + tr := commonv1alpha1.TimeRange{ + Start: &v1.Time{Time: startTime}, + Duration: &v1.Duration{Duration: duration}, + } + var tests = []struct { + name string + when time.Time + want bool + }{ + { + name: "start time should be in range", + when: startTime, + want: true, + }, + { + name: "time in between should be in range", + when: now, + want: true, + }, + { + name: "time before start should not be in range", + when: startTime.Add(-duration), + want: false, + }, + { + name: "time after end time should not be in range", + when: startTime.Add(2 * duration), + want: false, + }, + { + name: "end time should not be in range", + when: startTime.Add(duration), + want: false, + }, + } + for _, tt := range tests { + testname := fmt.Sprintf("TestTimeRangeInRange %s", tt.name) + t.Run(testname, func(t *testing.T) { + act := timeRangeInRange(&tr, tt.when) + if act != tt.want { + t.Errorf("got %v, want %v", act, tt.want) + } + }) + } +} + +func TestInRange(t *testing.T) { + n := time.Now() + s1 := n + d1 := time.Hour + e1 := s1.Add(d1) + s2 := 
+func TestHasValidTimeRanges(t *testing.T) {
+	validTr1 := commonv1alpha1.TimeRange{
+		Start:    &v1.Time{Time: time.Now()},
+		Duration: &v1.Duration{Duration: time.Minute * 20},
+	}
+	validTr2 := commonv1alpha1.TimeRange{
+		Start:    &v1.Time{Time: time.Now().Add(time.Hour)},
+		Duration: &v1.Duration{Duration: time.Hour},
+	}
+	invalidTr := commonv1alpha1.TimeRange{
+		Start: &v1.Time{Time: time.Now().Add(time.Hour)},
+	}
+
+	var tests = []struct {
+		name string
+		spec *commonv1alpha1.MaintenanceWindowSpec
+		want bool
+	}{
+		{
+			name: "nil windows",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: nil},
+			want: false,
+		},
+		{
+			name: "no windows",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{}},
+			want: false,
+		},
+		{
+			name: "one valid window",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{validTr1}},
+			want: true,
+		},
+		{
+			name: "two valid windows",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{validTr1, validTr2}},
+			want: true,
+		},
+		{
+			name: "only one invalid window",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{invalidTr}},
+			want: false,
+		},
+		{
+			name: "one invalid and one valid window",
+			spec: &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{validTr1, invalidTr}},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		testname := fmt.Sprintf("TestHasValidTimeRanges %s", tt.name)
+		t.Run(testname, func(t *testing.T) {
+			act := HasValidTimeRanges(tt.spec)
+			if act != tt.want {
+				t.Errorf("got %v, want %v", act, tt.want)
+			}
+		})
+	}
+}
+
+func TestNextWindow(t *testing.T) {
+	// Truncate to second precision: NextWindow reads start times through
+	// Rfc3339Copy, which drops sub-second precision, so the value
+	// comparisons below must not be thrown off by that.
+	n := time.Now().Truncate(time.Second)
+	s1 := n
+	d1 := time.Hour
+	e1 := s1.Add(d1)
+	s2 := e1.Add(3 * time.Hour)
+	d2 := time.Minute
+	e2 := s2.Add(d2)
+	mw := &commonv1alpha1.MaintenanceWindowSpec{
+		TimeRanges: []commonv1alpha1.TimeRange{
+			{
+				Start:    &v1.Time{Time: s1},
+				Duration: &v1.Duration{Duration: d1},
+			},
+			{
+				Start:    &v1.Time{Time: s2},
+				Duration: &v1.Duration{Duration: d2},
+			},
+		},
+	}
+	var tests = []struct {
+		name         string
+		when         time.Time
+		wantStart    *time.Time
+		wantDuration *time.Duration
+		wantError    error
+	}{
+		{
+			name:         "time before first range",
+			when:         s1.Add(-d1),
+			wantStart:    &s1,
+			wantDuration: &d1,
+			wantError:    nil,
+		},
+		{
+			name:         "at start of first range",
+			when:         s1,
+			wantStart:    &s1,
+			wantDuration: &d1,
+			wantError:    nil,
+		},
+		{
+			name:         "in middle of first range",
+			when:         s1.Add(d1 / 2),
+			wantStart:    &s1,
+			wantDuration: &d1,
+			wantError:    nil,
+		},
+		{
+			name:         "end of first range",
+			when:         e1,
+			wantStart:    &s2,
+			wantDuration: &d2,
+			wantError:    nil,
+		},
+		{
+			name:         "between first and second range",
+			when:         e1.Add(s2.Sub(e1) / 2),
+			wantStart:    &s2,
+			wantDuration: &d2,
+			wantError:    nil,
+		},
+		{
+			name:         "end of second range",
+			when:         e2,
+			wantStart:    nil,
+			wantDuration: nil,
+			wantError:    NoFutureWindows,
+		},
+	}
+	for _, tt := range tests {
+		testname := fmt.Sprintf("TestNextWindow %s", tt.name)
+		t.Run(testname, func(t *testing.T) {
+			aStart, aDuration, aErr := NextWindow(mw, tt.when)
+			// Compare results by value rather than by pointer identity, and
+			// fail when any of the three return values deviates.
+			if aErr != tt.wantError ||
+				(aStart == nil) != (tt.wantStart == nil) ||
+				(aStart != nil && !aStart.Equal(*tt.wantStart)) ||
+				(aDuration == nil) != (tt.wantDuration == nil) ||
+				(aDuration != nil && *aDuration != *tt.wantDuration) {
+				t.Errorf("got (%v, %v, %v), want (%v, %v, %v)", aStart, aDuration, aErr, tt.wantStart, tt.wantDuration, tt.wantError)
+			}
+		})
+	}
+}
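+
+// ExampleNextWindow is an illustrative, deterministic sketch (not part of the
+// original change) showing how a caller reads NextWindow's results; the
+// fixed base time is an arbitrary assumption.
+func ExampleNextWindow() {
+	base := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
+	mw := &commonv1alpha1.MaintenanceWindowSpec{
+		TimeRanges: []commonv1alpha1.TimeRange{{
+			Start:    &v1.Time{Time: base.Add(time.Hour)},
+			Duration: &v1.Duration{Duration: 30 * time.Minute},
+		}},
+	}
+	start, d, err := NextWindow(mw, base)
+	fmt.Println(start.UTC(), *d, err)
+	// Output: 2021-01-01 01:00:00 +0000 UTC 30m0s <nil>
+}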
diff --git a/common/pkg/utils/BUILD.bazel b/common/pkg/utils/BUILD.bazel
new file mode 100644
index 0000000..56bd024
--- /dev/null
+++ b/common/pkg/utils/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "utils",
+    srcs = ["utils.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/pkg/utils",
+    visibility = ["//visibility:public"],
+    deps = ["//common/api/v1alpha1"],
+)
diff --git a/common/pkg/utils/utils.go b/common/pkg/utils/utils.go
new file mode 100644
index 0000000..6005aec
--- /dev/null
+++ b/common/pkg/utils/utils.go
@@ -0,0 +1,41 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package utils provides auxiliary functions for Anthos DB Operator-compliant resources.
+package utils
+
+import (
+	"fmt"
+
+	commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1"
+)
+
+// DiskSpaceTotal is a helper function to calculate the total amount
+// of allocated space across all disks requested for an instance.
+func DiskSpaceTotal(inst commonv1alpha1.GenericInstance) (int64, error) {
+	spec := inst.GenericInstanceSpec()
+	if spec.Disks == nil {
+		return -1, fmt.Errorf("failed to detect requested disks for inst: %v", spec)
+	}
+	var total int64
+	for _, d := range spec.Disks {
+		i, ok := d.Size.AsInt64()
+		if !ok {
+			return -1, fmt.Errorf("invalid size provided for disk %v: an integer must be provided", d)
+		}
+		total += i
+	}
+
+	return total, nil
+}
diff --git a/deps.bzl b/deps.bzl
new file mode 100644
index 0000000..4cc04a4
--- /dev/null
+++ b/deps.bzl
@@ -0,0 +1,2398 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +load("@bazel_gazelle//:deps.bzl", "go_repository") + +def go_dependencies(): + go_repository( + name = "co_honnef_go_tools", + importpath = "honnef.co/go/tools", + sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=", + version = "v0.0.1-2020.1.4", + ) + go_repository( + name = "com_github_afex_hystrix_go", + importpath = "github.com/afex/hystrix-go", + sum = "h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw=", + version = "v0.0.0-20180502004556-fa1af6a1f4f5", + ) + + go_repository( + name = "com_github_agnivade_levenshtein", + importpath = "github.com/agnivade/levenshtein", + sum = "h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_alecthomas_template", + importpath = "github.com/alecthomas/template", + sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=", + version = "v0.0.0-20190718012654-fb15b899a751", + ) + go_repository( + name = "com_github_alecthomas_units", + importpath = "github.com/alecthomas/units", + sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=", + version = "v0.0.0-20190924025748-f65c72e2690d", + ) + go_repository( + name = "com_github_andreyvit_diff", + importpath = "github.com/andreyvit/diff", + sum = "h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=", + version = "v0.0.0-20170406064948-c7f18ee00883", + ) + go_repository( + name = "com_github_apache_thrift", + importpath = "github.com/apache/thrift", + sum = "h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=", + version = "v0.13.0", + ) + go_repository( + name = "com_github_armon_circbuf", + importpath = "github.com/armon/circbuf", + sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=", + version = "v0.0.0-20150827004946-bbbad097214e", + ) + + go_repository( + name = "com_github_armon_consul_api", + importpath = "github.com/armon/consul-api", + sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=", + version = "v0.0.0-20180202201655-eb2c6b5be1b6", + ) + go_repository( + name = "com_github_armon_go_metrics", + importpath = "github.com/armon/go-metrics", + sum = "h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=", + version = "v0.0.0-20180917152333-f0300d1749da", + ) + go_repository( + name = "com_github_armon_go_radix", + importpath = "github.com/armon/go-radix", + sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=", + version = "v0.0.0-20180808171621-7fddfc383310", + ) + go_repository( + name = "com_github_aryann_difflib", + importpath = "github.com/aryann/difflib", + sum = "h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw=", + version = "v0.0.0-20170710044230-e206f873d14a", + ) + + go_repository( + name = "com_github_asaskevich_govalidator", + importpath = "github.com/asaskevich/govalidator", + sum = "h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=", + version = "v0.0.0-20190424111038-f61b66f89f4a", + ) + go_repository( + name = "com_github_aws_aws_lambda_go", + importpath = "github.com/aws/aws-lambda-go", + sum = "h1:SuCy7H3NLyp+1Mrfp+m80jcbi9KYWAs9/BXwppwRDzY=", + version = "v1.13.3", + ) + go_repository( + name = "com_github_aws_aws_sdk_go", + importpath = "github.com/aws/aws-sdk-go", + sum = "h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk=", + version = "v1.27.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2", + importpath = "github.com/aws/aws-sdk-go-v2", + sum = "h1:qZ+woO4SamnH/eEbjM2IDLhRNwIwND/RQyVlBLp3Jqg=", + version = "v0.18.0", + ) + + go_repository( + name = "com_github_azure_go_ansiterm", + importpath = 
"github.com/Azure/go-ansiterm", + sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=", + version = "v0.0.0-20170929234023-d6e3b3328b78", + ) + go_repository( + name = "com_github_azure_go_autorest", + importpath = "github.com/Azure/go-autorest", + sum = "h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=", + version = "v14.2.0+incompatible", + ) + + go_repository( + name = "com_github_azure_go_autorest_autorest", + importpath = "github.com/Azure/go-autorest/autorest", + sum = "h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=", + version = "v0.11.1", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_adal", + importpath = "github.com/Azure/go-autorest/autorest/adal", + sum = "h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=", + version = "v0.9.5", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_date", + importpath = "github.com/Azure/go-autorest/autorest/date", + sum = "h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_mocks", + importpath = "github.com/Azure/go-autorest/autorest/mocks", + sum = "h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=", + version = "v0.4.1", + ) + go_repository( + name = "com_github_azure_go_autorest_logger", + importpath = "github.com/Azure/go-autorest/logger", + sum = "h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_azure_go_autorest_tracing", + importpath = "github.com/Azure/go-autorest/tracing", + sum = "h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_beorn7_perks", + importpath = "github.com/beorn7/perks", + sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_bgentry_speakeasy", + importpath = "github.com/bgentry/speakeasy", + sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_bketelsen_crypt", + importpath = "github.com/bketelsen/crypt", + sum = "h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=", + version = "v0.0.3-0.20200106085610-5cbc8cc4026c", + ) + + go_repository( + name = "com_github_blang_semver", + importpath = "github.com/blang/semver", + sum = "h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=", + version = "v3.5.0+incompatible", + ) + go_repository( + name = "com_github_brancz_gojsontoyaml", + importpath = "github.com/brancz/gojsontoyaml", + sum = "h1:eyhpHbo03QUlPHSTt5El8XayORJVl9/7Im3HXV0zRAY=", + version = "v0.0.0-20201216083616-202f76bf8c1f", + ) + + go_repository( + name = "com_github_burntsushi_toml", + importpath = "github.com/BurntSushi/toml", + sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_burntsushi_xgb", + importpath = "github.com/BurntSushi/xgb", + sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", + version = "v0.0.0-20160522181843-27f122750802", + ) + go_repository( + name = "com_github_campoy_embedmd", + importpath = "github.com/campoy/embedmd", + sum = "h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_casbin_casbin_v2", + importpath = "github.com/casbin/casbin/v2", + sum = "h1:bTwon/ECRx9dwBy2ewRVr5OiqjeXSGiTUY74sDPQi/g=", + version = "v2.1.2", + ) + go_repository( + name = "com_github_cenkalti_backoff", + importpath = "github.com/cenkalti/backoff", + sum = 
"h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=", + version = "v2.2.1+incompatible", + ) + + go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + importpath = "github.com/census-instrumentation/opencensus-proto", + sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_cespare_xxhash", + importpath = "github.com/cespare/xxhash", + sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_cespare_xxhash_v2", + importpath = "github.com/cespare/xxhash/v2", + sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=", + version = "v2.1.1", + ) + go_repository( + name = "com_github_chzyer_logex", + importpath = "github.com/chzyer/logex", + sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=", + version = "v1.1.10", + ) + go_repository( + name = "com_github_chzyer_readline", + importpath = "github.com/chzyer/readline", + sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=", + version = "v0.0.0-20180603132655-2972be24d48e", + ) + go_repository( + name = "com_github_chzyer_test", + importpath = "github.com/chzyer/test", + sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=", + version = "v0.0.0-20180213035817-a1ea475d72b1", + ) + go_repository( + name = "com_github_clbanning_x2j", + importpath = "github.com/clbanning/x2j", + sum = "h1:EdRZT3IeKQmfCSrgo8SZ8V3MEnskuJP0wCYNpe+aiXo=", + version = "v0.0.0-20191024224557-825249438eec", + ) + + go_repository( + name = "com_github_client9_misspell", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", + ) + go_repository( + name = "com_github_cncf_udpa_go", + importpath = "github.com/cncf/udpa/go", + sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=", + version = "v0.0.0-20201120205902-5459f2c99403", + ) + go_repository( + name = "com_github_cockroachdb_datadriven", + importpath = "github.com/cockroachdb/datadriven", + sum = "h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=", + version = "v0.0.0-20190809214429-80d97fb3cbaa", + ) + go_repository( + name = "com_github_codahale_hdrhistogram", + importpath = "github.com/codahale/hdrhistogram", + sum = "h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=", + version = "v0.0.0-20161010025455-3a0bb77429bd", + ) + + go_repository( + name = "com_github_container_storage_interface_spec", + importpath = "github.com/container-storage-interface/spec", + sum = "h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_containerd_containerd", + importpath = "github.com/containerd/containerd", + sum = "h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY=", + version = "v1.4.1", + ) + go_repository( + name = "com_github_coreos_bbolt", + importpath = "github.com/coreos/bbolt", + sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=", + version = "v1.3.2", + ) + go_repository( + name = "com_github_coreos_etcd", + importpath = "github.com/coreos/etcd", + sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=", + version = "v3.3.10+incompatible", + ) + go_repository( + name = "com_github_coreos_go_etcd", + importpath = "github.com/coreos/go-etcd", + sum = "h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=", + version = "v2.0.0+incompatible", + ) + go_repository( + name = "com_github_coreos_go_oidc", + importpath = "github.com/coreos/go-oidc", + sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=", + version = "v2.1.0+incompatible", + ) + 
go_repository( + name = "com_github_coreos_go_semver", + importpath = "github.com/coreos/go-semver", + sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_coreos_go_systemd", + importpath = "github.com/coreos/go-systemd", + sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=", + version = "v0.0.0-20190321100706-95778dfbb74e", + ) + go_repository( + name = "com_github_coreos_pkg", + importpath = "github.com/coreos/pkg", + sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=", + version = "v0.0.0-20180928190104-399ea9e2e55f", + ) + go_repository( + name = "com_github_cpuguy83_go_md2man", + importpath = "github.com/cpuguy83/go-md2man", + sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=", + version = "v1.0.10", + ) + go_repository( + name = "com_github_cpuguy83_go_md2man_v2", + importpath = "github.com/cpuguy83/go-md2man/v2", + sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_creachadair_staticfile", + importpath = "github.com/creachadair/staticfile", + sum = "h1:RhyrMgi7IQn3GejgmGtFuCec58vboEMt5CH6N3ulRJk=", + version = "v0.1.3", + ) + + go_repository( + name = "com_github_creack_pty", + importpath = "github.com/creack/pty", + sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=", + version = "v1.1.9", + ) + go_repository( + name = "com_github_davecgh_go_spew", + importpath = "github.com/davecgh/go-spew", + sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_dgrijalva_jwt_go", + importpath = "github.com/dgrijalva/jwt-go", + sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=", + version = "v3.2.0+incompatible", + ) + go_repository( + name = "com_github_dgryski_go_sip13", + importpath = "github.com/dgryski/go-sip13", + sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=", + version = "v0.0.0-20181026042036-e10d5fee7954", + ) + go_repository( + name = "com_github_docker_distribution", + importpath = "github.com/docker/distribution", + sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=", + version = "v2.7.1+incompatible", + ) + go_repository( + name = "com_github_docker_docker", + build_directives = [ + "gazelle:exclude daemon", + "gazelle:exclude cli", + "gazelle:exclude **/testdata", + "gazelle:exclude vendor", + ], + importpath = "github.com/docker/docker", + sum = "h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U=", + version = "v0.7.3-0.20190327010347-be7ac8be2ae0", + ) + go_repository( + name = "com_github_docker_go_connections", + importpath = "github.com/docker/go-connections", + sum = "h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_docker_go_units", + importpath = "github.com/docker/go-units", + sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=", + version = "v0.4.0", + ) + go_repository( + # Makes docker happier + name = "com_github_docker_libnetwork", + importpath = "github.com/docker/libnetwork", + version = "v0.3", + ) + + go_repository( + name = "com_github_docker_spdystream", + importpath = "github.com/docker/spdystream", + sum = "h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=", + version = "v0.0.0-20160310174837-449fdfce4d96", + ) + go_repository( + # Makes docker happier + name = "com_github_docker_swarmkit", + importpath = "github.com/docker/swarmkit", + version = "v1.12.1-0.20200403154854-0b8364e7d08a", + ) + + go_repository( + name = "com_github_docopt_docopt_go", + importpath 
= "github.com/docopt/docopt-go", + sum = "h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=", + version = "v0.0.0-20180111231733-ee0de3bc6815", + ) + go_repository( + name = "com_github_dustin_go_humanize", + importpath = "github.com/dustin/go-humanize", + sum = "h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_eapache_go_resiliency", + importpath = "github.com/eapache/go-resiliency", + sum = "h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_eapache_go_xerial_snappy", + importpath = "github.com/eapache/go-xerial-snappy", + sum = "h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=", + version = "v0.0.0-20180814174437-776d5712da21", + ) + go_repository( + name = "com_github_eapache_queue", + importpath = "github.com/eapache/queue", + sum = "h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_edsrzf_mmap_go", + importpath = "github.com/edsrzf/mmap-go", + sum = "h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_elazarl_goproxy", + importpath = "github.com/elazarl/goproxy", + sum = "h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=", + version = "v0.0.0-20180725130230-947c36da3153", + ) + go_repository( + name = "com_github_emicklei_go_restful", + importpath = "github.com/emicklei/go-restful", + sum = "h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=", + version = "v2.9.5+incompatible", + ) + go_repository( + name = "com_github_envoyproxy_go_control_plane", + importpath = "github.com/envoyproxy/go-control-plane", + sum = "h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE=", + version = "v0.9.9-0.20201210154907-fd9021fe5dad", + ) + go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_evanphx_json_patch", + importpath = "github.com/evanphx/json-patch", + sum = "h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=", + version = "v4.9.0+incompatible", + ) + go_repository( + name = "com_github_fatih_color", + importpath = "github.com/fatih/color", + sum = "h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=", + version = "v1.7.0", + ) + go_repository( + name = "com_github_form3tech_oss_jwt_go", + importpath = "github.com/form3tech-oss/jwt-go", + sum = "h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=", + version = "v3.2.2+incompatible", + ) + + go_repository( + name = "com_github_franela_goblin", + importpath = "github.com/franela/goblin", + sum = "h1:gb2Z18BhTPJPpLQWj4T+rfKHYCHxRHCtRxhKKjRidVw=", + version = "v0.0.0-20200105215937-c9ffbefa60db", + ) + go_repository( + name = "com_github_franela_goreq", + importpath = "github.com/franela/goreq", + sum = "h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54=", + version = "v0.0.0-20171204163338-bcd34c9993f8", + ) + + go_repository( + name = "com_github_fsnotify_fsnotify", + importpath = "github.com/fsnotify/fsnotify", + sum = "h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=", + version = "v1.4.9", + ) + go_repository( + name = "com_github_ghodss_yaml", + importpath = "github.com/ghodss/yaml", + sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_globalsign_mgo", + importpath = "github.com/globalsign/mgo", + sum = 
"h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=", + version = "v0.0.0-20181015135952-eeefdecb41b8", + ) + go_repository( + name = "com_github_go_errors_errors", + importpath = "github.com/go-errors/errors", + sum = "h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=", + version = "v1.0.1", + ) + + go_repository( + name = "com_github_go_gl_glfw", + importpath = "github.com/go-gl/glfw", + sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=", + version = "v0.0.0-20190409004039-e6da0acd62b1", + ) + go_repository( + name = "com_github_go_gl_glfw_v3_3_glfw", + importpath = "github.com/go-gl/glfw/v3.3/glfw", + sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=", + version = "v0.0.0-20200222043503-6f7a984d4dc4", + ) + go_repository( + name = "com_github_go_kit_kit", + importpath = "github.com/go-kit/kit", + sum = "h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=", + version = "v0.10.0", + ) + go_repository( + name = "com_github_go_logfmt_logfmt", + importpath = "github.com/go-logfmt/logfmt", + sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_go_logr_logr", + importpath = "github.com/go-logr/logr", + sum = "h1:ZPVluSmhtMIHlqUDMZu70FgMpRzbQfl4h9oKCAXOVDE=", + version = "v0.2.1-0.20200730175230-ee2de8da5be6", + ) + go_repository( + name = "com_github_go_logr_zapr", + importpath = "github.com/go-logr/zapr", + sum = "h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_go_openapi_analysis", + importpath = "github.com/go-openapi/analysis", + sum = "h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=", + version = "v0.19.5", + ) + go_repository( + name = "com_github_go_openapi_errors", + importpath = "github.com/go-openapi/errors", + sum = "h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=", + version = "v0.19.2", + ) + go_repository( + name = "com_github_go_openapi_jsonpointer", + importpath = "github.com/go-openapi/jsonpointer", + sum = "h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=", + version = "v0.19.3", + ) + go_repository( + name = "com_github_go_openapi_jsonreference", + importpath = "github.com/go-openapi/jsonreference", + sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=", + version = "v0.19.3", + ) + go_repository( + name = "com_github_go_openapi_loads", + importpath = "github.com/go-openapi/loads", + sum = "h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=", + version = "v0.19.4", + ) + go_repository( + name = "com_github_go_openapi_runtime", + importpath = "github.com/go-openapi/runtime", + sum = "h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=", + version = "v0.19.4", + ) + go_repository( + name = "com_github_go_openapi_spec", + importpath = "github.com/go-openapi/spec", + sum = "h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=", + version = "v0.19.5", + ) + go_repository( + name = "com_github_go_openapi_strfmt", + importpath = "github.com/go-openapi/strfmt", + sum = "h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM=", + version = "v0.19.5", + ) + go_repository( + name = "com_github_go_openapi_swag", + importpath = "github.com/go-openapi/swag", + sum = "h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=", + version = "v0.19.5", + ) + go_repository( + name = "com_github_go_openapi_validate", + importpath = "github.com/go-openapi/validate", + sum = "h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys=", + version = "v0.19.8", + ) + go_repository( + name = "com_github_go_sql_driver_mysql", + importpath = "github.com/go-sql-driver/mysql", + sum = 
"h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=", + version = "v1.4.0", + ) + + go_repository( + name = "com_github_go_stack_stack", + importpath = "github.com/go-stack/stack", + sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_gobuffalo_flect", + importpath = "github.com/gobuffalo/flect", + sum = "h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM=", + version = "v0.2.0", + ) + + go_repository( + name = "com_github_gobuffalo_here", + importpath = "github.com/gobuffalo/here", + sum = "h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI=", + version = "v0.6.0", + ) + + go_repository( + name = "com_github_gogo_googleapis", + importpath = "github.com/gogo/googleapis", + sum = "h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI=", + version = "v1.1.0", + ) + + go_repository( + name = "com_github_gogo_protobuf", + build_directives = [ + "gazelle:exclude **/testdata", + "gazelle:exclude test/theproto3", + "gazelle:exclude conformance/internal", + ], + importpath = "github.com/gogo/protobuf", + sum = "h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_golang_glog", + importpath = "github.com/golang/glog", + sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=", + version = "v0.0.0-20160126235308-23def4e6c14b", + ) + go_repository( + name = "com_github_golang_groupcache", + importpath = "github.com/golang/groupcache", + sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=", + version = "v0.0.0-20200121045136-8c9f03a8e57e", + ) + go_repository( + name = "com_github_golang_mock", + importpath = "github.com/golang/mock", + sum = "h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_golang_protobuf", + build_directives = ["gazelle:exclude **/testdata"], + importpath = "github.com/golang/protobuf", + sum = "h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=", + version = "v1.5.2", + ) + go_repository( + name = "com_github_golang_snappy", + importpath = "github.com/golang/snappy", + sum = "h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=", + version = "v0.0.0-20180518054509-2e65f85255db", + ) + + go_repository( + name = "com_github_google_btree", + importpath = "github.com/google/btree", + sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_google_go_cmp", + importpath = "github.com/google/go-cmp", + sum = "h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=", + version = "v0.5.5", + ) + go_repository( + name = "com_github_google_go_jsonnet", + importpath = "github.com/google/go-jsonnet", + sum = "h1:/9NIEfhK1NQRKl3sP2536b2+x5HnZMdql7x3yK/l8JY=", + version = "v0.17.0", + ) + + go_repository( + name = "com_github_google_gofuzz", + importpath = "github.com/google/gofuzz", + sum = "h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_google_martian", + importpath = "github.com/google/martian", + sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=", + version = "v2.1.0+incompatible", + ) + go_repository( + name = "com_github_google_martian_v3", + importpath = "github.com/google/martian/v3", + sum = "h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=", + version = "v3.1.0", + ) + go_repository( + name = "com_github_google_pprof", + importpath = "github.com/google/pprof", + sum = "h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo=", + version = "v0.0.0-20210226084205-cbba55b83ad5", + ) + go_repository( + 
name = "com_github_google_renameio", + importpath = "github.com/google/renameio", + sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_google_shlex", + importpath = "github.com/google/shlex", + sum = "h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=", + version = "v0.0.0-20191202100458-e7afc7fbc510", + ) + + go_repository( + name = "com_github_google_uuid", + importpath = "github.com/google/uuid", + sum = "h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_googleapis_gax_go_v2", + importpath = "github.com/googleapis/gax-go/v2", + sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=", + version = "v2.0.5", + ) + go_repository( + name = "com_github_googleapis_gnostic", + importpath = "github.com/googleapis/gnostic", + sum = "h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_gophercloud_gophercloud", + importpath = "github.com/gophercloud/gophercloud", + sum = "h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_gopherjs_gopherjs", + importpath = "github.com/gopherjs/gopherjs", + sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=", + version = "v0.0.0-20181017120253-0766667cb4d1", + ) + go_repository( + name = "com_github_gorilla_context", + importpath = "github.com/gorilla/context", + sum = "h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=", + version = "v1.1.1", + ) + + go_repository( + name = "com_github_gorilla_mux", + importpath = "github.com/gorilla/mux", + sum = "h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=", + version = "v1.7.3", + ) + go_repository( + name = "com_github_gorilla_websocket", + importpath = "github.com/gorilla/websocket", + sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_gregjones_httpcache", + importpath = "github.com/gregjones/httpcache", + sum = "h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=", + version = "v0.0.0-20180305231024-9cad4c3443a7", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_middleware", + importpath = "github.com/grpc-ecosystem/go-grpc-middleware", + sum = "h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=", + version = "v1.0.1-0.20190118093823-f849b5445de4", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_prometheus", + importpath = "github.com/grpc-ecosystem/go-grpc-prometheus", + sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_grpc_gateway", + importpath = "github.com/grpc-ecosystem/grpc-gateway", + sum = "h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=", + version = "v1.9.5", + ) + go_repository( + name = "com_github_hashicorp_consul_api", + importpath = "github.com/hashicorp/consul/api", + sum = "h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+cqy78=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_hashicorp_consul_sdk", + importpath = "github.com/hashicorp/consul/sdk", + sum = "h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_hashicorp_errwrap", + importpath = "github.com/hashicorp/errwrap", + sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_cleanhttp", + importpath = "github.com/hashicorp/go-cleanhttp", + sum = 
"h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_hashicorp_go_immutable_radix", + importpath = "github.com/hashicorp/go-immutable-radix", + sum = "h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_msgpack", + importpath = "github.com/hashicorp/go-msgpack", + sum = "h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=", + version = "v0.5.3", + ) + go_repository( + name = "com_github_hashicorp_go_multierror", + importpath = "github.com/hashicorp/go-multierror", + sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_net", + importpath = "github.com/hashicorp/go.net", + sum = "h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=", + version = "v0.0.1", + ) + go_repository( + name = "com_github_hashicorp_go_rootcerts", + importpath = "github.com/hashicorp/go-rootcerts", + sum = "h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_sockaddr", + importpath = "github.com/hashicorp/go-sockaddr", + sum = "h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_syslog", + importpath = "github.com/hashicorp/go-syslog", + sum = "h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_uuid", + importpath = "github.com/hashicorp/go-uuid", + sum = "h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_hashicorp_go_version", + importpath = "github.com/hashicorp/go-version", + sum = "h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_hashicorp_golang_lru", + importpath = "github.com/hashicorp/golang-lru", + sum = "h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=", + version = "v0.5.4", + ) + go_repository( + name = "com_github_hashicorp_hcl", + importpath = "github.com/hashicorp/hcl", + sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_logutils", + importpath = "github.com/hashicorp/logutils", + sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_mdns", + importpath = "github.com/hashicorp/mdns", + sum = "h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_memberlist", + importpath = "github.com/hashicorp/memberlist", + sum = "h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=", + version = "v0.1.3", + ) + go_repository( + name = "com_github_hashicorp_serf", + importpath = "github.com/hashicorp/serf", + sum = "h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=", + version = "v0.8.2", + ) + + go_repository( + name = "com_github_hpcloud_tail", + importpath = "github.com/hpcloud/tail", + sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hudl_fargo", + importpath = "github.com/hudl/fargo", + sum = "h1:0U6+BtN6LhaYuTnIJq4Wyq5cpn6O2kWrxAtcqBmYY6w=", + version = "v1.3.0", + ) + + go_repository( + name = "com_github_ianlancetaylor_demangle", + importpath = "github.com/ianlancetaylor/demangle", + sum = "h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=", + version = 
"v0.0.0-20200824232613-28f6c0f3b639", + ) + go_repository( + name = "com_github_imdario_mergo", + importpath = "github.com/imdario/mergo", + sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=", + version = "v0.3.11", + ) + go_repository( + name = "com_github_inconshreveable_mousetrap", + importpath = "github.com/inconshreveable/mousetrap", + sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_influxdata_influxdb1_client", + importpath = "github.com/influxdata/influxdb1-client", + sum = "h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA=", + version = "v0.0.0-20191209144304-8bf82d3c094d", + ) + go_repository( + name = "com_github_jmespath_go_jmespath", + importpath = "github.com/jmespath/go-jmespath", + sum = "h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=", + version = "v0.0.0-20180206201540-c2b33e8439af", + ) + + go_repository( + name = "com_github_jonboulle_clockwork", + importpath = "github.com/jonboulle/clockwork", + sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_jpillora_backoff", + importpath = "github.com/jpillora/backoff", + sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_json_iterator_go", + importpath = "github.com/json-iterator/go", + sum = "h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=", + version = "v1.1.10", + ) + go_repository( + name = "com_github_jsonnet_bundler_jsonnet_bundler", + importpath = "github.com/jsonnet-bundler/jsonnet-bundler", + sum = "h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc=", + version = "v0.4.0", + ) + + go_repository( + name = "com_github_jstemmer_go_junit_report", + importpath = "github.com/jstemmer/go-junit-report", + sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_jtolds_gls", + importpath = "github.com/jtolds/gls", + sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=", + version = "v4.20.0+incompatible", + ) + + go_repository( + name = "com_github_julienschmidt_httprouter", + importpath = "github.com/julienschmidt/httprouter", + sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_kisielk_errcheck", + importpath = "github.com/kisielk/errcheck", + sum = "h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_kisielk_gotool", + importpath = "github.com/kisielk/gotool", + sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_knetic_govaluate", + importpath = "github.com/Knetic/govaluate", + sum = "h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=", + version = "v3.0.1-0.20171022003610-9aa49832a739+incompatible", + ) + + go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=", + version = "v1.0.3", + ) + go_repository( + name = "com_github_kr_logfmt", + importpath = "github.com/kr/logfmt", + sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=", + version = "v0.0.0-20140226030751-b84e30acd515", + ) + go_repository( + name = "com_github_kr_pretty", + importpath = "github.com/kr/pretty", + sum = "h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_kr_pty", + 
importpath = "github.com/kr/pty", + sum = "h1:hyz3dwM5QLc1Rfoz4FuWJQG5BN7tc6K1MndAUnGpQr4=", + version = "v1.1.5", + ) + go_repository( + name = "com_github_kr_text", + importpath = "github.com/kr/text", + sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_kubernetes_csi_csi_lib_utils", + importpath = "github.com/kubernetes-csi/csi-lib-utils", + sum = "h1:t1cS7HTD7z5D7h9iAdjWuHtMxJPb9s1fIv34rxytzqs=", + version = "v0.7.0", + ) + go_repository( + name = "com_github_kubernetes_csi_csi_test", + importpath = "github.com/kubernetes-csi/csi-test", + sum = "h1:ia04uVFUM/J9n/v3LEMn3rEG6FmKV5BH9QLw7H68h44=", + version = "v2.0.0+incompatible", + ) + go_repository( + name = "com_github_kubernetes_csi_external_snapshotter_v2", + importpath = "github.com/kubernetes-csi/external-snapshotter/v2", + sum = "h1:t5bmB3Y8nCaLA4aFrIpX0zjHEF/HUkJp6f5rm7BsVzM=", + version = "v2.1.1", + ) + go_repository( + name = "com_github_lightstep_lightstep_tracer_common_golang_gogo", + importpath = "github.com/lightstep/lightstep-tracer-common/golang/gogo", + sum = "h1:143Bb8f8DuGWck/xpNUOckBVYfFbBTnLevfRZ1aVVqo=", + version = "v0.0.0-20190605223551-bc2310a04743", + ) + go_repository( + name = "com_github_lightstep_lightstep_tracer_go", + importpath = "github.com/lightstep/lightstep-tracer-go", + sum = "h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk=", + version = "v0.18.1", + ) + go_repository( + name = "com_github_lyft_protoc_gen_validate", + importpath = "github.com/lyft/protoc-gen-validate", + sum = "h1:KNt/RhmQTOLr7Aj8PsJ7mTronaFyx80mRTT9qF261dA=", + version = "v0.0.13", + ) + + go_repository( + name = "com_github_magiconair_properties", + importpath = "github.com/magiconair/properties", + sum = "h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_mailru_easyjson", + importpath = "github.com/mailru/easyjson", + sum = "h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=", + version = "v0.7.0", + ) + go_repository( + name = "com_github_markbates_pkger", + importpath = "github.com/markbates/pkger", + sum = "h1:/MKEtWqtc0mZvu9OinB9UzVN9iYCwLWuyUv4Bw+PCno=", + version = "v0.17.1", + ) + + go_repository( + name = "com_github_mattn_go_colorable", + importpath = "github.com/mattn/go-colorable", + sum = "h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=", + version = "v0.1.2", + ) + go_repository( + name = "com_github_mattn_go_isatty", + importpath = "github.com/mattn/go-isatty", + sum = "h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=", + version = "v0.0.8", + ) + go_repository( + name = "com_github_mattn_go_runewidth", + importpath = "github.com/mattn/go-runewidth", + sum = "h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=", + version = "v0.0.7", + ) + go_repository( + name = "com_github_matttproud_golang_protobuf_extensions", + importpath = "github.com/matttproud/golang_protobuf_extensions", + sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_microsoft_go_winio", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=", + version = "v0.4.14", + ) + go_repository( + name = "com_github_miekg_dns", + importpath = "github.com/miekg/dns", + sum = "h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=", + version = "v1.0.14", + ) + go_repository( + name = "com_github_mitchellh_cli", + importpath = "github.com/mitchellh/cli", + sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=", + version = 
"v1.0.0", + ) + + go_repository( + name = "com_github_mitchellh_go_homedir", + importpath = "github.com/mitchellh/go-homedir", + sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_mitchellh_go_testing_interface", + importpath = "github.com/mitchellh/go-testing-interface", + sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_mitchellh_gox", + importpath = "github.com/mitchellh/gox", + sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_mitchellh_iochan", + importpath = "github.com/mitchellh/iochan", + sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_mitchellh_mapstructure", + importpath = "github.com/mitchellh/mapstructure", + sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=", + version = "v1.1.2", + ) + go_repository( + # Makes docker happier + name = "com_github_moby_buildkit", + importpath = "github.com/moby/buildkit", + version = "v0.7.2", + ) + + go_repository( + name = "com_github_moby_term", + importpath = "github.com/moby/term", + sum = "h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=", + version = "v0.0.0-20200312100748-672ec06f55cd", + ) + go_repository( + name = "com_github_modern_go_concurrent", + importpath = "github.com/modern-go/concurrent", + sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=", + version = "v0.0.0-20180306012644-bacd9c7ef1dd", + ) + go_repository( + name = "com_github_modern_go_reflect2", + importpath = "github.com/modern-go/reflect2", + sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_monochromegane_go_gitignore", + importpath = "github.com/monochromegane/go-gitignore", + sum = "h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=", + version = "v0.0.0-20200626010858-205db1a8cc00", + ) + + go_repository( + name = "com_github_morikuni_aec", + importpath = "github.com/morikuni/aec", + sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_munnerz_goautoneg", + importpath = "github.com/munnerz/goautoneg", + sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=", + version = "v0.0.0-20191010083416-a7dc8b61c822", + ) + go_repository( + name = "com_github_mwitkow_go_conntrack", + importpath = "github.com/mwitkow/go-conntrack", + sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", + version = "v0.0.0-20190716064945-2f068394615f", + ) + go_repository( + name = "com_github_mxk_go_flowrate", + importpath = "github.com/mxk/go-flowrate", + sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=", + version = "v0.0.0-20140419014527-cca7078d478f", + ) + go_repository( + name = "com_github_nats_io_jwt", + importpath = "github.com/nats-io/jwt", + sum = "h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=", + version = "v0.3.2", + ) + go_repository( + name = "com_github_nats_io_nats_go", + importpath = "github.com/nats-io/nats.go", + sum = "h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=", + version = "v1.9.1", + ) + go_repository( + name = "com_github_nats_io_nats_server_v2", + importpath = "github.com/nats-io/nats-server/v2", + sum = "h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDHZ2PmiIc=", + version = "v2.1.2", + ) + go_repository( + name = "com_github_nats_io_nkeys", + importpath = "github.com/nats-io/nkeys", + sum = 
"h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=", + version = "v0.1.3", + ) + go_repository( + name = "com_github_nats_io_nuid", + importpath = "github.com/nats-io/nuid", + sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_niemeyer_pretty", + importpath = "github.com/niemeyer/pretty", + sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=", + version = "v0.0.0-20200227124842-a10e7caefd8e", + ) + + go_repository( + name = "com_github_nxadm_tail", + importpath = "github.com/nxadm/tail", + sum = "h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=", + version = "v1.4.4", + ) + go_repository( + name = "com_github_nytimes_gziphandler", + importpath = "github.com/NYTimes/gziphandler", + sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=", + version = "v0.0.0-20170623195520-56545f4a5d46", + ) + go_repository( + name = "com_github_oklog_oklog", + importpath = "github.com/oklog/oklog", + sum = "h1:wVfs8F+in6nTBMkA7CbRw+zZMIB7nNM825cM1wuzoTk=", + version = "v0.3.2", + ) + go_repository( + name = "com_github_oklog_run", + importpath = "github.com/oklog/run", + sum = "h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_oklog_ulid", + importpath = "github.com/oklog/ulid", + sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_olekukonko_tablewriter", + importpath = "github.com/olekukonko/tablewriter", + sum = "h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=", + version = "v0.0.4", + ) + go_repository( + name = "com_github_oneofone_xxhash", + importpath = "github.com/OneOfOne/xxhash", + sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_onsi_ginkgo", + build_directives = ["gazelle:exclude integration"], + importpath = "github.com/onsi/ginkgo", + sum = "h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=", + version = "v1.14.1", + ) + go_repository( + name = "com_github_onsi_gomega", + importpath = "github.com/onsi/gomega", + sum = "h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=", + version = "v1.10.2", + ) + go_repository( + name = "com_github_op_go_logging", + importpath = "github.com/op/go-logging", + sum = "h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=", + version = "v0.0.0-20160315200505-970db520ece7", + ) + + go_repository( + name = "com_github_opencontainers_go_digest", + importpath = "github.com/opencontainers/go-digest", + sum = "h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=", + version = "v1.0.0-rc1", + ) + go_repository( + name = "com_github_opencontainers_image_spec", + importpath = "github.com/opencontainers/image-spec", + sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_opentracing_basictracer_go", + importpath = "github.com/opentracing/basictracer-go", + sum = "h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_opentracing_contrib_go_observer", + importpath = "github.com/opentracing-contrib/go-observer", + sum = "h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=", + version = "v0.0.0-20170622124052-a52f23424492", + ) + go_repository( + name = "com_github_opentracing_opentracing_go", + importpath = "github.com/opentracing/opentracing-go", + sum = "h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=", + version = "v1.1.0", + ) + go_repository( + name = 
"com_github_openzipkin_contrib_zipkin_go_opentracing", + importpath = "github.com/openzipkin-contrib/zipkin-go-opentracing", + sum = "h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU=", + version = "v0.4.5", + ) + go_repository( + name = "com_github_openzipkin_zipkin_go", + importpath = "github.com/openzipkin/zipkin-go", + sum = "h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI=", + version = "v0.2.2", + ) + go_repository( + name = "com_github_pact_foundation_pact_go", + importpath = "github.com/pact-foundation/pact-go", + sum = "h1:OYkFijGHoZAYbOIb1LWXrwKQbMMRUv1oQ89blD2Mh2Q=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_pascaldekloe_goe", + importpath = "github.com/pascaldekloe/goe", + sum = "h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=", + version = "v0.0.0-20180627143212-57f6aae5913c", + ) + + go_repository( + name = "com_github_pborman_uuid", + importpath = "github.com/pborman/uuid", + sum = "h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_pelletier_go_toml", + importpath = "github.com/pelletier/go-toml", + sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_performancecopilot_speed", + importpath = "github.com/performancecopilot/speed", + sum = "h1:2WnRzIquHa5QxaJKShDkLM+sc0JPuwhXzK8OYOyt3Vg=", + version = "v3.0.0+incompatible", + ) + + go_repository( + name = "com_github_peterbourgon_diskv", + importpath = "github.com/peterbourgon/diskv", + sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=", + version = "v2.0.1+incompatible", + ) + go_repository( + name = "com_github_pierrec_lz4", + importpath = "github.com/pierrec/lz4", + sum = "h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=", + version = "v2.0.5+incompatible", + ) + + go_repository( + name = "com_github_pkg_errors", + importpath = "github.com/pkg/errors", + sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_pkg_profile", + importpath = "github.com/pkg/profile", + sum = "h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=", + version = "v1.2.1", + ) + + go_repository( + name = "com_github_pmezard_go_difflib", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_posener_complete", + importpath = "github.com/posener/complete", + sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=", + version = "v1.1.1", + ) + + go_repository( + name = "com_github_pquerna_cachecontrol", + importpath = "github.com/pquerna/cachecontrol", + sum = "h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=", + version = "v0.0.0-20171018203845-0dec1b30a021", + ) + go_repository( + name = "com_github_prometheus_client_golang", + importpath = "github.com/prometheus/client_golang", + sum = "h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=", + version = "v1.7.1", + ) + go_repository( + name = "com_github_prometheus_client_model", + importpath = "github.com/prometheus/client_model", + sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_prometheus_common", + importpath = "github.com/prometheus/common", + sum = "h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg=", + version = "v0.13.0", + ) + go_repository( + name = "com_github_prometheus_procfs", + importpath = "github.com/prometheus/procfs", + sum = "h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=", + version = 
"v0.1.3", + ) + go_repository( + name = "com_github_prometheus_tsdb", + importpath = "github.com/prometheus/tsdb", + sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=", + version = "v0.7.1", + ) + go_repository( + name = "com_github_puerkitobio_purell", + importpath = "github.com/PuerkitoBio/purell", + sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_puerkitobio_urlesc", + importpath = "github.com/PuerkitoBio/urlesc", + sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=", + version = "v0.0.0-20170810143723-de5bf2ad4578", + ) + go_repository( + name = "com_github_rcrowley_go_metrics", + importpath = "github.com/rcrowley/go-metrics", + sum = "h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=", + version = "v0.0.0-20181016184325-3113b8401b8a", + ) + + go_repository( + name = "com_github_remyoudompheng_bigfft", + importpath = "github.com/remyoudompheng/bigfft", + sum = "h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA=", + version = "v0.0.0-20170806203942-52369c62f446", + ) + go_repository( + name = "com_github_robfig_cron", + importpath = "github.com/robfig/cron", + sum = "h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_rogpeppe_fastuuid", + importpath = "github.com/rogpeppe/fastuuid", + sum = "h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=", + version = "v0.0.0-20150106093220-6724a57986af", + ) + go_repository( + name = "com_github_rogpeppe_go_internal", + importpath = "github.com/rogpeppe/go-internal", + sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_russross_blackfriday", + importpath = "github.com/russross/blackfriday", + sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=", + version = "v1.5.2", + ) + go_repository( + name = "com_github_russross_blackfriday_v2", + importpath = "github.com/russross/blackfriday/v2", + sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=", + version = "v2.0.1", + ) + go_repository( + name = "com_github_ryanuber_columnize", + importpath = "github.com/ryanuber/columnize", + sum = "h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=", + version = "v0.0.0-20160712163229-9b3edd62028f", + ) + go_repository( + name = "com_github_samuel_go_zookeeper", + importpath = "github.com/samuel/go-zookeeper", + sum = "h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=", + version = "v0.0.0-20190923202752-2cc03de413da", + ) + go_repository( + name = "com_github_sean_seed", + importpath = "github.com/sean-/seed", + sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=", + version = "v0.0.0-20170313163322-e2103e2c3529", + ) + + go_repository( + name = "com_github_sergi_go_diff", + importpath = "github.com/sergi/go-diff", + sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_shopify_sarama", + importpath = "github.com/Shopify/sarama", + sum = "h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=", + version = "v1.19.0", + ) + go_repository( + name = "com_github_shopify_toxiproxy", + importpath = "github.com/Shopify/toxiproxy", + sum = "h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=", + version = "v2.1.4+incompatible", + ) + + go_repository( + name = "com_github_shurcool_sanitized_anchor_name", + importpath = "github.com/shurcooL/sanitized_anchor_name", + sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_sirupsen_logrus", + 
importpath = "github.com/sirupsen/logrus", + sum = "h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_smartystreets_assertions", + importpath = "github.com/smartystreets/assertions", + sum = "h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=", + version = "v0.0.0-20180927180507-b2de0cb4f26d", + ) + go_repository( + name = "com_github_smartystreets_goconvey", + importpath = "github.com/smartystreets/goconvey", + sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=", + version = "v1.6.4", + ) + + go_repository( + name = "com_github_soheilhy_cmux", + importpath = "github.com/soheilhy/cmux", + sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=", + version = "v0.1.4", + ) + go_repository( + name = "com_github_sony_gobreaker", + importpath = "github.com/sony/gobreaker", + sum = "h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ=", + version = "v0.4.1", + ) + + go_repository( + name = "com_github_spaolacci_murmur3", + importpath = "github.com/spaolacci/murmur3", + sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=", + version = "v0.0.0-20180118202830-f09979ecbc72", + ) + go_repository( + name = "com_github_spf13_afero", + importpath = "github.com/spf13/afero", + sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_spf13_cast", + importpath = "github.com/spf13/cast", + sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_spf13_cobra", + importpath = "github.com/spf13/cobra", + sum = "h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_spf13_jwalterweatherman", + importpath = "github.com/spf13/jwalterweatherman", + sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_spf13_pflag", + importpath = "github.com/spf13/pflag", + sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=", + version = "v1.0.5", + ) + go_repository( + name = "com_github_spf13_viper", + importpath = "github.com/spf13/viper", + sum = "h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_stoewer_go_strcase", + importpath = "github.com/stoewer/go-strcase", + sum = "h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_streadway_amqp", + importpath = "github.com/streadway/amqp", + sum = "h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=", + version = "v0.0.0-20190827072141-edfb9018d271", + ) + go_repository( + name = "com_github_streadway_handy", + importpath = "github.com/streadway/handy", + sum = "h1:AhmOdSHeswKHBjhsLs/7+1voOxT+LLrSk/Nxvk35fug=", + version = "v0.0.0-20190108123426-d5acb3125c2a", + ) + + go_repository( + name = "com_github_stretchr_objx", + importpath = "github.com/stretchr/objx", + sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_stretchr_testify", + importpath = "github.com/stretchr/testify", + sum = "h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=", + version = "v1.6.1", + ) + go_repository( + name = "com_github_subosito_gotenv", + importpath = "github.com/subosito/gotenv", + sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_tidwall_pretty", + importpath = "github.com/tidwall/pretty", + sum = 
"h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_tmc_grpc_websocket_proxy", + importpath = "github.com/tmc/grpc-websocket-proxy", + sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=", + version = "v0.0.0-20190109142713-0ad062ec5ee5", + ) + go_repository( + name = "com_github_ugorji_go", + importpath = "github.com/ugorji/go", + sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=", + version = "v1.1.4", + ) + go_repository( + name = "com_github_ugorji_go_codec", + importpath = "github.com/ugorji/go/codec", + sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=", + version = "v0.0.0-20181204163529-d75b2dcb6bc8", + ) + go_repository( + name = "com_github_urfave_cli", + importpath = "github.com/urfave/cli", + sum = "h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=", + version = "v1.22.1", + ) + go_repository( + name = "com_github_vektah_gqlparser", + importpath = "github.com/vektah/gqlparser", + sum = "h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_vividcortex_gohistogram", + importpath = "github.com/VividCortex/gohistogram", + sum = "h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_xiang90_probing", + importpath = "github.com/xiang90/probing", + sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=", + version = "v0.0.0-20190116061207-43a291ad63a2", + ) + go_repository( + name = "com_github_xlab_treeprint", + importpath = "github.com/xlab/treeprint", + sum = "h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=", + version = "v0.0.0-20181112141820-a009c3971eca", + ) + + go_repository( + name = "com_github_xordataexchange_crypt", + importpath = "github.com/xordataexchange/crypt", + sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=", + version = "v0.0.3-0.20170626215501-b2862e3d0a77", + ) + go_repository( + name = "com_github_yuin_goldmark", + importpath = "github.com/yuin/goldmark", + sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=", + version = "v1.2.1", + ) + go_repository( + name = "com_google_cloud_go", + importpath = "cloud.google.com/go", + sum = "h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=", + version = "v0.81.0", + ) + go_repository( + name = "com_google_cloud_go_bigquery", + importpath = "cloud.google.com/go/bigquery", + sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=", + version = "v1.8.0", + ) + go_repository( + name = "com_google_cloud_go_datastore", + importpath = "cloud.google.com/go/datastore", + sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=", + version = "v1.1.0", + ) + go_repository( + name = "com_google_cloud_go_firestore", + importpath = "cloud.google.com/go/firestore", + sum = "h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=", + version = "v1.1.0", + ) + + go_repository( + name = "com_google_cloud_go_pubsub", + importpath = "cloud.google.com/go/pubsub", + sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=", + version = "v1.3.1", + ) + go_repository( + name = "com_google_cloud_go_storage", + importpath = "cloud.google.com/go/storage", + sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=", + version = "v1.10.0", + ) + go_repository( + name = "com_shuralyov_dmitri_gpu_mtl", + importpath = "dmitri.shuralyov.com/gpu/mtl", + sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=", + version = "v0.0.0-20190408044501-666a987793e9", + ) + go_repository( + name = "com_sourcegraph_sourcegraph_appdash", + importpath = 
"sourcegraph.com/sourcegraph/appdash", + sum = "h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM=", + version = "v0.0.0-20190731080439-ebfcffb1b5c0", + ) + + go_repository( + name = "in_gopkg_alecthomas_kingpin_v2", + importpath = "gopkg.in/alecthomas/kingpin.v2", + sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=", + version = "v2.2.6", + ) + go_repository( + name = "in_gopkg_check_v1", + importpath = "gopkg.in/check.v1", + sum = "h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=", + version = "v1.0.0-20200227125254-8fa46927fb4f", + ) + go_repository( + name = "in_gopkg_cheggaaa_pb_v1", + importpath = "gopkg.in/cheggaaa/pb.v1", + sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=", + version = "v1.0.25", + ) + go_repository( + name = "in_gopkg_errgo_v2", + importpath = "gopkg.in/errgo.v2", + sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=", + version = "v2.1.0", + ) + go_repository( + name = "in_gopkg_fsnotify_v1", + importpath = "gopkg.in/fsnotify.v1", + sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=", + version = "v1.4.7", + ) + go_repository( + name = "in_gopkg_gcfg_v1", + importpath = "gopkg.in/gcfg.v1", + sum = "h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=", + version = "v1.2.3", + ) + + go_repository( + name = "in_gopkg_inf_v0", + importpath = "gopkg.in/inf.v0", + sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=", + version = "v0.9.1", + ) + go_repository( + name = "in_gopkg_ini_v1", + importpath = "gopkg.in/ini.v1", + sum = "h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=", + version = "v1.51.0", + ) + + go_repository( + name = "in_gopkg_natefinch_lumberjack_v2", + importpath = "gopkg.in/natefinch/lumberjack.v2", + sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=", + version = "v2.0.0", + ) + go_repository( + name = "in_gopkg_resty_v1", + importpath = "gopkg.in/resty.v1", + sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=", + version = "v1.12.0", + ) + go_repository( + name = "in_gopkg_square_go_jose_v2", + importpath = "gopkg.in/square/go-jose.v2", + sum = "h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=", + version = "v2.2.2", + ) + go_repository( + name = "in_gopkg_tomb_v1", + importpath = "gopkg.in/tomb.v1", + sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", + version = "v1.0.0-20141024135613-dd632973f1e7", + ) + go_repository( + name = "in_gopkg_warnings_v0", + importpath = "gopkg.in/warnings.v0", + sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=", + version = "v0.1.2", + ) + + go_repository( + name = "in_gopkg_yaml_v2", + importpath = "gopkg.in/yaml.v2", + sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=", + version = "v2.4.0", + ) + go_repository( + name = "in_gopkg_yaml_v3", + importpath = "gopkg.in/yaml.v3", + sum = "h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=", + version = "v3.0.0-20200615113413-eeeca48fe776", + ) + go_repository( + name = "io_etcd_go_bbolt", + importpath = "go.etcd.io/bbolt", + sum = "h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=", + version = "v1.3.3", + ) + go_repository( + name = "io_etcd_go_etcd", + importpath = "go.etcd.io/etcd", + sum = "h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=", + version = "v0.0.0-20191023171146-3cf2f69b5738", + ) + go_repository( + name = "io_k8s_api", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/api", + sum = "h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=", + version = "v0.20.2", + ) + go_repository( + name = "io_k8s_apiextensions_apiserver", + build_file_proto_mode = "disable_global", + importpath = 
"k8s.io/apiextensions-apiserver", + sum = "h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo=", + version = "v0.18.6", + ) + go_repository( + name = "io_k8s_apimachinery", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/apimachinery", + sum = "h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=", + version = "v0.20.2", + ) + go_repository( + name = "io_k8s_apiserver", + importpath = "k8s.io/apiserver", + sum = "h1:HcWwcOfhj4Yv6y2igP4ZUuovyPjVLGoZcG0Tsph4Mxo=", + version = "v0.18.6", + ) + go_repository( + name = "io_k8s_client_go", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/client-go", + sum = "h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=", + version = "v0.20.2", + ) + go_repository( + name = "io_k8s_code_generator", + importpath = "k8s.io/code-generator", + sum = "h1:QdfvGfs4gUCS1dru+rLbCKIFxYEV0IRfF8MXwY/ozLk=", + version = "v0.18.6", + ) + go_repository( + name = "io_k8s_component_base", + importpath = "k8s.io/component-base", + sum = "h1:Wd6cHGwJN2qpufnirVOB3oMhyhbioGsKEi5HeDBsV+s=", + version = "v0.18.6", + ) + go_repository( + name = "io_k8s_gengo", + importpath = "k8s.io/gengo", + sum = "h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs=", + version = "v0.0.0-20200413195148-3a45101e95ac", + ) + go_repository( + name = "io_k8s_klog", + importpath = "k8s.io/klog", + sum = "h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=", + version = "v1.0.0", + ) + go_repository( + name = "io_k8s_klog_v2", + importpath = "k8s.io/klog/v2", + sum = "h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=", + version = "v2.4.0", + ) + go_repository( + name = "io_k8s_kube_openapi", + importpath = "k8s.io/kube-openapi", + sum = "h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=", + version = "v0.0.0-20201113171705-d219536bb9fd", + ) + go_repository( + name = "io_k8s_kubernetes", + importpath = "k8s.io/kubernetes", + sum = "h1:6T2iAEoOYQnzQb3WvPlUkcczEEXZ7+YPlAO8olwujRw=", + version = "v1.14.0", + ) + go_repository( + name = "io_k8s_sigs_apiserver_network_proxy_konnectivity_client", + importpath = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client", + sum = "h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50=", + version = "v0.0.7", + ) + go_repository( + name = "io_k8s_sigs_controller_runtime", + importpath = "sigs.k8s.io/controller-runtime", + sum = "h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA=", + version = "v0.6.2", + ) + go_repository( + name = "io_k8s_sigs_controller_tools", + importpath = "sigs.k8s.io/controller-tools", + sum = "h1:kH7HKWed9XO42OTxyhUtqyImiefdZV2Q9Jbrytvhf18=", + version = "v0.2.5", + ) + + go_repository( + name = "io_k8s_sigs_kustomize_api", + importpath = "sigs.k8s.io/kustomize/api", + sum = "h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ=", + version = "v0.8.5", + ) + go_repository( + name = "io_k8s_sigs_kustomize_cmd_config", + importpath = "sigs.k8s.io/kustomize/cmd/config", + sum = "h1:xxvL/np/zYHVuCH1tNFehlyEtSW5oXjoI6ycejiyOwQ=", + version = "v0.9.7", + ) + go_repository( + name = "io_k8s_sigs_kustomize_kustomize_v4", + importpath = "sigs.k8s.io/kustomize/kustomize/v4", + sum = "h1:0xQWp03aKWilF6UJrupcA2rCoCn3jejkJ+m/CCI/Fis=", + version = "v4.0.5", + ) + go_repository( + name = "io_k8s_sigs_kustomize_kyaml", + importpath = "sigs.k8s.io/kustomize/kyaml", + sum = "h1:dSLgG78KyaxN4HylPXdK+7zB3k7sW6q3IcCmcfKA+aI=", + version = "v0.10.15", + ) + + go_repository( + name = "io_k8s_sigs_structured_merge_diff", + importpath = "sigs.k8s.io/structured-merge-diff", + sum = "h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=", + version = 
"v1.0.1-0.20191108220359-b1b620dd3f06", + ) + go_repository( + name = "io_k8s_sigs_structured_merge_diff_v3", + importpath = "sigs.k8s.io/structured-merge-diff/v3", + sum = "h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=", + version = "v3.0.0", + ) + go_repository( + name = "io_k8s_sigs_structured_merge_diff_v4", + importpath = "sigs.k8s.io/structured-merge-diff/v4", + sum = "h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=", + version = "v4.0.2", + ) + go_repository( + name = "io_k8s_sigs_yaml", + importpath = "sigs.k8s.io/yaml", + sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=", + version = "v1.2.0", + ) + go_repository( + name = "io_k8s_utils", + importpath = "k8s.io/utils", + sum = "h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=", + version = "v0.0.0-20201110183641-67b214c5f920", + ) + go_repository( + name = "io_opencensus_go", + importpath = "go.opencensus.io", + sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=", + version = "v0.23.0", + ) + go_repository( + name = "io_rsc_binaryregexp", + importpath = "rsc.io/binaryregexp", + sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=", + version = "v0.2.0", + ) + go_repository( + name = "io_rsc_quote_v3", + importpath = "rsc.io/quote/v3", + sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=", + version = "v3.1.0", + ) + go_repository( + name = "io_rsc_sampler", + importpath = "rsc.io/sampler", + sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=", + version = "v1.3.0", + ) + go_repository( + name = "net_starlark_go", + importpath = "go.starlark.net", + sum = "h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=", + version = "v0.0.0-20200306205701-8dd3e2ee1dd5", + ) + + go_repository( + name = "org_bitbucket_creachadair_stringset", + importpath = "bitbucket.org/creachadair/stringset", + sum = "h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o=", + version = "v0.0.9", + ) + + go_repository( + name = "org_golang_google_api", + importpath = "google.golang.org/api", + sum = "h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA=", + version = "v0.44.0", + ) + go_repository( + name = "org_golang_google_appengine", + importpath = "google.golang.org/appengine", + sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=", + version = "v1.6.7", + ) + go_repository( + name = "org_golang_google_genproto", + importpath = "google.golang.org/genproto", + sum = "h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8=", + version = "v0.0.0-20210402141018-6c239bbf2bb1", + ) + go_repository( + name = "org_golang_google_grpc", + importpath = "google.golang.org/grpc", + sum = "h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY=", + version = "v1.36.1", + ) + go_repository( + name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", + importpath = "google.golang.org/grpc/cmd/protoc-gen-go-grpc", + sum = "h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=", + version = "v1.1.0", + ) + + go_repository( + name = "org_golang_google_protobuf", + build_directives = ["gazelle:exclude **/testdata"], + importpath = "google.golang.org/protobuf", + sum = "h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=", + version = "v1.26.0", + ) + go_repository( + name = "org_golang_x_crypto", + importpath = "golang.org/x/crypto", + sum = "h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=", + version = "v0.0.0-20201002170205-7f63de1d35b0", + ) + go_repository( + name = "org_golang_x_exp", + importpath = "golang.org/x/exp", + sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=", + version = "v0.0.0-20200224162631-6cc2880d07d6", + ) + go_repository( + name = "org_golang_x_image", + importpath = 
"golang.org/x/image", + sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=", + version = "v0.0.0-20190802002840-cff245a6509b", + ) + go_repository( + name = "org_golang_x_lint", + importpath = "golang.org/x/lint", + sum = "h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=", + version = "v0.0.0-20201208152925-83fdc39ff7b5", + ) + go_repository( + name = "org_golang_x_mobile", + importpath = "golang.org/x/mobile", + sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=", + version = "v0.0.0-20190719004257-d2bd2a29d028", + ) + go_repository( + name = "org_golang_x_mod", + importpath = "golang.org/x/mod", + sum = "h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=", + version = "v0.4.1", + ) + go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E=", + version = "v0.0.0-20210316092652-d523dce5a7f4", + ) + go_repository( + name = "org_golang_x_oauth2", + importpath = "golang.org/x/oauth2", + sum = "h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=", + version = "v0.0.0-20210402161424-2e8d93401602", + ) + go_repository( + name = "org_golang_x_sync", + importpath = "golang.org/x/sync", + sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=", + version = "v0.0.0-20210220032951-036812b2e83c", + ) + go_repository( + name = "org_golang_x_sys", + importpath = "golang.org/x/sys", + sum = "h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=", + version = "v0.0.0-20210403161142-5e06dd20ab57", + ) + go_repository( + name = "org_golang_x_term", + importpath = "golang.org/x/term", + sum = "h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=", + version = "v0.0.0-20201126162022-7de9c90e9dd1", + ) + + go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=", + version = "v0.3.5", + ) + go_repository( + name = "org_golang_x_time", + importpath = "golang.org/x/time", + sum = "h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=", + version = "v0.0.0-20200630173020-3af7569d3a1e", + ) + go_repository( + name = "org_golang_x_tools", + build_directives = ["gazelle:exclude **/testdata"], + importpath = "golang.org/x/tools", + sum = "h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=", + version = "v0.1.0", + ) + go_repository( + name = "org_golang_x_xerrors", + importpath = "golang.org/x/xerrors", + sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=", + version = "v0.0.0-20200804184101-5ec99f83aff1", + ) + go_repository( + name = "org_gonum_v1_gonum", + importpath = "gonum.org/v1/gonum", + sum = "h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=", + version = "v0.0.0-20190331200053-3d26580ed485", + ) + go_repository( + name = "org_gonum_v1_netlib", + importpath = "gonum.org/v1/netlib", + sum = "h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=", + version = "v0.0.0-20190331212654-76723241ea4e", + ) + go_repository( + name = "org_modernc_cc", + importpath = "modernc.org/cc", + sum = "h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_golex", + importpath = "modernc.org/golex", + sum = "h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_mathutil", + importpath = "modernc.org/mathutil", + sum = "h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_strutil", + importpath = "modernc.org/strutil", + sum = "h1:XVFtQwFVwc02Wk+0L/Z/zDDXO81r5Lhe6iMKmGX3KhE=", + version = "v1.0.0", + ) + 
go_repository( + name = "org_modernc_xc", + importpath = "modernc.org/xc", + sum = "h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU=", + version = "v1.0.0", + ) + go_repository( + name = "org_mongodb_go_mongo_driver", + importpath = "go.mongodb.org/mongo-driver", + sum = "h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=", + version = "v1.1.2", + ) + go_repository( + name = "org_uber_go_atomic", + importpath = "go.uber.org/atomic", + sum = "h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY=", + version = "v1.5.0", + ) + go_repository( + name = "org_uber_go_multierr", + importpath = "go.uber.org/multierr", + sum = "h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=", + version = "v1.3.0", + ) + go_repository( + name = "org_uber_go_tools", + importpath = "go.uber.org/tools", + sum = "h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=", + version = "v0.0.0-20190618225709-2cfd321de3ee", + ) + + go_repository( + name = "org_uber_go_zap", + importpath = "go.uber.org/zap", + sum = "h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=", + version = "v1.13.0", + ) + go_repository( + name = "tools_gotest", + importpath = "gotest.tools", + sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=", + version = "v2.2.0+incompatible", + ) + go_repository( + name = "tools_gotest_v3", + importpath = "gotest.tools/v3", + sum = "h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=", + version = "v3.0.2", + ) + go_repository( + name = "xyz_gomodules_jsonpatch_v2", + importpath = "gomodules.xyz/jsonpatch/v2", + sum = "h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=", + version = "v2.1.0", + ) diff --git a/docs/content/backup-restore/restore-from-backups.md b/docs/content/backup-restore/restore-from-backups.md new file mode 100644 index 0000000..8d96bf6 --- /dev/null +++ b/docs/content/backup-restore/restore-from-backups.md @@ -0,0 +1,129 @@ +# Restore from a backup + +An Instance can be restored from a `backups.oracle.db.anthosapis.com` resource +representing either a snapshot-based backup or an RMAN backup. + +The following variables used in the examples below: + +```sh +export NAMESPACE= +export PATH_TO_EL_CARRO_RELEASE= +``` + +### Locate a backup + +The Instance resource contains the ID of the latest backup taken for the instance: + +```sh +kubectl get instances.oracle.db.anthosapis.com -n $NAMESPACE +``` + +```sh +NAME DB ENGINE VERSION EDITION ENDPOINT URL DB NAMES BACKUP ID READYSTATUS READYREASON DBREADYSTATUS DBREADYREASON +mydb Oracle 12.2 Enterprise mydb-svc.db 10.128.0.33:6021 [pdb1, pdb2] mydb-20210427-phys-885709718 True CreateComplete True CreateComplete +``` + +Alternatively, IDs of older backups can be found by listing the +`backups.oracle.db.anthosapis.com` resources in the same namespace that the +database instance belongs to: + +```sh +kubectl get backups.oracle.db.anthosapis.com -n $NAMESPACE +``` + +```sh +NAME INSTANCE NAME BACKUP TYPE BACKUP SUBTYPE DOP BS/IC GCS PATH PHASE BACKUP ID BACKUP TIME +rman1-inst mydb Physical Instance 1 true Succeeded mydb-20210427-phys-885709718 20210427210913 +snap1 mydb Snapshot Instance Succeeded mydb-20210427-snap-416248334 20210427182828 +``` + +### Prepare an Instance Resource Manifest for Restore + +Once the ID for the backup to restore from is determined, you can restore the +instance by uncommenting or adding the `restore` section in the Instance +manifest. The four mandatory attributes to uncomment are: + +* backupType (Snapshot or Physical) +* backupId +* force +* requestTime + +The backupId comes from the previous step. 
+
+To avoid accidental restores, the `force` attribute needs to be explicitly set
+to `true`. Failure to do so trips the safeguard in the Instance controller and
+leads to an error message stating that you need to be explicit in requesting a
+restore (and acknowledging the downtime associated with it). Another safeguard
+is the `requestTime` attribute, which is recommended to be set to the current
+time. If the same .yaml file that was previously used for a restore operation is
+sent to Kubernetes with the same value of `requestTime`, the request will be
+ignored. To invoke another restore operation, `requestTime` needs to be updated
+to the current time; this ensures that the new request's timestamp is
+later than the timestamp of the previous restore operation. Requests with
+earlier timestamps in `requestTime` are ignored as well.
+
+Take a 12.2 instance as an example:
+
+```sh
+cat $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_instance.yaml
+```
+
+```yaml
+apiVersion: oracle.db.anthosapis.com/v1alpha1
+kind: Instance
+metadata:
+  name: mydb
+spec:
+  type: Oracle
+  version: "12.2"
+  edition: Enterprise
+  dbDomain: "gke"
+  disks:
+  - name: DataDisk
+    size: 45Gi
+    type: pd-standard
+    storageClass: "csi-gce-pd"
+  - name: LogDisk
+    size: 55Gi
+    type: pd-standard
+    storageClass: "csi-gce-pd"
+  services:
+    Backup: true
+    Monitoring: true
+    Logging: true
+    Patching: true
+  sourceCidrRanges: [0.0.0.0/0]
+  minMemoryForDBContainer: 4.0Gi
+  maintenanceWindow:
+    timeRanges:
+    - start: "2121-04-20T15:45:30Z"
+      duration: "168h"
+
+# parameters:
+#   parallel_servers_target: "15"
+#   disk_asynch_io: "true"
+#   memory_max_target: "0"
+
+# Uncomment this section to trigger a restore.
+# restore:
+#   backupType: "Snapshot" # (or "Physical")
+#   backupId: "mydb-20200705-snap-996678001"
+#   force: True
+#   # Once applied, new requests with the same or older time will be ignored.
+#   # The current time can be generated with: date -u '+%Y-%m-%dT%H:%M:%SZ'
+#   requestTime: "2000-01-19T01:23:45Z"
+#   # Physical backup specific attributes:
+#   dop: 2
+#   # The unit for the time limit is minutes (but specify just an integer).
+#   timeLimit: 180
+```
+
+### Limitations
+
+There are currently limitations for restoring from an RMAN backup:
+
+* Only backups created with `spec.backupset` either omitted or set to `true`
+  can be restored from.
+* Only backups created with `spec.subType` either omitted or set to `Instance`
+  can be restored from.
+* For local backups (ones that don't specify the `spec.gcsPath` attribute and
+  thus do not persist backup data in GCS), a restore can only be done from the
+  latest such backup.
diff --git a/docs/content/backup-restore/rman-backups.md b/docs/content/backup-restore/rman-backups.md
new file mode 100644
index 0000000..d62fdd2
--- /dev/null
+++ b/docs/content/backup-restore/rman-backups.md
@@ -0,0 +1,82 @@
+# Steps to create Oracle RMAN backup
+
+The following variables are used in the examples below:
+
+```sh
+export NAMESPACE=
+export PATH_TO_EL_CARRO_RELEASE=
+```
+
+## Prepare a Backup CR Manifest
+
+In the Backup CR manifest, the following fields are required:
+
+* name: the backup name.
+* instance: the instance name to create the RMAN backup for.
+* type: this must be set to "Physical" for an RMAN backup.
+
+El Carro also provides the following optional fields to manage RMAN backup
+creation:
+
+* subType: the level at which the RMAN backup is taken. Choose between "Instance" and "Database". Defaults to "Instance".
+* backupItems: the PDBs that need to be backed up.
Must be used along with "subType: Database". Default is empty.
+* backupset: a boolean flag to control the RMAN backup type: "true" for backup sets, "false" for image copies. Default is true.
+* compressed: a boolean flag to turn on compression. Must be used along with "backupset: true". Default is false.
+* filesperset: the number of files to be allowed in a backup set. Must be used along with "backupset: true". Default is 64.
+* checkLogical: a boolean flag to turn on the RMAN "check logical" option. Default is false.
+* dop: the degree of parallelism. Default is 1.
+* level: the incremental level (0=Full Backup, 1=Incremental, 2=Cumulative). Default is 0.
+* sectionSize: an integer used to set the section size in MB.
+* timeLimit: an integer used to set the time threshold, in minutes, for creating an RMAN backup. Default is 60.
+* localPath: the local backup directory. Default is "/u03/app/oracle/rman".
+* gcsPath: a GCS bucket to transfer the backup to. You need to ensure proper write access to the bucket from the Oracle Operator. "localPath" will be ignored if this is set.
+
+A sample Backup CR manifest may look like the following:
+
+```sh
+cat $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_backup_rman3.yaml
+```
+
+```yaml
+# Physical backup config for the whole Instance with all the options.
+apiVersion: oracle.db.anthosapis.com/v1alpha1
+kind: Backup
+metadata:
+  name: rman3-inst-opts
+spec:
+  instance: mydb
+  type: Physical
+  subType: Instance
+  backupset: true
+  checkLogical: true
+  compressed: true
+  # DOP = Degree of Parallelism.
+  dop: 4
+  # Level: 0=Full Backup, 1=Incremental, 2=Cumulative
+  level: 0
+  filesperset: 10
+  # Backup Section Size in MB (don't specify the unit, just the integer).
+  sectionSize: 100
+  # Backup threshold is expressed in minutes (don't specify the unit, just the integer).
+  timeLimit: 30
+  localPath: "/u03/app/oracle/rman"
+```
+
+## Submit the Backup CR
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_backup_rman3.yaml -n $NAMESPACE
+```
+
+## Watch backup status
+
+```sh
+kubectl get backups.oracle.db.anthosapis.com -w -n $NAMESPACE
+```
+
+```sh
+NAME              INSTANCE NAME   BACKUP TYPE   BACKUP SUBTYPE   DOP   BS/IC   GCS PATH   PHASE        BACKUP ID                      BACKUP TIME
+rman3-inst-opts   mydb            Physical      Instance         4                        InProgress   mydb-20210430-phys-826537073   20210420173733
+rman3-inst-opts   mydb            Physical      Instance         4                        Succeeded    mydb-20210430-phys-826537073   20210420173733
+```
+
+Once the backup phase changes to `Succeeded`, the physical backup is complete
+and ready to use.
diff --git a/docs/content/backup-restore/snapshot-backups.md b/docs/content/backup-restore/snapshot-backups.md
new file mode 100644
index 0000000..a1fe7ca
--- /dev/null
+++ b/docs/content/backup-restore/snapshot-backups.md
@@ -0,0 +1,101 @@
+# Backup: Snapshots
+
+El Carro features two types of backups: **snapshot-based** and **RMAN-based**.
+
+You're free to choose one over the other or use a combination of the two. In
+general, snapshot backups allow creating thin clones of a database, which are
+considerably faster and scale better as the databases grow in size. The same
+applies to the restore. On the other hand, RMAN backups are done at the
+database block level, with validations (some optional, for example: check
+logical), which makes RMAN backups more trustworthy. For example, block
+corruption (ORA-1578 and similar) may go unnoticed and propagate to the
+snapshot-based backup, but is likely to get detected in the RMAN backupset.
+Also, snapshots inherently rely on the same storage device, making it a
+potential point of failure.
+
+The choice between RMAN and a storage-based snapshot is completely up to you.
+
+## Steps to create Oracle Snapshot backup
+
+The following variables are used in the examples below:
+
+```sh
+export NAMESPACE=
+export PATH_TO_EL_CARRO_RELEASE=
+```
+
+### Locate an instance in ready state
+
+```sh
+kubectl get instances.oracle.db.anthosapis.com -n $NAMESPACE
+```
+
+```sh
+NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL       DB NAMES   BACKUP ID   READYSTATUS   READYREASON      DBREADYSTATUS   DBREADYREASON
+mydb   Oracle      12.2      Enterprise   mydb-svc.db   *******                          True          CreateComplete   True            CreateComplete
+```
+
+### Prepare a Backup CR Manifest
+
+Depending on whether or not a Config CR was submitted earlier, the
+[volumeSnapshotClass](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/)
+attribute may not be required. If it's not provided and there's no Config, El
+Carro attempts to figure out the default value for the platform. We recommend
+setting it explicitly, either here or in the Config.
+
+For the GCP platform, the default value for volumeSnapshotClass is
+`csi-gce-pd-snapshot-class`.
+
+For Minikube, the default value for volumeSnapshotClass is
+`csi-hostpath-snapclass`.
+
+List the installed volume snapshot classes in the cluster:
+
+```sh
+kubectl get volumeSnapshotClass
+```
+
+```sh
+cat $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_backup_snap2.yaml
+```
+
+```yaml
+apiVersion: oracle.db.anthosapis.com/v1alpha1
+kind: Backup
+metadata:
+  name: snap2
+spec:
+  instance: mydb
+  type: Snapshot
+  subType: Instance
+  volumeSnapshotClass: "csi-gce-pd-snapshot-class"
+```
+
+### Submit the Backup CR
+
+After completing the Backup manifest, submit it to the local cluster as follows:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_backup_snap2.yaml -n $NAMESPACE
+```
+
+### Review the Backup CR
+
+An easy way to monitor the state of the Backup CR is by running the following
+command:
+
+```sh
+kubectl get backups.oracle.db.anthosapis.com -n $NAMESPACE -w
+```
+
+```sh
+NAME    INSTANCE NAME   BACKUP TYPE   BACKUP SUBTYPE   DOP   BS/IC   GCS PATH   PHASE       BACKUP ID                      BACKUP TIME
+snap2   mydb            Snapshot      Instance                                  Succeeded   mydb-20210505-snap-480271058   20210505233252
+```
+
+Once the backup phase changes to `Succeeded`, the created snapshot backup is
+ready to be used to restore an instance.
+
+Note that there might be multiple disks used in an El Carro instance, and the
+snapshots of all of them have to finish successfully for the Backup CR's phase
+to turn from InProgress to Succeeded. The latest backup ID is also copied to
+the Instance CR.
diff --git a/docs/content/contributing/development.md b/docs/content/contributing/development.md
new file mode 100644
index 0000000..4b37749
--- /dev/null
+++ b/docs/content/contributing/development.md
@@ -0,0 +1,24 @@
+# Development Guidance
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
+
+### Testing
+
+Ensure the following passes:
+
+```sh
+cd oracle
+make check
+make unit-test
+```
+
+and commit any resultant changes to `go.mod` and `go.sum`.
diff --git a/docs/content/contributing/guidelines.md b/docs/content/contributing/guidelines.md new file mode 100644 index 0000000..e9b7826 --- /dev/null +++ b/docs/content/contributing/guidelines.md @@ -0,0 +1,33 @@ +# Contributing Guidelines + +Thanks in advance for your contribution to the El Carro project! + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +Our contributors try to follow good software development practices to help +ensure that the product we provide to our customers is stable and reliable. + +We've proposed some guidelines below (and welcome more suggestions!) + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement (CLA). You (or your employer) retain the copyright to your +contribution; this simply gives us permission to use and redistribute your +contributions as part of the project. Head over to + to see your current agreements on file or +to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Pull Requests + +Once you are ready to submit a Pull Request, please ensure you do the following: + +* Please be as descriptive in your pull request as possible. If you are +referencing an issue, please be sure to include the issue in your pull request. + +* Please ensure you have added testing where appropriate. diff --git a/docs/content/custom-resources/database.md b/docs/content/custom-resources/database.md new file mode 100644 index 0000000..6fb879e --- /dev/null +++ b/docs/content/custom-resources/database.md @@ -0,0 +1,215 @@ +# Appendix B: Change a Database (PDB): users/privs {: #appendix-b} + +El Carro provides support for declarative user/schema and roles/privilege +management through the changes in a Database manifest. + +## Case 1: Add a User + +In the example below we change the Database CR by adding a new user scott1 and +grant him two roles ("connect" and "resource") as well as the "unlimited +tablespace" system privilege: + +```sh +cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - create session + - name: scott1 + password: tiger + privileges: + - connect + - resource + - unlimited tablespace +``` + +Submit a Database CR and verify that the UsersReady condition is set to True, +which can be further confirmed by querying the data dictionary: + +```sh +kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml -n $NS + +kubectl get databases.oracle.db.anthosapis.com -n $NS +NAME INSTANCE USERS PHASE DATABASEREADYSTATUS DATABASEREADYREASON USERREADYSTATUS USERREADYREASON +pdb1 mydb ["superuser","scott","proberuser","..."] Ready True CreateComplete True SyncComplete + + +SQL> alter session set container=PDB1; +Session altered. 
+ +SQL> select granted_role from dba_role_privs where grantee='SCOTT1'; +GRANTED_ROLE +-------------------------------------------------------------------------------- +CONNECT +RESOURCE + +SQL> select privilege from dba_sys_privs where grantee='SCOTT1'; +PRIVILEGE +---------------------------------------- +UNLIMITED TABLESPACE +``` + +## Case 2: Delete a User + +In the Preview release El Carro doesn't delete users (or schemas) in a database. +If a user is removed from a manifest submitted against an existing Database CR, +El Carro flags this and sets the UsersReady condition type to False. + +In the example below we change the Database CR by deleting an existing user +proberuser: + +```sh +cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: scott1 + password: tiger + privileges: + - connect + - resource + - unlimited tablespace +``` + +Submit a Database CR and verify that the UsersReady condition indeed gets reset +to False. You can then get more information on this by reviewing the status. You +can further query the data dictionary to confirm that indeed the user and the +privileges/roles in the database haven't changed: + +```sh +kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml -n $NS + +kubectl get databases.oracle.db.anthosapis.com -n $NS +NAME INSTANCE USERS PHASE DATABASEREADYSTATUS DATABASEREADYREASON USERREADYSTATUS USERREADYREASON +pdb1 mydb ["superuser","scott","scott1"] Ready True CreateComplete False UserOutOfSync + +kubectl get databases.oracle.db.anthosapis.com pdb1 -o=jsonpath='{.status}' -n $NS +{"conditions":[{"message":"User \"PROBERUSER\" not defined in database spec, supposed to be deleted. suppressed SQL \"ALTER SESSION SET CONTAINER=PDB1; DROP USER PROBERUSER CASCADE;\". Fix by deleting the user in DB or updating DB spec to include the user","reason":"UsersOutOfSync","status":"False","type":"UsersReady"}],"status":"Ready"} + + +SQL> alter session set container=PDB1; + +Session altered. + +SQL> select privilege from dba_sys_privs where grantee='PROBERUSER'; + +PRIVILEGE +---------------------------------------- +CREATE SESSION +``` + +## Case 3: Change a User by adding/removing Roles/Privileges + +On top of adding and removing users, you can also add/remove privileges and +roles from/to the existing users: + +```sh +cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - unlimited tablespace + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - connect + - create session + - name: scott1 + password: tiger + privileges: + - connect +``` + +Submit a Database CR and query the data dictionary to confirm that the +privileges and roles have been removed/added as requested: + +```sh +kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml -n $NS + + +SQL> alter session set container=PDB1; + +Session altered. 
+ +SQL> select privilege from dba_sys_privs where grantee='SUPERUSER'; + +PRIVILEGE +---------------------------------------- +UNLIMITED TABLESPACE + +SQL> select granted_role from dba_role_privs where grantee='SUPERUSER'; + +no rows selected + +SQL> select privilege from dba_sys_privs where grantee='SCOTT1'; + +no rows selected + +SQL> select granted_role from dba_role_privs where grantee='SCOTT1'; + +GRANTED_ROLE +-------------------------------------------------------------------------------- +CONNECT + +SQL> select privilege from dba_sys_privs where grantee='PROBERUSER'; + +PRIVILEGE +---------------------------------------- +CREATE SESSION + +SQL> select granted_role from dba_role_privs where grantee='PROBERUSER'; + +GRANTED_ROLE +-------------------------------------------------------------------------------- +CONNECT +``` diff --git a/docs/content/custom-resources/instance.md b/docs/content/custom-resources/instance.md new file mode 100644 index 0000000..c66ffd6 --- /dev/null +++ b/docs/content/custom-resources/instance.md @@ -0,0 +1,77 @@ +# Appendix A: Create an El Carro Instance: Advanced {: #appendix-a} + +The `samples` directory provided with the El Carro release contains a set of +useful manifests to get you started. As you start rolling out El Carro services +to many databases, it may become tedious to keep track and maintain consistency +across manifests even with the rigorous version control practices. This is in +part because the sample manifests lack a common origin, the "root of +manifests". + +One way to start on the path of creating declarative workflows is to parametrize +the template YAMLs, keep them DRY and hydrate per application. The `workflows` +directory can help you to get started. Here's how an Instance template manifest +can be hydrated: + +```sh +kpt cfg create-setter ${PATH_TO_EL_CARRO_RELEASE}/workflows namespace "" +
+kpt cfg create-setter ${PATH_TO_EL_CARRO_RELEASE}/workflows services --type array --field spec.services +
+kpt cfg create-setter ${PATH_TO_EL_CARRO_RELEASE}/workflows dbimage "" +``` + +The result of running this command is a fully hydrated and ready to apply +Instance manifest: + +```sh +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb + namespace: "" # {"$kpt-set":"namespace"} +spec: + type: Oracle + version: "12.2" + edition: Enterprise + DBDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + - name: LogDisk + size: 55Gi + type: pd-standard + services: # {"$kpt-set":"services"} + - "" # {"$kpt-set":"services"} + images: + service: "" # {"$kpt-set":"dbimage"} + sourceCidrRanges: [0.0.0.0/0] + minMemoryForDBContainer: 4.0Gi + maintenanceWindow: + timeRanges: + - start: "2121-04-20T15:45:30Z" + duration: "168h" + + # parameters: + # parallel_servers_target: "15" + # disk_asynch_io: "true" + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" #(or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 +``` + +Given that this manifest is the same as the one provided in the `samples` +directory (but hydrated dynamically by way of the safe variable substitution), +the process +[described earlier](#submit-cr) +of applying this manifest fully applies. diff --git a/docs/content/data-pump/export.md b/docs/content/data-pump/export.md new file mode 100644 index 0000000..071a860 --- /dev/null +++ b/docs/content/data-pump/export.md @@ -0,0 +1,106 @@ +# Data Pump: Export data from a PDB + +The following variables used in the examples below: + +
+```sh
+export NAMESPACE=<kubernetes namespace where the instance was created>
+export PATH_TO_EL_CARRO_RELEASE=<the complete path to the downloaded release directory>
+```
+ +Data Pump export uses the Oracle `expdp` utility for exporting data and metadata +into a set of operating system files called a dump file set. El Carro allows you +to declaratively initiate a Data Pump export. To do so: + +1. Prepare and apply an Export CR (Custom Resource): +
+    ```sh
+    cat $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_export_dmp1.yaml
+    ```
+
+    ```yaml
+    apiVersion: oracle.db.anthosapis.com/v1alpha1
+    kind: Export
+    metadata:
+     name: export-dmp1
+    spec:
+     instance: mydb
+     databaseName: pdb1
+     type: DataPump
+     exportObjectType: Schemas # 'Schemas' or 'Tables'
+     exportObjects:
+     - SCOTT
+     # Uncomment flashbackTime to enable flashback time feature
+     # Time is in RFC3339 for datetime format,
+     # for example 1985-04-12T23:20:50.52Z represents 20 minutes and 50.52 seconds after the 23rd hour of April 12th, 1985 in UTC.
+     # before enabling make sure undo_retention settings are consistent with set time
+     # flashbackTime: "2021-01-05T15:00:00Z"  #optional
+
+     # Service account should have write access to the destination bucket,
+     # sample command to grant access (replace with actual SA email):
+     # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectCreator gs://example-bucket
+     #  Add .gz as GCS object file extension to enable compression.
+     gcsPath: "gs://example-bucket/elcarro/export/pdb1/exportSchema.dmp"
+     gcsLogPath: "gs://example-bucket/elcarro/export/pdb1/exportSchema.log" #optional
+    ```
+
+2. Submit the Export CR:
+
+    ```sh
+    kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_export_dmp1.yaml -n $NAMESPACE
+    ```
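+
+    To follow the export progress, you can watch the Export resource itself.
+    This is a sketch that assumes the Export CR supports the same
+    `kubectl get -w` pattern as the Backup and Import resources shown in
+    these docs:
+
+    ```sh
+    # Watch the Export CR until its PHASE reaches a terminal state.
+    kubectl get exports.oracle.db.anthosapis.com -n $NAMESPACE -w
+    ```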
+ +### ExportObjectType and ExportObjects fields + +El Carro currently supports Data Pump export in Schema (default export mode) and +Table mode. Export mode can be set in the `exportObjectType` field of the export +CR. A list of objects to be exported, schemas or tables, should be specified in +the `exportObjects` field. For example, to export tables instead of schemas, +change `exportObjectType` and `exportObjects` fields: + +
+```yaml
+exportObjectType: Tables
+exportObjects:
+- SCOTT.t1
+- SCOTT.t2
+```
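+
+Putting it together, a complete table-mode Export CR might look like the
+following sketch (the metadata name here is made up; the remaining values
+reuse the sample CR shown earlier):
+
+```yaml
+apiVersion: oracle.db.anthosapis.com/v1alpha1
+kind: Export
+metadata:
+  name: export-dmp-tables # hypothetical name
+spec:
+  instance: mydb
+  databaseName: pdb1
+  type: DataPump
+  exportObjectType: Tables
+  exportObjects:
+  - SCOTT.t1
+  - SCOTT.t2
+  gcsPath: "gs://example-bucket/elcarro/export/pdb1/exportTables.dmp"
+```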
+ +### FlashbackTime field + +[FlashbackTime](https://docs.oracle.com/cd/B28359_01/server.111/b28319/dp_export.htm#i1007150) +field in the export CR is an optional timestamp in +[RFC3339](https://tools.ietf.org/html/rfc3339) format. When specified, the +system change number (SCN) that most closely matches the time is found, and this +SCN is used to enable the Flashback utility. If consistency up to a certain SCN +is desired, specify the FlashbackTime field. + +### gcsPath field + +`gcsPath` field is a full path to a GCS bucket to which the export dmp file +should be uploaded to. You must ensure that the service account used by the El +Carro operator has adequate write permissions to the GCS bucket by running the +following command: + +
+```sh
+gsutil iam ch serviceAccount:gke_cluster_service_account_email:objectCreator gs://example-bucket
+```
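+
+If in doubt, the bucket's resulting IAM bindings can be inspected with the
+standard `gsutil iam get` command:
+
+```sh
+# Print the bucket IAM policy to confirm the objectCreator binding is present.
+gsutil iam get gs://example-bucket
+```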
+ +### gcsLogPath field + +`gcsLogPath` is an optional parameter. When specified, export logs files will be +uploaded to it. Similar to `gcsPath`, write access to the GCS bucket to upload +export log files should be granted to the service account used by the El Carro +operator. + +Note: For `gcsPath` and `gcsLogPath`, the .gz suffix can be added to dmp and log +files to enable compression when uploading to the GCS bucket. For example: + +
+```yaml
+#  Add .gz as Google Cloud Storage object file extension to enable compression.
+gcsPath: "gs://example-bucket/elcarro/export/pdb1/exportSchema.dmp.gz"
+gcsLogPath: "gs://example-bucket/elcarro/export/pdb1/exportSchema.log.gz" # optional
+```
diff --git a/docs/content/data-pump/import.md b/docs/content/data-pump/import.md new file mode 100644 index 0000000..fd8c805 --- /dev/null +++ b/docs/content/data-pump/import.md @@ -0,0 +1,104 @@ +# DataPump: Import data from one PDB into another one + +Running Oracle Data Pump import utility - `impdp` - is supported via a +declarative Kubernetes-based API. An import operation takes a dump file at a +[Google Cloud Storage](https://cloud.google.com/storage/docs) location and +applies it to an existing PDB. A dump file contains db data and metadata in an +Oracle proprietary binary format which is produced by the Oracle Data Pump +export utility - `expdp`. + +Note: the **PDB name** the data was exported from, and the destination **PDB +name** the dump file is imported to, must match. + +1. Prepare a dump file and set permissions + + Upload a dump file to a + [Google Cloud Storage](https://cloud.google.com/storage/docs) location, for + example: `gs://example-bucket/imports/pdb1/tables.dmp` Make sure the El + Carro operator has read access to the bucket containing the dump file: + + a. If you're not using + workload + identity, use the Compute Engine default service account for GCS access. + + b. If you have enabled workload identity on GKE, you can + configure + the Kubernetes service account to act as a Google service account. + + You can run the following command to see whether workload identity is + enabled for the GKE cluster or not: + +
+     ```sh
+     gcloud container clusters describe CLUSTER --zone=ZONE --project=PROJECT | grep workload
+
+     workloadMetadataConfig:
+       workloadMetadataConfig:
+     workloadIdentityConfig:
+        workloadPool: PROJECT.svc.id.goog
+     ```
+ + Grant permissions using the appropriate service account: + +
+     ```sh
+     gsutil iam ch serviceAccount:service_account_email:objectViewer gs://example-bucket
+     ```
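+
+    With workload identity, the link between the Kubernetes service account
+    and a Google service account is typically established with a binding along
+    these lines (a sketch only; every name below is a placeholder, and the
+    workload identity guide linked above is the authoritative reference):
+
+    ```sh
+    # Placeholders: GSA_NAME, PROJECT, K8S_NAMESPACE and KSA_NAME must be
+    # replaced with your own values.
+    gcloud iam service-accounts add-iam-policy-binding \
+      GSA_NAME@PROJECT.iam.gserviceaccount.com \
+      --role roles/iam.workloadIdentityUser \
+      --member "serviceAccount:PROJECT.svc.id.goog[K8S_NAMESPACE/KSA_NAME]"
+    ```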
+
+    **Optionally**
+
+    There is an optional parameter you can pass to an import: the GCS location
+    for the import operation log file, which is produced by the `impdp` utility
+    and contains the import process details for later inspection. If you
+    request an import log, make sure the operator can write to the destination
+    GCS bucket:
+    ```sh
+    gsutil iam ch serviceAccount:gke_cluster_service_account_email:objectCreator gs://example-log-bucket
+    ```
+ +1. Create and apply Import CR + + Edit the `gcsPath` and optionally the `gcsLogPath` attributes in the sample + import resource: + +
+    ```sh
+    cat $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_import_pdb1.yaml
+    ```
+
+    ```yaml
+    apiVersion: oracle.db.anthosapis.com/v1alpha1
+    kind: Import
+    metadata:
+      name: import-pdb1
+    spec:
+      instance: mydb
+      databaseName: pdb1
+      type: DataPump
+      # Service account should have read access to the destination bucket,
+      # sample command to grant read access (replace with actual SA email):
+      # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectViewer gs://ex-bucket
+      gcsPath: "gs://example-bucket/import/pdb1/import.dmp"
+      # Uncomment to enable import log upload to Google Cloud Storage.
+      # Service account should have write access to the destination bucket,
+      # sample command to grant access (replace with actual SA email):
+      # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectCreator gs://ex-bucket
+      #  Add .gz as Google Cloud Storage object file extension to enable compression.
+      gcsLogPath: "gs://example-log-bucket/import/pdb1.log"
+    ```
+ + `instance` and `databaseName` must refer to existing `Instance` and + `Database` custom resources names in the namespace the `Import` resource + will be created in. + + After the manifest is ready, submit it to the cluster as follows: + +
+    ```sh
+    kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_import_pdb1.yaml -n $NAMESPACE
+    ```
+ +1. (Optional) Inspect the result of creating an Import resource + + Check the Import custom resource status: + +
+    ```sh
+    kubectl get imports.oracle.db.anthosapis.com -n $NAMESPACE
+    ```
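+
+    For more detail, for example after a failure, the full status block can be
+    printed with a jsonpath query, mirroring the pattern used elsewhere in
+    these docs (the CR name matches the sample manifest above):
+
+    ```sh
+    kubectl get imports.oracle.db.anthosapis.com import-pdb1 -o=jsonpath='{.status}' -n $NAMESPACE
+    ```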
diff --git a/docs/content/minikube.md b/docs/content/minikube.md new file mode 100644 index 0000000..0475121 --- /dev/null +++ b/docs/content/minikube.md @@ -0,0 +1,264 @@ +# Running El Carro Operator on local clusters with minikube + +This guide helps you run El Carro Operator locally on minikube on your personal +computer. If you prefer to use GKE (Google Kurbernetes Engine) to deploy the El +Carro Operator, stop here and refer to our [Quickstart Guide](quickstart.md) or +[Quickstart Guide for Oracle 18c XE](quickstart-18c-xe.md). + +## Before you begin + +The following variables will be used in this guide: + +```sh +export PATH_TO_EL_CARRO_REPO= +export NS= +``` + +You should set these variables in your environment. + +## Install Minikube, Docker and kubectl + +* Install minikube by following the official minikube + [Get Started guide](https://minikube.sigs.k8s.io/docs/start/). +* Install [kubectl](https://kubernetes.io/docs/tasks/tools/) to access the + kubernetes cluster inside minikube +* Install Docker to build images locally +* Make sure you have access to El Carro source code either through Github or + GOB because we will build container images locally and push to the local + minikube registry. + +## Prepare a minikube cluster + +1. Create a minikube cluster by running: + + ```sh + minikube start + ``` + +2. Verify that your minikube cluster was created and set as the current + context: + + ```sh + kubectl config current-context + ``` + + This should print: + ```sh + minikube + ``` + +3. Enable the following two addons to get minikube ready for El Carro: + + ```sh + minikube addons enable csi-hostpath-driver + minikube addons enable volumesnapshots + ``` + +4. Enable the registry addon to allow docker to push images to minikube's registry: + ```sh + minikube addons enable registry + ``` + +5. In a a separate terminal, redirect port 5000 from Docker to port 5000 on + your host by following this + [guide](https://minikube.sigs.k8s.io/docs/handbook/registry/) or running: + + ```sh + docker run --rm -d --network=host --name=registry-port-forwarder alpine ash -c "apk add socat && socat TCP-LISTEN:5000,reuseaddr,fork TCP:$(minikube ip):5000" + ``` + +6. Verify that you are able to access the minikube registry by running: + + ```sh + curl http://localhost:5000/v2/_catalog + ``` + +7. 
After completing the steps above, running: + + ```sh + minikube addons list + ``` + + should print: + + ```sh + |-----------------------------|----------|--------------| + | ADDON NAME | PROFILE | STATUS | + |-----------------------------|----------|--------------| + | ambassador | minikube | disabled | + | auto-pause | minikube | disabled | + | csi-hostpath-driver | minikube | enabled ✅ | + | dashboard | minikube | disabled | + | default-storageclass | minikube | enabled ✅ | + | efk | minikube | disabled | + | freshpod | minikube | disabled | + | gcp-auth | minikube | disabled | + | gvisor | minikube | disabled | + | helm-tiller | minikube | disabled | + | ingress | minikube | disabled | + | ingress-dns | minikube | disabled | + | istio | minikube | disabled | + | istio-provisioner | minikube | disabled | + | kubevirt | minikube | disabled | + | logviewer | minikube | disabled | + | metallb | minikube | disabled | + | metrics-server | minikube | disabled | + | nvidia-driver-installer | minikube | disabled | + | nvidia-gpu-device-plugin | minikube | disabled | + | olm | minikube | disabled | + | pod-security-policy | minikube | disabled | + | registry | minikube | enabled ✅ | + | registry-aliases | minikube | disabled | + | registry-creds | minikube | disabled | + | storage-provisioner | minikube | enabled ✅ | + | storage-provisioner-gluster | minikube | disabled | + | volumesnapshots | minikube | enabled ✅ | + |-----------------------------|----------|--------------| + ``` + +## Connect to the minikube LoadBalancer service + +In order to connect to El Carro later, you need to +[connect to LoadBalancer services](https://minikube.sigs.k8s.io/docs/commands/tunnel/) +by running the following command in **a separate terminal session**: + +```sh +minikube tunnel +``` + +## Build El Carro images locally + +### Oracle database image + +Follow the [Quickstart](quickstart) guide to build an oracle database image +locally, then tag and push the image to the local registry: + +```sh +docker tag gcr.io/local-build/oracle-database-images/oracle-12.2-ee-seeded-mydb:latest localhost:5000/oracle-12.2-ee-seeded-mydb:latest +docker push localhost:5000/oracle-12.2-ee-seeded-mydb:latest +``` + +### Build and push the El Carro Operator image + +Build the El Carro operator image and push it to your local registry by running: + +```sh +cd $PATH_TO_EL_CARRO_REPO +export REPO="localhost:5000/oracle.db.anthosapis.com" +export TAG="latest" +export OPERATOR_IMG="${REPO}/operator:${TAG}" +docker build -f oracle/Dockerfile -t ${OPERATOR_IMG} . +docker push ${OPERATOR_IMG} +``` + +### Build and push the El Carro agent images: + +```sh +export DBINIT_IMG="${REPO}/dbinit:${TAG}" +docker build -f oracle/build/dbinit/Dockerfile -t ${DBINIT_IMG} . +docker push ${DBINIT_IMG} + +export CONFIG_AGENT_IMG="${REPO}/configagent:${TAG}" +docker build -f oracle/build/config_agent/Dockerfile -t ${CONFIG_AGENT_IMG} . +docker push ${CONFIG_AGENT_IMG} + +export LOGGING_IMG="${REPO}/loggingsidecar:${TAG}" +docker build -f oracle/build/loggingsidecar/Dockerfile -t ${LOGGING_IMG} . +docker push ${LOGGING_IMG} + +export MONITORING_IMG="${REPO}/monitoring:${TAG}" +docker build -f oracle/build/monitoring/Dockerfile -t ${MONITORING_IMG} . 
+docker push ${MONITORING_IMG}
+```
+
+Verify that your images were successfully pushed to your local repository by
+running:
+
+```sh
+curl http://localhost:5000/v2/_catalog
+```
+
+You should see an output similar to this:
+
+```sh
+{"repositories":["oracle-12.2-ee-seeded-mydb","oracle.db.anthosapis.com/configagent","oracle.db.anthosapis.com/dbinit","oracle.db.anthosapis.com/loggingsidecar","oracle.db.anthosapis.com/monitoring","oracle.db.anthosapis.com/operator"]}
+```
+
+## Deploying the El Carro Operator
+
+To deploy the El Carro operator using your locally built image, run the
+following:
+
+```sh
+sed -i 's/image: gcr.*oracle.db.anthosapis.com/image: localhost:5000\/oracle.db.anthosapis.com/g' $PATH_TO_EL_CARRO_REPO/oracle/operator.yaml
+kubectl apply -f $PATH_TO_EL_CARRO_REPO/oracle/operator.yaml
+```
+
+### Set up a namespace:
+
+Set up a namespace where you will apply your custom resources (El Carro
+instance, database, etc.). For the linked user guides referencing a namespace,
+you should use the namespace you created in this step.
+
+```sh
+kubectl create namespace $NS
+```
+
+## Creating a minikube config CR:
+
+To override the default CSI driver and image settings used for GKE, apply the
+minikube-specific config CR by running:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_REPO/oracle/config/samples/v1alpha1_config_minikube.yaml -n $NS
+```
+
+You must apply the config CR before you create El Carro instances so that
+minikube-specific configurations can be picked up by El Carro.
+
+### Creating an El Carro instance:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_REPO/oracle/config/samples/v1alpha1_instance_minikube.yaml -n $NS
+```
+
+Follow the [instance provisioning user guide](provision/instance.md) to learn
+how to provision more complex types of El Carro instances.
+
+## [optional] Minikube dashboard
+
+Enable the minikube dashboard addon to see information about Kubernetes
+resources in your minikube cluster:
+
+```sh
+minikube addons enable dashboard
+  ▪ Using image kubernetesui/metrics-scraper:v1.0.4
+  ▪ Using image kubernetesui/dashboard:v2.1.0
+💡 Some dashboard features require the metrics-server addon. To enable all features please run:
+   minikube addons enable metrics-server
+🌟 The 'dashboard' addon is enabled
+minikube dashboard
+🤔 Verifying dashboard health ...
+🚀 Launching proxy ...
+🤔 Verifying proxy health ...
+🎉 Opening http://127.0.0.1:44273/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
+Opening in existing browser session.
+```
+
+## Stop the minikube cluster:
+
+```sh
+minikube stop
+✋ Stopping node "minikube" ...
+🛑 Powering off "minikube" via SSH ...
+🛑 1 nodes stopped.
+```
+
+Using `minikube stop` will not delete the resources you've provisioned in your
+cluster. You can start minikube again with `minikube start` and all the
+resources you have created in the minikube cluster will be available.
+
+## Delete the minikube cluster:
+
+```sh
+minikube delete
+🔥 Deleting "minikube" in docker ...
+🔥 Deleting container "minikube" ...
+🔥 Removing /usr/local/google/home/${USER}/.minikube/machines/minikube ...
+💀 Removed all traces of the "minikube" cluster.
+```
diff --git a/docs/content/monitoring/connectivity.md b/docs/content/monitoring/connectivity.md
new file mode 100644
index 0000000..786ce72
--- /dev/null
+++ b/docs/content/monitoring/connectivity.md
@@ -0,0 +1,31 @@
+# Client Side Connectivity
+
+Once a database/PDB is created, database connectivity can be tested with
+any of the client-side tools.
+For example, using SQL\*Plus:
+
+```sh
+sqlplus <user>/<password>@<ip-address>:<port>/<pdb-name>.<db-domain>
+```
+
+In practice, this could look like the following:
+
+```sh
+sqlplus scott/tiger@ip-address:6021/pdb1.gke
+```
+
+We currently don't allow changing the listener port. Use port-forwarding if
+you need to use a port other than the default. For example:
+
+```sh
+kubectl port-forward svc/graydb-svc 1521:6021 -n db
+Forwarding from 127.0.0.1:1521 -> 6021
+Forwarding from [::1]:1521 -> 6021
+Handling connection for 1521
+```
+
+You can then connect to port 1521 (or any port of your choice)
+using localhost:
+
+```bash
+sqlplus scott/tiger@localhost:1521/pdb1.gke
+```
diff --git a/docs/content/monitoring/logging.md b/docs/content/monitoring/logging.md
new file mode 100644
index 0000000..f360e68
--- /dev/null
+++ b/docs/content/monitoring/logging.md
@@ -0,0 +1,64 @@
+# Logging
+
+## Before you begin
+
+The following variables will be used in this guide:
+
+```sh
+export INSTANCE_NAME=
+export NS=
+export PATH_TO_EL_CARRO_RELEASE=
+```
+
+You should set these variables in your environment.
+
+## Viewing logs via kubectl
+
+You can retrieve El Carro logs from your Kubernetes cluster via the kubectl
+command, which will display the stdout and stderr streams of the specified
+container (alert-log-sidecar in this example) by running:
+
+```sh
+kubectl logs -f $(kubectl get pod -n $NS -l instance=$INSTANCE_NAME -o jsonpath="{.items[0].metadata.name}") -c alert-log-sidecar -n $NS
+```
+
+Aside from the regular agents, we have provided two sidecar containers that
+tail the listener log and alert log file, named _listener-log-sidecar_ and
+_alert-log-sidecar_ respectively.
+
+## Viewing logs via Cloud Console
+
+You can also retrieve El Carro logs using the Google Cloud Logs Explorer. This
+feature allows you to view logs of containers for up to 30 days. The Logs
+Explorer allows searching for particular messages, as well as viewing logs for
+containers that have been shut down or restarted. For more information on Logs
+Explorer, see the information at the
+[Logs Explorer help page](https://cloud.google.com/logging/docs/view/logs-viewer-interface).
+
+Since it may take a few minutes for the database instance to get fully
+provisioned (largely depending on whether or not the database container has
+been locally cached), the alert log won't show up in Stackdriver until
+instance provisioning is complete. Once it does (see the alert-log-sidecar
+container's "View Logs" link), there's an option to time lock, jump to now or
+stream the logs to get the latest updates automatically.
+
+## Changing Log Verbosity Levels
+
+The El Carro operator and its agents use the
+[klog logger](https://github.com/kubernetes/klog), which is a fork of glog and
+has the ability to dynamically set the verbosity level of the logs. The
+default log verbosity level is 0, but it can be increased to any positive
+integer value. Any log message at a level less than or equal to the current
+verbosity level will be printed out; for example, if the verbosity level is
+set to 2, then log messages at levels 0, 1, and 2 will be printed out, but
+messages at level 3 or greater will not.
+
+You're free to change the verbosity level of the operator or config-agent by
+setting the value of the log levels in the config spec (see the
+v1alpha1_config_gcp1.yaml file for an example), and then applying it with the
+command below.
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_config_gcp1.yaml -n $NS
+```
diff --git a/docs/content/monitoring/monitoring.md b/docs/content/monitoring/monitoring.md
new file mode 100644
index 0000000..abd9bea
--- /dev/null
+++ b/docs/content/monitoring/monitoring.md
@@ -0,0 +1,94 @@
+# Monitoring and Dashboards
+
+In the Preview release, only basic OS, cluster and Oracle metrics are
+collected through Prometheus and can be visualized with Grafana.
+
+## Monitoring Containers Setup
+
+To set up monitoring, run the installation script. This will deploy
+Prometheus, Alertmanager, node-exporter and Grafana on your Kubernetes
+cluster. The deployment will be in a separate namespace called "monitoring".
+
+```sh
+cd ${PATH_TO_EL_CARRO_RELEASE}
+chmod +x ./setup_monitoring.sh
+./setup_monitoring.sh install
+```
+
+## Deploying Oracle Database Monitoring
+
+Once the installation is done, configure your El Carro instance to start the
+Monitoring Service. To do this, edit your Instance manifest. For example:
+
+```sh
+vi ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_instance.yaml
+```
+
+Include the following lines:
+
+```sh
+  services:
+    Monitoring: true
+```
+
+Apply the configuration:
+
+```sh
+kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_instance.yaml -n $NS
+```
+
+## Set Up OracleDB as a Monitoring Target
+
+This step points Prometheus to start scraping the Oracle DB monitoring agent.
+
+```sh
+kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/db_monitor.yaml
+```
+
+## Viewing Monitoring Metrics in Prometheus
+
+To view the monitoring metrics in Prometheus, you need to port-forward the
+Prometheus service:
+
+```sh
+kubectl port-forward svc/prometheus-k8s 9090 -n monitoring
+```
+
+You can now access Prometheus at:
+
+```sh
+http://localhost:9090
+```
+
+## Dashboards
+
+Dashboards are set up in Grafana. To access the dashboards, set up port
+forwarding as follows:
+
+```sh
+kubectl port-forward svc/grafana 3000 -n monitoring
+```
+
+In your browser, navigate to:
+
+```sh
+http://localhost:3000/
+```
+
+You will be prompted for a username and password. Use admin for both values.
+You will be prompted immediately to change the admin password.
+
+Once you log into Grafana, choose Dashboards and then Manage.
+
+## Uninstalling Monitoring
+
+To uninstall the monitoring operator and ALL containers, run the following
+command:
+
+```sh
+${PATH_TO_EL_CARRO_RELEASE}/setup_monitoring.sh uninstall
+```
+
+If you see the error
+`fatal: destination path 'kube-prometheus' already exists and is not an empty directory.`
+then delete the `kube-prometheus` directory and rerun the uninstall script.
diff --git a/docs/content/monitoring/ui.md b/docs/content/monitoring/ui.md
new file mode 100644
index 0000000..75147bd
--- /dev/null
+++ b/docs/content/monitoring/ui.md
@@ -0,0 +1,36 @@
+# User Interface
+
+## Installation
+
+Run the following command to install the El Carro UI:
+
+```sh
+kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/ui.yaml
+```
+
+The output of the previous command looks like the following:
+
+```sh
+clusterrole.rbac.authorization.k8s.io/ui created
+clusterrolebinding.rbac.authorization.k8s.io/ui created
+deployment.apps/ui created
+service/ui created
+```
+
+## Visit the Web UI
+
+Forward the Web UI port to [http://localhost:8080](http://localhost:8080):
+
+```sh
+kubectl port-forward -n ui svc/ui 8080:80
+```
+
+Then you can visit the Web UI at [http://localhost:8080](http://localhost:8080).
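+If the page doesn't load, you can first confirm that the port-forward is
+serving traffic; a minimal check (plain curl, no El Carro-specific
+assumptions):
+
+```sh
+# Expect an HTTP status line such as "HTTP/1.1 200 OK" if the UI is reachable
+curl -sI http://localhost:8080 | head -n 1
+```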
+To create a new Instance, a new Database hosted on that Instance, or to take
+a Backup, use the side menu on the left.
+
+## GCP
+
+In the Preview release there's no special El Carro UI on GCP other than the
+Google Cloud Console, which you can use to view and manage a GKE cluster with
+the El Carro Operator and database in it.
diff --git a/docs/content/preparation.md b/docs/content/preparation.md
new file mode 100644
index 0000000..dc6de6e
--- /dev/null
+++ b/docs/content/preparation.md
@@ -0,0 +1,225 @@
+# Preparation
+
+The preparation steps consist of the following:
+
+1. [Set up a GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects).
+1. Create a containerized database image.
+1. [Create a Kubernetes cluster](https://kubernetes.io/docs/setup/).
+1. Deploy the El Carro Operator.
+
+## Set up a GCP Project
+
+Either create a new project or use an existing one with the following
+settings:
+
+```bash
+gcloud projects create $PROJECT_ID [--folder [...]]
+gcloud beta billing projects link $PROJECT_ID --billing-account [...]
+
+gcloud services enable \
+container.googleapis.com \
+anthos.googleapis.com \
+cloudbuild.googleapis.com \
+artifactregistry.googleapis.com \
+--project $PROJECT_ID
+```
+
+While the default compute service account can be used with El Carro, we
+recommend creating a dedicated one as follows:
+
+```bash
+gcloud iam service-accounts create $SERVICE_ACCOUNT --project $PROJECT_ID
+PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)")
+
+gcloud projects add-iam-policy-binding $PROJECT_ID --member=serviceAccount:service-${PROJECT_NUMBER}@containerregistry.iam.gserviceaccount.com --role=roles/containerregistry.ServiceAgent
+```
+
+Use this service account when you create a GKE cluster (for the GCP deployment
+route).
+
+### Download El Carro Software
+
+The preparation steps differ depending on the deployment platform.
+
+#### GCP
+
+On GCP you need to download and install the El Carro software yourself. The
+El Carro manifests are available both as a GitHub release and in Google Cloud
+Storage.
+
+Download the El Carro software to your workstation as follows:
+
+1) Option 1: You can download it from the [El Carro GitHub repo](https://github.com/GoogleCloudPlatform/elcarro-oracle-operator/releases).
+Choose one of the release versions, preferably the latest release. The release
+artifacts exist as *release-artifacts.tar.gz*.
+
+2) Option 2: You can choose one of the release versions, preferably the latest
+release, from this [GCS bucket](https://console.cloud.google.com/storage/browser/elcarro)
+using [gsutil](https://cloud.google.com/storage/docs/gsutil).
+
+```sh
+gsutil -m cp -r gs://elcarro/latest .
+Copying gs://elcarro/...
+...
+
+tree latest
+latest
+├── dashboards
+│   ├── db-dashboard.json
+│   ├── install-dashboards.jsonnet
+│   └── README.md
+├── dbimage
+│   ├── cloudbuild-18c-xe.yaml
+│   ├── cloudbuild.yaml
+│   ├── Dockerfile
+│   ├── image_build.sh
+│   ├── install-oracle-18c-xe.sh
+│   ├── install-oracle.sh
+│   ├── ora12-config.sh
+│   ├── ora19-config.sh
+│   └── README.md
+├── db_monitor.yaml
+├── deploy
+│   ├── csi
+│   │   ├── gce_pd_storage_class.yaml
+│   │   └── gce_pd_volume_snapshot_class.yaml
+│   ├── install-18c-xe.sh
+│   └── install.sh
+├── get_all_logs.sh
+├── operator.yaml
+├── samples
+│   ├── v1alpha1_backup_rman1.yaml
+│   ├── v1alpha1_backup_rman2.yaml
+│   ├── v1alpha1_backup_rman3.yaml
+│   ├── v1alpha1_backup_rman4.yaml
+│   ├── v1alpha1_backupschedule.yaml
+│   ├── v1alpha1_backup_snap1.yaml
+│   ├── v1alpha1_backup_snap2.yaml
+│   ├── v1alpha1_backup_snap_minikube.yaml
+│   ├── v1alpha1_config_bm1.yaml
+│   ├── v1alpha1_config_bm2.yaml
+│   ├── v1alpha1_config_gcp1.yaml
+│   ├── v1alpha1_config_gcp2.yaml
+│   ├── v1alpha1_config_gcp3.yaml
+│   ├── v1alpha1_config_minikube.yaml
+│   ├── v1alpha1_cronanything.yaml
+│   ├── v1alpha1_database_pdb1_express.yaml
+│   ├── v1alpha1_database_pdb1_gsm.yaml
+│   ├── v1alpha1_database_pdb1_unseeded.yaml
+│   ├── v1alpha1_database_pdb1.yaml
+│   ├── v1alpha1_database_pdb2.yaml
+│   ├── v1alpha1_database_pdb3.yaml
+│   ├── v1alpha1_database_pdb4.yaml
+│   ├── v1alpha1_export_dmp1.yaml
+│   ├── v1alpha1_export_dmp2.yaml
+│   ├── v1alpha1_import_pdb1.yaml
+│   ├── v1alpha1_instance_18c_XE_express.yaml
+│   ├── v1alpha1_instance_18c_XE.yaml
+│   ├── v1alpha1_instance_custom_seeded.yaml
+│   ├── v1alpha1_instance_express.yaml
+│   ├── v1alpha1_instance_gcp_ilb.yaml
+│   ├── v1alpha1_instance_minikube.yaml
+│   ├── v1alpha1_instance_standby.yaml
+│   ├── v1alpha1_instance_unseeded.yaml
+│   ├── v1alpha1_instance_with_backup_disk.yaml
+│   └── v1alpha1_instance.yaml
+├── setup_monitoring.sh
+├── ui.yaml
+└── workflows
+    ├── Kptfile
+    ├── README.md
+    ├── v1alpha1_database_pdb1.yaml
+    └── v1alpha1_instance.yaml
+
+6 directories, 60 files
+```
+
+The top-level files and directories are:
+
+* The `operator.yaml` is a collection of manifests that is used to deploy the
+  El Carro Operator.
+* The `ui.yaml` is a collection of manifests that is used to deploy the El
+  Carro UI.
+* The `dbimage` directory contains a set of files for building a containerized
+  database image described in the next section.
+* The `samples` directory contains the manifests for creating Custom Resources
+  (CRs) mentioned in this user guide.
+* The `workflows` directory is similar to samples, but the manifests there are
+  the DRY templates that can be hydrated with
+  [kpt](https://googlecontainertools.github.io/kpt/) to create/manage the same
+  Custom Resources (CRs).
+
+We recommend starting with the samples first, but as you become more familiar
+with El Carro, consider the more advanced use of declarative workflows that can
+be achieved with the parameterized templates in the workflows directory.
+
+The `db_monitor.yaml` and `setup_monitoring.sh` files are used to deploy El
+Carro monitoring and view metrics.
+
+### Create a Cluster
+
+#### GCP
+
+On GCP you create a GKE cluster on your own.
+GKE provides a fully managed Kubernetes cluster, which can be provisioned
+with a single command:
+
+```sh
+export ZONE=
+export CLUSTER_NAME=
+export SERVICE_ACCOUNT=
+
+gcloud beta container clusters create ${CLUSTER_NAME} --release-channel rapid --machine-type=n1-standard-4 --num-nodes 2 --zone ${ZONE} --project ${PROJECT_ID} --scopes gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging --service-account ${SERVICE_ACCOUNT} --addons GcePersistentDiskCsiDriver
+```
+
+If backups using storage snapshots are required (recommended by El Carro),
+additional steps are needed to set up a CSI driver (and its corresponding
+storage class). GKE comes with an on-board CSI driver, but it is not suitable
+for El Carro storage-based backups.
+
+The general installation process for the gce-pd-csi driver is described
+[here](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/docs/kubernetes/user-guides/driver-install.md).
+
+See an example run and the end result below:
+
+```sh
+$ cd $PATH_TO_EL_CARRO_RELEASE/deploy/csi
+
+$ kubectl create -f gce_pd_storage_class.yaml
+
+$ kubectl create -f gce_pd_volume_snapshot_class.yaml
+
+// Confirm that both resources have been created properly:
+
+$ kubectl get storageclasses
+NAME                 PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+csi-gce-pd           pd.csi.storage.gke.io   Delete          WaitForFirstConsumer   false                  30d
+standard (default)   kubernetes.io/gce-pd    Delete          Immediate              true                   30d
+
+$ kubectl get volumesnapshotclass
+NAME                        AGE
+csi-gce-pd-snapshot-class   32d
+```
+
+### Deploy El Carro Operator
+
+See the standard [K8s documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)
+on how to point kubectl to a particular cluster.
+
+#### GCP
+
+Given that you control how GKE clusters are created, the deployment of the
+El Carro Operator on your GKE-on-GCP cluster is also left to you as a user,
+but it is a one-step process:
+
+```sh
+$ kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/operator.yaml
+namespace/operator-system created
+```
+
+### Create a namespace
+
+You're free to deploy El Carro in a namespace of your choice, but that
+namespace should be created prior to creating an El Carro Instance as follows:
+
+```sh
+$ export NS=
+$ kubectl create namespace $NS
+```
diff --git a/docs/content/provision/config.md b/docs/content/provision/config.md
new file mode 100644
index 0000000..d19c5aa
--- /dev/null
+++ b/docs/content/provision/config.md
@@ -0,0 +1,119 @@
+# (Optional) Creating a Default Config
+
+This is an optional step designed to set namespace-wide (and if a single
+namespace is used, then cluster-wide) defaults. The default configuration is
+designed to achieve the following:
+
+* Ensure consistency across all the subsequent Instance, Database, Backup
+  manifests because the parameters would be taken from the Config created in
+  this step.
+
+* Make the subsequent manifests smaller because the common parameters would be
+  taken from a default config, for example: storage class, location of a
+  database image, disk type and sizes, etc.
+
+## Before you begin
+
+The following variables will be used in this quickstart:
+
+```sh
+export PROJECT_ID=
+export NS=
+export PATH_TO_EL_CARRO_RELEASE=
+```
+
+You should set these variables in your environment.
+
+To set up a default Config, you need to do the following:
+1. Prepare a Config CR Manifest
+
+   Depending on the deployment platform you are using, the default Config
+   manifest should look like one of the following examples:
+
+   GCP:
+
+   ```sh
+   $ cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_config_gcp2.yaml
+   apiVersion: oracle.db.anthosapis.com/v1alpha1
+   kind: Config
+   metadata:
+     name: config
+   spec:
+     images:
+       # Replace below with the actual URIs hosting the service agent images.
+       service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-unseeded"
+       config: "gcr.io/${PROJECT_ID}/oracle.db.anthosapis.com/configagent:latest"
+     platform: "GCP"
+     disks: [
+       {
+         name: "DataDisk",
+         type: "pd-standard",
+         size: "100Gi",
+       },
+       {
+         name: "LogDisk",
+         type: "pd-standard",
+         size: "150Gi",
+       }
+     ]
+     volumeSnapshotClass: "csi-gce-pd-snapshot-class"
+   ```
+
+   For the Preview release, El Carro relies on three storage volumes to host
+   the following:
+
+   * The Oracle binary tree.
+   * The data files.
+   * The archive redo logs and the RMAN backups.
+
+   The type of storage (for example: PD vs. PD-SSD) and the size of each
+   volume can be defined here in the Config manifest. Otherwise you would
+   need to specify them in each of the Instance manifests. El Carro currently
+   only supports a single Instance per cluster/namespace, but it's a good
+   practice to set these attributes globally in the Config CR.
+
+1. Submit the Config CR {: #submit-cr}
+
+   After completing the Config manifest, submit it to the local cluster as
+   follows:
+
+   ```sh
+   $ kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_config_gcp2.yaml -n ${NS}
+   ```
+
+1. (Optional) Review the Config CR
+
+   ```sh
+   $ kubectl get configs -n $NS
+   NAME     PLATFORM   DISK SIZES   STORAGE CLASS   VOLUME SNAPSHOT CLASS
+   config   GCP                                     csi-gce-pd-snapshot-class
+
+   $ kubectl describe config config -n $NS
+   Name:         config
+   Namespace:    db
+   Labels:
+   Annotations:
+   API Version:  oracle.db.anthosapis.com/v1alpha1
+   Kind:         Config
+   Metadata:
+     Creation Timestamp:  2020-09-05T03:26:31Z
+     Generation:          1
+     Resource Version:    17692020
+     Self Link:           /apis/oracle.db.anthosapis.com/v1alpha1/namespaces/db/configs/config
+     UID:                 a4883c72-ab65-4c56-9f06-df8ff68b526c
+   Spec:
+     Disks:
+       Name:  DataDisk
+       Size:  100Gi
+       Type:  pd-standard
+       Name:  LogDisk
+       Size:  150Gi
+       Type:  pd-standard
+     Images:
+       Config:   gcr.io/${PROJECT_ID}/oracle.db.anthosapis.com/configagent:latest
+       Service:  gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-unseeded
+     Platform:                GCP
+     Volume Snapshot Class:   csi-gce-pd-snapshot-class
+   Events:
+   ```
diff --git a/docs/content/provision/database.md b/docs/content/provision/database.md
new file mode 100644
index 0000000..5c03a67
--- /dev/null
+++ b/docs/content/provision/database.md
@@ -0,0 +1,124 @@
+# Create an El Carro Database
+
+This step depends on the successful creation of an Instance CR in the
+previous step. The preflight checks ensure that the Database Controller
+doesn't reconcile until the Instance is found in the Ready state and the
+database instance accepts connections.
+
+Confirm that the Instance CR has been created successfully and that its
+ReadyStatus and DBReadyStatus are "True":
+
+```sh
+export NS=
+kubectl get instances.oracle.db.anthosapis.com -n $NS
+
+NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL                  READYSTATUS   DBREADYSTATUS   DB NAMES   BACKUP ID
+mydb   Oracle      12.2      Enterprise   mydb-svc.db   34.122.76.205:6021   True          True
+```
+
+1. Prepare a Database CR Manifest
+
+   Please note that the user/schema credentials in the manifest below appear
+   in clear text and may need to be secured.
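+   As a general Kubernetes practice (not an El Carro-specific feature shown
+   in this guide), such values can be kept out of version-controlled
+   manifests by storing them in a Secret; a minimal sketch with hypothetical
+   names:
+
+   ```sh
+   # Hypothetical secret name and keys; keeps passwords out of the YAML file
+   kubectl create secret generic pdb1-credentials \
+     --from-literal=superuser-password=superpassword \
+     --from-literal=scott-password=tiger \
+     -n $NS
+   ```
+
+   The sample manifest itself looks like the following: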
+
+   ```sh
+   cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml
+   apiVersion: oracle.db.anthosapis.com/v1alpha1
+   kind: Database
+   metadata:
+     name: pdb1
+   spec:
+     name: pdb1
+     instance: mydb
+     admin_password: google
+     users:
+       - name: superuser
+         password: superpassword
+         privileges:
+           - dba
+       - name: scott
+         password: tiger
+         privileges:
+           - connect
+           - resource
+           - unlimited tablespace
+       - name: proberuser
+         password: proberpassword
+         privileges:
+           - create session
+   ```
+
+1. Submit the Database CR
+
+   After completing the Database manifest, submit it to the local cluster as
+   follows:
+
+   ```sh
+   kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_database_pdb1.yaml -n $NS
+   ```
+
+1. Review the Database CR
+
+   An easy way to monitor the state of the Database CR is by running the
+   following command:
+
+   ```sh
+   kubectl get databases.oracle.db.anthosapis.com -n $NS -w
+   NAMESPACE   NAME   INSTANCE   USERS   STATUS
+   db          pdb1   mydb       scott   Ready
+   ```
+
+   Once the Database CR status turns Ready, the PDB database is ready to use.
+   In addition to having a separate entry for each PDB in the Database CR, a
+   list of PDBs is also propagated up to the Instance CR and looks like the
+   following:
+
+   ```sh
+   kubectl get instances.oracle.db.anthosapis.com -n $NS
+   NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL                  READYSTATUS   DBREADYSTATUS   DB NAMES   BACKUP ID
+   mydb   Oracle      12.2      Enterprise   mydb-svc.db   34.122.76.205:6021   True          True            [pdb1]
+   ```
+
+   The above steps can be repeated to create additional Databases (in
+   particular, the samples supplied with the El Carro Preview release include
+   v1alpha1_database_pdb2.yaml).
+
+1. Connect to a Database
+
+   At this point a Database (PDB) should be fully functional and accessible
+   from outside the cluster via an external load balancer on a public IP
+   address and port 6021 (to be made configurable in future releases). The IP
+   and the port are combined together in the Instance.Status.URL attribute:
+
+   ```sh
+   kubectl get instances.oracle.db.anthosapis.com -n $NS
+   NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL                  READYSTATUS   DBREADYSTATUS   DB NAMES   BACKUP ID
+   mydb   Oracle      12.2      Enterprise   mydb-svc.db   34.122.76.205:6021   True          True
+   ```
+
+   As long as there's connectivity to the cluster, you should be able to
+   establish a connection to a database as follows:
+
+   ```sh
+   nc -vz 34.122.76.205 6021
+   Connection to 34.122.76.205 6021 port [tcp/*] succeeded!
+
+   sqlplus scott/tiger@34.122.76.205:6021/pdb1.gke
+
+   SQL> show user con_id con_name
+   USER is "SCOTT"
+
+   CON_ID
+   ------------------------------
+   3
+
+   CON_NAME
+   ------------------------------
+   PDB1
+   ```
+
+   Similar to SQL\*Plus, other client-side database tools can be used to
+   access an El Carro database.
+
+   Once a Database CR (and the corresponding PDB) is created, it can be
+   modified as described in [Appendix B](../custom-resources/database.md).
diff --git a/docs/content/provision/image.md b/docs/content/provision/image.md
new file mode 100644
index 0000000..b63f9f2
--- /dev/null
+++ b/docs/content/provision/image.md
@@ -0,0 +1,237 @@
+# Create a Containerized Database Image
+
+This guide is only valid for Oracle Database 12c. To build an Oracle Database
+18c XE image, refer to the [18c XE quickstart guide](../quickstart-18c-xe.md).
+
+## Before you begin
+
+The following variables will be used in this guide:
+
+```sh
+export DBNAME=
+export PROJECT_ID=
+export SERVICE_ACCOUNT=
+export PATH_TO_EL_CARRO_RELEASE=
+export GCS_BUCKET=
+```
+
+You should set these variables in your environment.
+
+**There are two options to build the actual container database image: using
+Google Cloud Build or building the image locally using Docker.**
+
+### Using Google Cloud Build to create a containerized Oracle database image (Recommended)
+
+1. Download the Oracle software to Google Cloud Storage.
+
+   El Carro has only been tested with
+   [Oracle 12c R2 (12.2)](https://docs.oracle.com/en/database/oracle/oracle-database/12.2/index.html)
+   and
+   [Oracle 18c XE](https://www.oracle.com/database/technologies/appdev/xe.html).
+   The recommended place to obtain the database software is the official
+   Oracle eDelivery Cloud. Patches can be downloaded from the Oracle support
+   website. As an El Carro user, you're advised to consult your licensing
+   agreement with Oracle Corp to decide where to get the software from. To
+   create an Oracle database image, you will need to download three pieces of
+   software from Oracle's website:
+
+   - Oracle Database 12c Release 2 (12.2.0.1.0) for Linux x86-64 (Enterprise
+     Edition), which can be downloaded from the
+     [Oracle eDelivery Cloud](https://edelivery.oracle.com).
+   - A recent PSU. The Jan 2021 PSU can be downloaded
+     [here](https://support.oracle.com/epmos/faces/PatchDetail?_adf.ctrl-state=bsblgctta_4&patch_name=32228578&releaseId=600000000018520&patchId=32228578&languageId=0&platformId=226&_afrLoop=314820757336783).
+   - The latest available OPatch, which can be downloaded from the Oracle
+     support website. We recommend choosing the following download
+     parameters:
+     - Release: OPatch 20.0.0.0.0
+     - Platform: Linux x86_64
+
+   The result of downloading the three Oracle artifacts should look like the
+   following:
+
+   ```sh
+   ls -l ~/Downloads
+   -rw-r--r--@ 1 primaryuser  primarygroup   856130787 Oct 13 13:24 p32228578_122010_Linux-x86-64.zip
+   -rw-r--r--@ 1 primaryuser  primarygroup   119259475 Oct 13 12:45 p6880880_200000_Linux-x86-64.zip
+   -rw-r--r--@ 1 primaryuser  primarygroup  3453696911 Oct 13 12:33 linuxx64_12201_database.zip
+   ```
+
+   Next, transfer these three files to a Google Cloud Storage bucket. If
+   needed, a new Google Cloud Storage bucket can be created as follows:
+
+   ```sh
+   gsutil mb gs://${GCS_BUCKET}
+   gsutil cp ~/Downloads/linuxx64_12201_database.zip gs://${GCS_BUCKET}/install/
+   gsutil cp ~/Downloads/p6880880_200000_Linux-x86-64.zip gs://${GCS_BUCKET}/install/
+   gsutil cp ~/Downloads/p32228578_122010_Linux-x86-64.zip gs://${GCS_BUCKET}/install/
+   ```
+
+   This is an example of how the three files in a Google Cloud Storage bucket
+   could look on the command line and in the Google Cloud Console:
+
+   ```sh
+   gsutil ls -l gs://${GCS_BUCKET}/install
+            0  2020-10-13T19:24:05Z  gs://${GCS_BUCKET}/install/
+   3453696911  2020-10-13T19:24:24Z  gs://${GCS_BUCKET}/install/linuxx64_12201_database.zip
+    856130787  2020-10-13T19:37:29Z  gs://${GCS_BUCKET}/install/p32228578_122010_Linux-x86-64.zip
+    119259475  2020-10-13T19:26:33Z  gs://${GCS_BUCKET}/install/p6880880_200000_Linux-x86-64.zip
+   ```
+
+   Once the bucket is ready, grant the IAM read privilege
+   (roles/storage.objectViewer) to the Google Cloud Build service account.
+   Use your project name as the PROJECT_ID and your globally unique Google
+   Cloud Storage bucket name in the snippet below:
+
+   ```sh
+   export PROJECT_NUMBER=$(gcloud projects describe ${PROJECT_ID} --format="value(projectNumber)")
+   gsutil iam ch serviceAccount:${PROJECT_NUMBER}@cloudbuild.gserviceaccount.com:roles/storage.objectViewer gs://${GCS_BUCKET}
+   ```
+
+2. Trigger the Google Cloud Build (GCB) pipeline.
+
+   You have a choice of creating a container image with just the Oracle RDBMS
+   software in it (based on what you downloaded and placed in a Google Cloud
+   Storage bucket) or, optionally, creating a seed database at the same time
+   and hosting it in the same image. El Carro recommends that you consider
+   the following before making this decision:
+
+   * A database container image with the seed database in it is "heavier" and
+     takes longer to download at runtime; however, the overall El Carro
+     Instance provisioning with the seed image is still faster compared to
+     creating a database instance from scratch at runtime.
+
+   * The downsides of including a seed database in the container image are
+     related to image maintenance and flexibility:
+
+     * If all/most of your databases use the same character set and
+       same/similar database options (Oracle Text, APEX, etc.), having a
+       seed database in the image may be the most cost effective way for
+       you to proceed.
+
+     * If, on the other hand, your databases are very different in terms of
+       database options, init parameters and character sets, it may be
+       easier for you to not include a seed database in the image, which
+       makes the provisioning time longer, but relieves you from maintaining
+       multiple container images.
+
+   To proceed with creating a seed database as part of the container image
+   build, add `--create_cdb true` and optionally specify a seed database
+   name, e.g. `--cdb_name ORCL`. (Note that if a seed database name is not
+   provided, image_build.sh defaults it to GCLOUD. This value can be changed
+   at runtime.)
+
+   Set PATH_TO_EL_CARRO_RELEASE to the directory where the El Carro release
+   was downloaded to.
+
+   ```sh
+   cd ${PATH_TO_EL_CARRO_RELEASE}/dbimage
+   chmod +x ./image_build.sh
+   ./image_build.sh --install_path=gs://${GCS_BUCKET}/install --db_version=12.2 --create_cdb=true --cdb_name=${DBNAME} --mem_pct=45 --no_dry_run --project_id=${PROJECT_ID}
+   ```
+
+   If you prefer to create a database container image without a seed
+   database, set `--create_cdb=false`.
+
+   ```sh
+   cd ${PATH_TO_EL_CARRO_RELEASE}/dbimage
+   chmod +x ./image_build.sh
+   ./image_build.sh --install_path=gs://${GCS_BUCKET}/install --db_version=12.2 --create_cdb=false --mem_pct=45 --no_dry_run --project_id=${PROJECT_ID}
+   ```
+
+   Note that depending on the options, creating a containerized image may
+   take ~40+ minutes.
+
+   If an AccessDeniedException is raised against the above command, that
+   likely means that the previous `gsutil iam ch` command didn't succeed.
+   Once fixed, rerun the above image build script.
+
+3. Verify that your containerized database image was successfully created.
+
+   Creating a containerized image can take ~40+ minutes, and the progress is
+   trackable from both the command line (from the previous command) and the
+   Google Cloud Console UI.
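+   If you've navigated away from the build output in your terminal, one way
+   to check on the build from the command line is Cloud Build's list command;
+   a small sketch (standard gcloud, assuming the build runs in the same
+   project):
+
+   ```sh
+   # Lists builds still in progress; an empty result means the build finished
+   gcloud builds list --project ${PROJECT_ID} --ongoing
+   ```
+
+   Once the build is done, the commands below confirm the resulting image: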
+
+   ```sh
+   gcloud container images list --project ${PROJECT_ID} --repository gcr.io/${PROJECT_ID}/oracle-database-images --filter=oracle-12.2-ee-seeded-$(echo "${DBNAME}" | tr '[:upper:]' '[:lower:]')
+
+   NAME
+   gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DBNAME}
+
+   gcloud container images describe gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-$(echo "${DBNAME}" | tr '[:upper:]' '[:lower:]') --project ${PROJECT_ID}
+   image_summary:
+     digest: sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     fully_qualified_digest: gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DBNAME}@sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     registry: gcr.io
+     repository: ${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DBNAME}
+   ```
+
+### Building a containerized Oracle database image locally using Docker
+
+If you're not able or prefer not to use Google Cloud Build to create a
+containerized database image, you can build an image locally using
+[Docker](https://www.docker.com). You need to push your locally built image to
+a registry that your Kubernetes cluster can pull images from. You must have
+Docker installed before proceeding with a local containerized database image
+build.
+
+1. Copy the Oracle binaries you downloaded earlier to
+   ${PATH_TO_EL_CARRO_RELEASE}/dbimage.
+
+   Your ${PATH_TO_EL_CARRO_RELEASE}/dbimage directory should look something
+   like this:
+
+   ```sh
+   ls -1X
+   Dockerfile
+   README.md
+   image_build.sh
+   install-oracle-18c-xe.sh
+   install-oracle.sh
+   ora12-config.sh
+   ora19-config.sh
+   cloudbuild.yaml
+   p32228578_122010_Linux-x86-64.zip
+   p6880880_200000_Linux-x86-64.zip
+   V839960-01.zip
+   ```
+
+2. Trigger the image creation script.
+
+   You have the choice of creating a container image with just the Oracle
+   RDBMS software in it or, optionally, creating a seed database (CDB) at the
+   same time and hosting it in the same container image. Seeded images are
+   larger in size but may save you time during provisioning. For a quick
+   start, we suggest you create a seeded container image.
+
+   - To create a seeded image, run the following:
+
+     ```sh
+     cd ${PATH_TO_EL_CARRO_RELEASE}/dbimage
+     chmod +x ./image_build.sh
+
+     ./image_build.sh --local_build=true --db_version=12.2 --patch_version=32228578 --create_cdb=true --cdb_name=${DBNAME} --mem_pct=45 --no_dry_run --project_id=local-build
+
+     Executing the following command:
+     [...]
+     ```
+
+   - To create an unseeded image (one without a CDB), run the same steps as
+     for the seeded case but set the `--create_cdb` flag to `false` and omit
+     the `--cdb_name` parameter.
+
+3. Verify that your containerized database image was successfully created.
+
+   Depending on the options you choose when you run the image creation
+   script, creating a containerized image may take ~40+ minutes. To verify
+   that your image was successfully created, run the following command:
+
+   ```sh
+   docker images
+   REPOSITORY                                                                  TAG      IMAGE ID       CREATED       SIZE
+   gcr.io/local-build/oracle-database-images/oracle-12.2-ee-seeded-${DBNAME}   latest   c766d980c9a0   2 hours ago   17.4GB
+   ```
+
+4. Retag your locally built image if necessary and push it to a registry that
+   your Kubernetes cluster can pull images from.
+
+## What's Next
+
+Check out the [instance provisioning guide](instance.md) to learn how to
+deploy your newly built image to a Kubernetes cluster using El Carro.
diff --git a/docs/content/provision/instance.md b/docs/content/provision/instance.md
new file mode 100644
index 0000000..4d8038e
--- /dev/null
+++ b/docs/content/provision/instance.md
@@ -0,0 +1,118 @@
+# Create an El Carro Instance: Basic
+
+This step depends on the previous one of successfully creating a containerized
+database image in GCR. Once the image is ready, you need to tell El Carro the
+location of that image in GCR as part of the Instance manifest. For more
+advanced cases you can review the parameterized template manifests described
+in [Appendix A](../custom-resources/instance.md).
+
+1. Prepare an Instance CR Manifest
+
+   El Carro instances are created from YAML configuration files. We have
+   provided an example of this configuration file. As a bare minimum, update
+   Instance.Spec.Images.Service to point El Carro to the location of the
+   database container image that you created in the previous step.
+
+   ```sh
+   cat ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_instance_custom_seeded.yaml
+   apiVersion: oracle.db.anthosapis.com/v1alpha1
+   kind: Instance
+   metadata:
+     name: mydb
+   spec:
+     type: Oracle
+     version: "12.2"
+     edition: Enterprise
+     dbDomain: "gke"
+     disks:
+       - name: DataDisk
+         size: 45Gi
+         type: pd-standard
+         storageClass: "csi-gce-pd"
+       - name: LogDisk
+         size: 55Gi
+         type: pd-standard
+         storageClass: "csi-gce-pd"
+     services:
+       Backup: true
+       Monitoring: true
+       Logging: true
+     images:
+       service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}"
+     sourceCidrRanges: [0.0.0.0/0]
+     databaseUID: 54321
+     databaseGID: 54322
+     # Oracle SID character limit is 8, anything > gets truncated by Oracle
+     cdbName: ${DB}
+
+     # Uncomment this section to trigger a restore.
+     # restore:
+     #   backupType: "Snapshot" (or "Physical")
+     #   backupId: "mydb-20200705-snap-996678001"
+     #   force: True
+     #   # once applied, new requests with same or older time will be ignored,
+     #   # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ'
+     #   requestTime: "2000-01-19T01:23:45Z"
+     #   # Physical backup specific attributes:
+     #   dop: 2
+     #   # The unit for time limit is minutes (but specify just an integer).
+     #   timeLimit: 180
+   ```
+
+1. Submit the Instance CR
+
+   After completing the Instance manifest, submit it to the local cluster as
+   follows:
+
+   ```sh
+   export NS=
+   kubectl apply -f ${PATH_TO_EL_CARRO_RELEASE}/samples/v1alpha1_instance_custom_seeded.yaml -n $NS
+   ```
+
+1. Review the Instance CR
+
+   You can monitor the state of the Instance CR by running the following
+   command:
+
+   ```sh
+   kubectl get instances.oracle.db.anthosapis.com -n $NS -w
+   ```
+
+   Note the ReadyStatus and the DBReadyStatus fields that denote the status
+   of an Instance K8s CR and the status of the underlying database instance
+   respectively. Once both turn "True", the Instance is ready to use.
+
+1. (Optional) List the database processes
+
+   At this point a database instance should be fully operational.
+   You can "exec" into a database container and inspect the background
+   processes as described below:
+
+```sh
+kubectl get instances.oracle.db.anthosapis.com -n $NS
+NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL                  READYSTATUS   DBREADYSTATUS   DB NAMES
+mydb   Oracle      12.2      Enterprise   mydb-svc.db   34.122.76.205:6021   True          True
+
+kubectl exec -ti $(kubectl get pod -n $NS -l instance=mydb -o jsonpath="{.items[0].metadata.name}") -c oracledb -n $NS -- /bin/bash
+
+[oracle@mydb-sts-0 /]$ source ~/MYDB.env
+[oracle@mydb-sts-0 ~]$ sqlplus / as sysdba
+
+SQL> select dbid, open_mode, database_role from v$database;
+
+      DBID OPEN_MODE            DATABASE_ROLE
+---------- -------------------- ----------------
+1591708746 READ WRITE           PRIMARY
+
+SQL> show pdbs
+
+    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
+---------- ------------------------------ ---------- ----------
+         2 PDB$SEED                       READ ONLY  NO
+```
+
+## What's Next
+
+Check out the [database provisioning guide](database.md) to learn how to
+create a database (PDB) in this instance (CDB) using El Carro.
\ No newline at end of file
diff --git a/docs/content/quickstart-18c-xe.md b/docs/content/quickstart-18c-xe.md
new file mode 100644
index 0000000..5c00090
--- /dev/null
+++ b/docs/content/quickstart-18c-xe.md
@@ -0,0 +1,322 @@
+# El Carro Operator installation guide
+
+El Carro is a new tool that allows users to keep full control of their
+database environment (root on a machine, sysdba in Oracle), while helping
+users automate several aspects of managing their database services.
+
+El Carro helps you with the deployment and management of database software
+(like Oracle Database) on Kubernetes. You must have appropriate licensing
+rights to that database software to allow you to use it with El Carro (BYOL).
+
+This quickstart aims to help get your licensed Oracle database up and running
+on Kubernetes. This guide is only intended for Oracle 18c XE, which is free
+to use. If you prefer to use an Enterprise Edition of Oracle with El Carro
+and have a valid Oracle license, check out the
+[main quickstart guide](quickstart.md) instead.
+
+## Before you begin
+
+The following variables will be used in this quickstart:
+
+```sh
+export DBNAME=
+export PROJECT_ID=
+export SERVICE_ACCOUNT_ID=
+export PATH_TO_EL_CARRO_RELEASE=
+export ZONE=
+export CLUSTER_NAME=
+```
+
+Download the El Carro software to your workstation as follows:
+
+1) Option 1: You can download it from the [El Carro GitHub repo](https://github.com/GoogleCloudPlatform/elcarro-oracle-operator/releases).
+Choose one of the release versions, preferably the latest release. The release
+artifacts exist as *release-artifacts.tar.gz*.
+
+2) Option 2: You can choose one of the release versions, preferably the latest
+release, from this [GCS bucket](https://console.cloud.google.com/storage/browser/elcarro)
+using [gsutil](https://cloud.google.com/storage/docs/gsutil).
+
+```sh
+gsutil -m cp -r gs://elcarro/latest $PATH_TO_EL_CARRO_RELEASE
+```
+
+[Create a new GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
+or reuse an existing one to install El Carro.
+
+```sh
+gcloud projects create $PROJECT_ID [--folder [...]]
+gcloud beta billing projects link $PROJECT_ID --billing-account [...]
+```
+
+Set the gcloud config project to $PROJECT_ID:
+```sh
+gcloud config set project $PROJECT_ID
+```
+
+Check the gcloud config project:
+```sh
+gcloud config get-value project
+```
+
+To get El Carro up and running, you need to do one of the following:
+
+**Express install**
+
+Run the express install script:
+
+```sh
+cd $PATH_TO_EL_CARRO_RELEASE/deploy
+chmod +x ./install-18c-xe.sh
+
+./install-18c-xe.sh --service_account $SERVICE_ACCOUNT_ID@$PROJECT_ID.iam.gserviceaccount.com
+```
+
+Optionally, set the CDB name, GKE cluster name and GKE zone:
+
+```sh
+./install-18c-xe.sh --service_account $SERVICE_ACCOUNT_ID@$PROJECT_ID.iam.gserviceaccount.com --cdb_name $DBNAME --cluster_name $CLUSTER_NAME --gke_zone $ZONE
+```
+
+Check out
+[Creating and Managing Service Accounts](https://cloud.google.com/iam/docs/creating-managing-service-accounts)
+if you need help creating or locating an existing service account.
+
+OR
+
+**Perform the manual install steps:**
+
+* Download the El Carro software.
+* Create a containerized database image.
+* Provision a Kubernetes cluster. We recommend a cluster running
+  Kubernetes/GKE version 1.17 or above.
+* Deploy the El Carro Operator to your Kubernetes cluster.
+* Create a CDB and PDB (Database) via the El Carro Operator.
+
+## Check downloaded El Carro software
+
+* The `operator.yaml` is a collection of manifests that is used to deploy the
+  El Carro Operator.
+* The `ui.yaml` is a collection of manifests that is used to deploy the El
+  Carro UI.
+* The `dbimage` directory contains a set of files for building a
+  containerized database image described in the next section.
+* The `samples` directory contains the manifests for creating Custom
+  Resources (CRs) mentioned in this user guide.
+* The `workflows` directory is similar to samples, but the manifests there
+  are the DRY templates that can be hydrated with
+  [kpt](https://googlecontainertools.github.io/kpt/) to create/manage the
+  same Custom Resources (CRs).
+
+We recommend starting with the samples first, but as you become more familiar
+with El Carro, consider the more advanced use of declarative workflows that
+can be achieved with the parameterized templates in the workflows directory.
+
+The `db_monitor.yaml` and `setup_monitoring.sh` files are used to deploy El
+Carro monitoring and view metrics.
+
+## Creating a containerized Oracle database image
+
+There are two options to build the actual container database image: using
+Google Cloud Build or building the image locally using Docker.
+
+### Using Google Cloud Build to create a containerized Oracle database image (Recommended)
+
+1. [Create a new GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
+   or reuse an existing one with the following settings:
+
+   ```sh
+   gcloud projects create $PROJECT_ID [--folder [...]]
+   gcloud beta billing projects link $PROJECT_ID --billing-account [...]
+
+   gcloud services enable container.googleapis.com anthos.googleapis.com cloudbuild.googleapis.com artifactregistry.googleapis.com --project $PROJECT_ID
+   ```
+
+   Though the default compute service account can be used with El Carro, we
+   recommend creating a dedicated one as follows:
+
+   ```sh
+   gcloud iam service-accounts create $SERVICE_ACCOUNT_ID --project $PROJECT_ID
+   export PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)")
+   gcloud projects add-iam-policy-binding $PROJECT_ID --member=serviceAccount:service-${PROJECT_NUMBER}@containerregistry.iam.gserviceaccount.com --role=roles/containerregistry.ServiceAgent
+   ```
+
+2. Trigger the Google Cloud Build pipeline
+
+   When using Oracle 18c XE, you can only create seeded (containing a CDB)
+   images. To create a seeded image, run the following:
+
+   ```sh
+   cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+   chmod +x ./image_build.sh
+
+   ./image_build.sh --db_version=18c --create_cdb=true --cdb_name=$DBNAME --no_dry_run --project_id=$PROJECT_ID
+
+   Executing the following command:
+   [...]
+   ```
+
+3. Verify that your containerized database image was successfully created.
+
+   Cloud Build should take around 45 minutes to build the image. To verify
+   the image that was created, run:
+
+   ```sh
+   gcloud container images list --project $PROJECT_ID --repository gcr.io/$PROJECT_ID/oracle-database-images
+
+   NAME
+   gcr.io/$PROJECT_ID/oracle-database-images/oracle-18c-xe-seeded-$DBNAME
+
+   gcloud container images describe gcr.io/$PROJECT_ID/oracle-database-images/oracle-18c-xe-seeded-$DBNAME
+
+   image_summary:
+     digest: sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     fully_qualified_digest: gcr.io/$PROJECT_ID/oracle-database-images/oracle-18c-xe-seeded-$DBNAME@sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     registry: gcr.io
+     repository: $PROJECT_ID/oracle-database-images/oracle-18c-xe-seeded-$DBNAME
+   ```
+
+### Building a containerized Oracle database image locally using Docker
+
+If you're not able or prefer not to use Google Cloud Build to create a
+containerized database image, you can build an image locally using
+[Docker](https://www.docker.com). You need to push your locally built image to
+a registry that your Kubernetes cluster can pull images from. You must have
+Docker installed before proceeding with a local containerized database image
+build.
+
+1. Trigger the image creation script
+
+   When using Oracle 18c XE, you can only create seeded (containing a CDB)
+   images. To create a seeded image, run the following:
+
+   ```sh
+   cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+   chmod +x ./image_build.sh
+
+   ./image_build.sh --local_build=true --db_version=18c --create_cdb=true --cdb_name=$DBNAME --no_dry_run --project_id=local-build
+   ```
+
+2. Verify that your containerized database image was successfully created.
+
+   Docker should take around 20-30 minutes to build the image. To verify that
+   your image was successfully created, run the following command:
+
+   ```sh
+   docker images
+   REPOSITORY                                                               TAG      IMAGE ID       CREATED       SIZE
+   gcr.io/local-build/oracle-database-images/oracle-18c-xe-seeded-$DBNAME   latest   c766d980c9a0   2 hours ago   11.8GB
+   ```
+
+3. Retag your locally built image if necessary and push it to a registry that
+   your Kubernetes cluster can pull images from.
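+   For example, to push to Container Registry in your project (a sketch; the
+   destination path is an assumption, and any registry your cluster can pull
+   from works):
+
+   ```sh
+   export XE_IMG=gcr.io/$PROJECT_ID/oracle-database-images/oracle-18c-xe-seeded-$DBNAME:latest
+   docker tag gcr.io/local-build/oracle-database-images/oracle-18c-xe-seeded-$DBNAME:latest $XE_IMG
+   docker push $XE_IMG
+   ```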
+
+## Provisioning a Kubernetes cluster on GKE to run El Carro
+
+To provision a Kubernetes cluster on Google Kubernetes Engine (GKE), run the
+following command:
+
+```sh
+gcloud container clusters create $CLUSTER_NAME --release-channel rapid --machine-type=n1-standard-4 --num-nodes 2 --zone $ZONE --project $PROJECT_ID --scopes gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging --service-account $SERVICE_ACCOUNT_ID@$PROJECT_ID.iam.gserviceaccount.com --addons GcePersistentDiskCsiDriver
+```
+
+To get the cluster ready for El Carro, create a Kubernetes storage class and
+a volume snapshot class as follows:
+
+```sh
+kubectl create -f $PATH_TO_EL_CARRO_RELEASE/deploy/csi/gce_pd_storage_class.yaml
+kubectl create -f $PATH_TO_EL_CARRO_RELEASE/deploy/csi/gce_pd_volume_snapshot_class.yaml
+```
+
+Verify that both resources have been created properly by running:
+
+```sh
+kubectl get storageclasses
+NAME                 PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+csi-gce-pd           pd.csi.storage.gke.io   Delete          WaitForFirstConsumer   false                  30d
+standard (default)   kubernetes.io/gce-pd    Delete          Immediate              true                   30d
+
+kubectl get volumesnapshotclass
+NAME                        AGE
+csi-gce-pd-snapshot-class   78s
+```
+
+## Deploying the El Carro Operator to a Kubernetes cluster
+
+You can use `kubectl` to deploy the El Carro Operator to your cluster by
+running:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/operator.yaml
+namespace/operator-system created
+[...]
+```
+
+## Creating an El Carro Instance
+
+An instance consists of Oracle software running in a container and a CDB. To
+create an instance:
+
+- Create a namespace to host your instance by running:
+
+  ```sh
+  kubectl create namespace db
+  ```
+
+- Modify $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_instance_18c_XE.yaml to
+  include the link to the database service image you built earlier.
+
+- Apply the modified yaml file by running:
+
+  ```sh
+  kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_instance_18c_XE.yaml -n db
+  ```
+
+Monitor the creation of your instance by running:
+
+```sh
+kubectl get -w instances.oracle.db.anthosapis.com -n db
+NAME   DB ENGINE   VERSION   EDITION   ENDPOINT      URL                DB NAMES   BACKUP ID   READYSTATUS   READYREASON        DBREADYSTATUS   DBREADYREASON
+mydb   Oracle      18c       Express   mydb-svc.db   34.71.69.25:6021                          False         CreateInProgress
+```
+
+Once your instance is ready, the **READYSTATUS** and **DBREADYSTATUS** will
+both flip to **TRUE**.
+
+Tip: You can monitor the logs from the El Carro operator by running:
+
+```sh
+kubectl logs -l control-plane=controller-manager -n operator-system -c manager -f
+```
+
+## Creating a PDB (Database)
+
+To store and query data, create a PDB and attach it to the instance you
+created in the previous step by running:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_database_pdb1.yaml -n db
+```
+
+Monitor the creation of your PDB by running:
+
+```sh
+kubectl get -w databases.oracle.db.anthosapis.com -n db
+NAME   INSTANCE   USERS                                PHASE   DATABASEREADYSTATUS   DATABASEREADYREASON   USERREADYSTATUS   USERREADYREASON
+pdb1   mydb       ["superuser","scott","proberuser"]   Ready   True                  CreateComplete        True              SyncComplete
+```
+
+Once your PDB is ready, the **DATABASEREADYSTATUS** and **USERREADYSTATUS**
+will both flip to **TRUE**.
+
+You can access your PDB externally by using
+[sqlplus](https://docs.oracle.com/en/database/oracle/oracle-database/18/sqpug/index.html):
+
+```sh
+sqlplus scott/tiger@$INSTANCE_URL/pdb1.gke
+```
+
+Replace $INSTANCE_URL with the URL that was assigned to your instance.
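+Rather than copying the URL from the table output, it can also be read
+directly from the Instance CR; a sketch assuming the endpoint is exposed
+under `.status.url` (as the URL column in the output above suggests):
+
+```sh
+# Read the endpoint from the CR and connect through it
+export INSTANCE_URL=$(kubectl get instances.oracle.db.anthosapis.com mydb -n db -o jsonpath='{.status.url}')
+sqlplus scott/tiger@$INSTANCE_URL/pdb1.gke
+```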
+
+## Oracle 18c XE Limitations
+
+Oracle 18c XE has the following resource limitations:
+
+- Up to 12 GB of user data
+- Up to 2 GB of database RAM
+- Up to 2 CPU threads
+- Up to 3 Pluggable Databases
+
+More details on what's included in Oracle 18c XE can be found on
+[Oracle's website](https://www.oracle.com/database/technologies/appdev/xe.html).
diff --git a/docs/content/quickstart.md b/docs/content/quickstart.md
new file mode 100644
index 0000000..1369f1e
--- /dev/null
+++ b/docs/content/quickstart.md
@@ -0,0 +1,425 @@
+# El Carro Operator installation guide
+
+El Carro is a new tool that allows users to keep full control of their
+database environment (root on a machine, sysdba in Oracle), while helping
+users automate several aspects of managing their database services.
+
+El Carro helps you with the deployment and management of database software
+(like Oracle Database) on Kubernetes. You must have appropriate licensing
+rights to that database software to allow you to use it with El Carro (BYOL).
+
+This quickstart aims to help get your licensed Oracle database up and running
+on Kubernetes. This guide is only intended for users that have a valid
+license for Oracle 12c EE. If you do not have a license, you should use
+Oracle 18c XE, which is free to use, by following the
+[quickstart guide for Oracle 18c XE](quickstart-18c-xe.md) instead.
+
+## Before you begin
+
+The following variables will be used in this quickstart:
+
+```sh
+export DBNAME=
+export PROJECT_ID=
+export SERVICE_ACCOUNT=
+export PATH_TO_EL_CARRO_RELEASE=
+export GCS_BUCKET=
+export ZONE=
+export CLUSTER_NAME=
+```
+
+You should set these variables in your environment.
+
+Download the El Carro software to your workstation as follows:
+
+1) Option 1: You can download it from the [El Carro GitHub repo](https://github.com/GoogleCloudPlatform/elcarro-oracle-operator/releases).
+Choose one of the release versions, preferably the latest release. The release
+artifacts exist as *release-artifacts.tar.gz*.
+
+2) Option 2: You can choose one of the release versions, preferably the latest
+release, from this [GCS bucket](https://console.cloud.google.com/storage/browser/elcarro)
+using [gsutil](https://cloud.google.com/storage/docs/gsutil).
+
+```sh
+gsutil -m cp -r gs://elcarro/latest $PATH_TO_EL_CARRO_RELEASE
+```
+
+[Create a new GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
+or reuse an existing one to install El Carro.
+
+```sh
+gcloud projects create $PROJECT_ID [--folder [...]]
+gcloud beta billing projects link $PROJECT_ID --billing-account [...]
+```
+
+Set the gcloud config project to $PROJECT_ID:
+```sh
+gcloud config set project $PROJECT_ID
+```
+
+Check the gcloud config project:
+```sh
+gcloud config get-value project
+```
+
+To get El Carro up and running, you need to do one of the following:
+
+**Express install**
+
+Run the express install script:
+
+```sh
+$PATH_TO_EL_CARRO_RELEASE/deploy/install.sh --gcs_oracle_binaries_path $GCS_BUCKET --service_account $SERVICE_ACCOUNT
+```
+
+Optionally, set the CDB name, GKE cluster name and GKE zone:
+
+```sh
+$PATH_TO_EL_CARRO_RELEASE/deploy/install.sh --gcs_oracle_binaries_path $GCS_BUCKET --service_account $SERVICE_ACCOUNT --cdb_name $DBNAME --cluster_name $CLUSTER_NAME --gke_zone $ZONE
+```
+
+Check out
+[Creating and Managing Service Accounts](https://cloud.google.com/iam/docs/creating-managing-service-accounts)
+if you need help creating or locating an existing service account.
+
+OR
+
+**Perform the manual install steps:**
+
+* Check the downloaded El Carro software.
+* Create a containerized database image.
+* Provision a Kubernetes cluster. We recommend a cluster running
+  Kubernetes/GKE version 1.17 or above.
+* Deploy the El Carro Operator to your Kubernetes cluster.
+
+## Check downloaded El Carro software
+
+The El Carro software downloaded to $PATH_TO_EL_CARRO_RELEASE contains the
+following artifacts:
+
+* The `operator.yaml` is a collection of manifests that is used to deploy the
+  El Carro Operator.
+* The `ui.yaml` is a collection of manifests that is used to deploy the El
+  Carro UI.
+* The `dbimage` directory contains a set of files for building a containerized
+  database image described in the next section.
+* The `samples` directory contains the manifests for creating Custom Resources
+  (CRs) mentioned in this user guide.
+* The `workflows` directory is similar to samples, but the manifests there are
+  the DRY templates that can be hydrated with
+  [kpt](https://googlecontainertools.github.io/kpt/) to create/manage the same
+  Custom Resources (CRs).
+
+We recommend starting with the samples first, but as you become more familiar
+with El Carro, consider the more advanced use of declarative workflows that
+can be achieved with the parameterized templates in the workflows directory.
+
+The `db_monitor.yaml` and `setup_monitoring.sh` files are used to deploy El
+Carro monitoring and view metrics.
+
+## Creating a containerized Oracle database image
+
+**Only Oracle 12c EE and 18c XE are currently supported by El Carro**. The
+recommended place to obtain the database software is the official Oracle
+eDelivery Cloud. Patches can be downloaded from the Oracle support website.
+As an El Carro user, you're advised to consult your licensing agreement with
+Oracle Corp to decide where to get the software from.
+
+To create an Oracle database image for 12c EE, you will need to download
+three pieces of software from Oracle's website:
+
+- Oracle Database 12c Release 2 (12.2.0.1.0) for Linux x86-64 (Enterprise
+  Edition), which can be downloaded from the
+  [Oracle eDelivery Cloud](https://edelivery.oracle.com).
+- A recent PSU. The Jan 2021 PSU can be downloaded
+  [here](https://support.oracle.com/epmos/faces/PatchDetail?_adf.ctrl-state=bsblgctta_4&patch_name=32228578&releaseId=600000000018520&patchId=32228578&languageId=0&platformId=226&_afrLoop=314820757336783).
+- The latest available OPatch, which can be downloaded from the Oracle
+  support website. We recommend choosing the following download parameters:
+  - Release: OPatch 20.0.0.0.0
+  - Platform: Linux x86_64
+
+There are two options to build the actual container database image: using
+Google Cloud Build or building the image locally using Docker.
+
+### Using Google Cloud Build to create a containerized Oracle database image (Recommended)
+
+1. [Create a new GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
+   or reuse an existing one with the following settings:
+
+   ```sh
+   gcloud projects create $PROJECT_ID [--folder [...]]
+   gcloud beta billing projects link $PROJECT_ID --billing-account [...]
+
+   gcloud services enable container.googleapis.com anthos.googleapis.com cloudbuild.googleapis.com artifactregistry.googleapis.com --project $PROJECT_ID
+   ```
+
+   Though the default compute service account can be used with El Carro, we
+   recommend creating a dedicated one as follows:
+
+   ```sh
+   gcloud iam service-accounts create $SERVICE_ACCOUNT --project $PROJECT_ID
+   export PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)")
+   gcloud projects add-iam-policy-binding $PROJECT_ID --member=serviceAccount:service-${PROJECT_NUMBER}@containerregistry.iam.gserviceaccount.com --role=roles/containerregistry.ServiceAgent
+   ```
+
+2. Transfer the Oracle binaries you downloaded earlier to a
+   [GCS bucket](https://cloud.google.com/storage). If needed, a new GCS bucket
+   can be created as follows:
+
+   ```sh
+   gsutil mb gs://$GCS_BUCKET
+
+   gsutil cp ~/Downloads/V839960-01.zip gs://$GCS_BUCKET/install/
+   gsutil cp ~/Downloads/p6880880_200000_Linux-x86-64.zip gs://$GCS_BUCKET/install/
+   gsutil cp ~/Downloads/p32228578_122010_Linux-x86-64.zip gs://$GCS_BUCKET/install/
+   ```
+
+   Once the bucket is ready, grant the IAM read privilege
+   (roles/storage.objectViewer) to the Google Cloud Build service account. Use
+   your project ID and your globally unique GCS bucket name in the snippet
+   below:
+
+   ```sh
+   export PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)")
+   gsutil iam ch serviceAccount:${PROJECT_NUMBER}@cloudbuild.gserviceaccount.com:roles/storage.objectViewer gs://$GCS_BUCKET
+   ```
+
+3. Trigger the Google Cloud Build pipeline.
+
+   You have the choice of creating a container image with just the Oracle RDBMS
+   software in it or, optionally, creating a seed database (CDB) at the same time
+   and hosting it in the same container image. Seeded images are larger in size
+   but may save you time during provisioning. For a quick start, we suggest you
+   create a seeded container image.
+
+   - To create a seeded image, run the following:
+
+     ```sh
+     cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+     chmod +x ./image_build.sh
+
+     ./image_build.sh --install_path=gs://$GCS_BUCKET/install --db_version=12.2 --patch_version=32228578 --create_cdb=true --cdb_name=$DBNAME --mem_pct=45 --no_dry_run --project_id=$PROJECT_ID
+
+     Executing the following command:
+     [...]
+     ```
+
+   If **AccessDeniedException** is raised against the above command, it's
+   likely because the previous `gsutil iam ch` command didn't succeed. We
+   suggest you rerun the `gsutil` command and ensure that the Cloud Build
+   service account has the read privilege on the GCS bucket that contains the
+   Oracle software.
+
+   - To create an unseeded image (one without a CDB), run the same steps as
+     for the seeded case but set the `--create_cdb` flag to `false` and omit
+     the `--cdb_name` parameter.
+
+     ```sh
+     cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+     chmod +x ./image_build.sh
+
+     ./image_build.sh --install_path=gs://$GCS_BUCKET/install --db_version=12.2 --patch_version=32228578 --create_cdb=false --mem_pct=45 --no_dry_run --project_id=$PROJECT_ID
+
+     Executing the following command:
+     [...]
+     ```
+
+4. Verify that your containerized database image was successfully created.
+
+   Depending on the options you choose when you run the image creation script,
+   creating a containerized image may take ~40+ minutes.
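+
+   While you wait, you can follow the build's progress; a minimal sketch,
+   assuming the Google Cloud Build path above was used:
+
+   ```sh
+   # List any in-progress Cloud Build jobs for the project.
+   gcloud builds list --project $PROJECT_ID --ongoing
+   ```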
+   To verify the image that was created, run:
+
+   ```sh
+   gcloud container images list --project $PROJECT_ID --repository gcr.io/$PROJECT_ID/oracle-database-images
+
+   NAME
+   gcr.io/$PROJECT_ID/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME
+
+   gcloud container images describe gcr.io/$PROJECT_ID/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME
+
+   image_summary:
+     digest: sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     fully_qualified_digest: gcr.io/$PROJECT_ID/oracle-database-images/oracle-12.2-ee-database-$DBNAME@sha256:ce9b44ccab513101f51516aafea782dc86749a08d02a20232f78156fd4f8a52c
+     registry: gcr.io
+     repository: $PROJECT_ID/oracle-database-images/oracle-12.2-ee-database-$DBNAME
+   ```
+
+### Building a containerized Oracle database image locally using Docker
+
+If you're unable or prefer not to use Google Cloud Build to create a
+containerized database image, you can build an image locally using
+[Docker](https://www.docker.com). You need to push your locally built image to a
+registry that your Kubernetes cluster can pull images from. You must have Docker
+installed before proceeding with a local containerized database image build.
+
+1. Copy the Oracle binaries you downloaded earlier to
+   `$PATH_TO_EL_CARRO_RELEASE/dbimage`.
+
+   Your $PATH_TO_EL_CARRO_RELEASE/dbimage directory should then look something
+   like this:
+
+   ```sh
+   ls -1X
+   Dockerfile
+   README.md
+   image_build.sh
+   install-oracle-18c-xe.sh
+   install-oracle.sh
+   ora12-config.sh
+   ora19-config.sh
+   cloudbuild.yaml
+   p32228578_122010_Linux-x86-64.zip
+   p6880880_200000_Linux-x86-64.zip
+   V839960-01.zip
+   ```
+
+2. Trigger the image creation script.
+
+   You have the choice of creating a container image with just the Oracle RDBMS
+   software in it or, optionally, creating a seed database (CDB) at the same time
+   and hosting it in the same container image. Seeded images are larger in size
+   but may save you time during provisioning. For a quick start, we suggest you
+   create a seeded container image.
+
+   - To create a seeded image, run the following:
+
+     ```sh
+     cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+     chmod +x ./image_build.sh
+
+     ./image_build.sh --local_build=true --db_version=12.2 --patch_version=32228578 --create_cdb=true --cdb_name=$DBNAME --mem_pct=45 --no_dry_run --project_id=local-build
+
+     Executing the following command:
+     [...]
+     ```
+
+   - To create an unseeded image (one without a CDB), run the same steps as
+     for the seeded case but set the `--create_cdb` flag to `false` and omit
+     the `--cdb_name` parameter.
+
+     ```sh
+     cd $PATH_TO_EL_CARRO_RELEASE/dbimage
+     chmod +x ./image_build.sh
+
+     ./image_build.sh --local_build=true --db_version=12.2 --patch_version=32228578 --create_cdb=false --mem_pct=45 --no_dry_run --project_id=local-build
+
+     Executing the following command:
+     [...]
+     ```
+
+3. Verify that your containerized database image was successfully created.
+
+   Depending on the options you choose when you run the image creation script,
+   creating a containerized image may take ~40+ minutes. To verify that your
+   image was successfully created, run the following command:
+
+   ```sh
+   docker images
+   REPOSITORY                                                                TAG      IMAGE ID       CREATED       SIZE
+   gcr.io/local-build/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME   latest   c766d980c9a0   2 hours ago   17.4GB
+   ```
+
+4. Retag your locally built image if necessary and push it to a registry that
+   your Kubernetes cluster can pull images from, as shown in the example below.
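+
+   A minimal sketch of that retag-and-push step, assuming you push to your
+   project's Container Registry and have run `gcloud auth configure-docker`
+   (the target path is illustrative):
+
+   ```sh
+   # Retag the locally built image for your project's registry, then push it.
+   docker tag gcr.io/local-build/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME gcr.io/$PROJECT_ID/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME
+   docker push gcr.io/$PROJECT_ID/oracle-database-images/oracle-12.2-ee-seeded-$DBNAME
+   ```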
+
+## Provisioning a Kubernetes cluster on GKE to run El Carro
+
+To provision a Kubernetes cluster on Google Kubernetes Engine (GKE), run the
+following command:
+
+```sh
+gcloud container clusters create $CLUSTER_NAME --release-channel rapid --machine-type=n1-standard-4 --num-nodes 2 --zone $ZONE --project $PROJECT_ID --scopes gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging --service-account $SERVICE_ACCOUNT --addons GcePersistentDiskCsiDriver
+```
+
+To get the cluster ready for El Carro, create a k8s storage class and a volume
+snapshot class as follows:
+
+```sh
+kubectl create -f $PATH_TO_EL_CARRO_RELEASE/deploy/csi/gce_pd_storage_class.yaml
+kubectl create -f $PATH_TO_EL_CARRO_RELEASE/deploy/csi/gce_pd_volume_snapshot_class.yaml
+```
+
+Verify that both resources have been created properly by running:
+
+```sh
+kubectl get storageclasses
+NAME                 PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+csi-gce-pd           pd.csi.storage.gke.io   Delete          WaitForFirstConsumer   false                  30d
+standard (default)   kubernetes.io/gce-pd    Delete          Immediate              true                   30d
+
+kubectl get volumesnapshotclass
+NAME                        AGE
+csi-gce-pd-snapshot-class   78s
+```
+
+## Deploying the El Carro Operator to a Kubernetes cluster
+
+You can use `kubectl` to deploy the El Carro Operator to your cluster by
+running:
+
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/operator.yaml
+namespace/operator-system created
+[...]
+```
+
+## Creating an El Carro Instance
+
+To create an instance:
+
+- Create a namespace to host your instance by running:
+  ```sh
+  kubectl create namespace db
+  ```
+
+- Modify
+  $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_instance_custom_seeded.yaml
+  to include the link to the database service image you built earlier.
+
+- Apply the modified yaml file by running:
+  ```sh
+  kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_instance_custom_seeded.yaml -n db
+  ```
+
+Monitor the creation of your instance by running:
+```sh
+kubectl get -w instances.oracle.db.anthosapis.com -n db
+
+NAME   DB ENGINE   VERSION   EDITION      ENDPOINT      URL                DB NAMES   BACKUP ID   READYSTATUS   READYREASON        DBREADYSTATUS   DBREADYREASON
+mydb   Oracle      12.2      Enterprise   mydb-svc.db   34.71.69.25:6021                          False         CreateInProgress
+```
+
+Once your instance is ready, the **READYSTATUS** and **DBREADYSTATUS** will both
+flip to **True**.
+
+Tip: You can monitor the logs from the El Carro operator by running:
+```sh
+kubectl logs -l control-plane=controller-manager -n operator-system -c manager -f
+```
+
+## Creating a PDB (Database)
+
+To store and query data, create a PDB and attach it to the instance you created
+in the previous step by running:
+```sh
+kubectl apply -f $PATH_TO_EL_CARRO_RELEASE/samples/v1alpha1_database_pdb1.yaml -n db
+```
+
+Monitor the creation of your PDB by running:
+```sh
+kubectl get -w databases.oracle.db.anthosapis.com -n db
+NAME   INSTANCE   USERS                                PHASE   DATABASEREADYSTATUS   DATABASEREADYREASON   USERREADYSTATUS   USERREADYREASON
+pdb1   mydb       ["superuser","scott","proberuser"]   Ready   True                  CreateComplete        True              SyncComplete
+```
+
+Once your PDB is ready, the **DATABASEREADYSTATUS** and **USERREADYSTATUS** will
+both flip to **True**.
+
+You can access your PDB externally by using
+[sqlplus](https://docs.oracle.com/en/database/oracle/oracle-database/18/sqpug/index.html):
+```sh
+sqlplus scott/tiger@$INSTANCE_URL/pdb1.gke
+```
+Replace $INSTANCE_URL with the URL that was assigned to your instance.
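+
+For example, a minimal sketch that reads the URL from the instance status
+(assuming the status field backing the URL column above is named `url`):
+
+```sh
+export INSTANCE_URL=$(kubectl get instances.oracle.db.anthosapis.com mydb -n db -o jsonpath='{.status.url}')
+sqlplus scott/tiger@$INSTANCE_URL/pdb1.gke
+```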
diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..b04e930 --- /dev/null +++ b/go.mod @@ -0,0 +1,49 @@ +module github.com/GoogleCloudPlatform/elcarro-oracle-operator + +go 1.15 + +require ( + bitbucket.org/creachadair/stringset v0.0.9 + cloud.google.com/go v0.81.0 + cloud.google.com/go/storage v1.10.0 + github.com/Microsoft/go-winio v0.4.14 // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 + github.com/docker/go-connections v0.4.0 // indirect + github.com/ghodss/yaml v1.0.0 + github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6 + github.com/go-logr/zapr v0.2.0 // indirect + github.com/godror/godror v0.20.1 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.5 + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.1.2 + github.com/googleapis/gnostic v0.5.1 // indirect; keep + github.com/hpcloud/tail v1.0.0 + github.com/imdario/mergo v0.3.11 // indirect + github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1 + github.com/morikuni/aec v1.0.0 // indirect + github.com/onsi/ginkgo v1.14.1 + github.com/onsi/gomega v1.10.2 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.7.1 + github.com/prometheus/common v0.13.0 // indirect + github.com/robfig/cron v1.2.0 + gomodules.xyz/jsonpatch/v2 v2.1.0 // indirect + google.golang.org/api v0.44.0 + google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 + google.golang.org/grpc v1.36.1 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 + google.golang.org/protobuf v1.26.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.20.2 + k8s.io/apimachinery v0.20.2 + k8s.io/client-go v0.20.2 + k8s.io/klog/v2 v2.4.0 + k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + sigs.k8s.io/controller-runtime v0.6.2 + sigs.k8s.io/controller-tools v0.2.5 + sigs.k8s.io/kustomize/kustomize/v4 v4.0.5 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..9d97947 --- /dev/null +++ b/go.sum @@ -0,0 +1,1216 @@ +bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= +bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram 
v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creachadair/staticfile v0.1.3/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream 
v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6 h1:ZPVluSmhtMIHlqUDMZu70FgMpRzbQfl4h9oKCAXOVDE= +github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= 
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= 
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/godror/godror v0.20.1 h1:s/ehD65nfVzWR2MrZGChDkLvVPlIVxbt+Jpzfwkl1c8= +github.com/godror/godror v0.20.1/go.mod h1:YlPoIf962ZZKPM5Xqa8NxmGgck39pi51tqAs+K3IaFM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian 
v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context 
v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo 
v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty 
v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubernetes-csi/csi-lib-utils v0.7.0/go.mod h1:bze+2G9+cmoHxN6+WyG1qT4MDxgZJMLGwc7V4acPNm0= +github.com/kubernetes-csi/csi-test v2.0.0+incompatible/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0= +github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1 h1:t5bmB3Y8nCaLA4aFrIpX0zjHEF/HUkJp6f5rm7BsVzM= +github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1/go.mod h1:dV5oB3U62KBdlf9ADWkMmjGd3USauqQtwIm2OZb5mqI= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod 
h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml 
v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.13.0 
h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg= +github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap 
v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod 
h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod 
h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191220175831-5c49e3ecc1c1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= +k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= +k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= 
+k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo= +k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= +k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.17.1-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= +k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= +k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= +k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= +k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= +k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ= +k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= +k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= +k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.14.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc 
v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= +sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +sigs.k8s.io/controller-tools v0.2.5 h1:kH7HKWed9XO42OTxyhUtqyImiefdZV2Q9Jbrytvhf18= +sigs.k8s.io/controller-tools v0.2.5/go.mod h1:+t0Hz6tOhJQCdd7IYO0mNzimmiM9sqMU0021u6UCF2o= +sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ= +sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= +sigs.k8s.io/kustomize/cmd/config v0.9.7 h1:xxvL/np/zYHVuCH1tNFehlyEtSW5oXjoI6ycejiyOwQ= +sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= +sigs.k8s.io/kustomize/kustomize/v4 v4.0.5 h1:0xQWp03aKWilF6UJrupcA2rCoCn3jejkJ+m/CCI/Fis= +sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= +sigs.k8s.io/kustomize/kyaml v0.10.15 h1:dSLgG78KyaxN4HylPXdK+7zB3k7sW6q3IcCmcfKA+aI= +sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/hack/0001-Patch-to-add-bazel-support.patch b/hack/0001-Patch-to-add-bazel-support.patch new file mode 100644 index 0000000..77cc787 --- /dev/null +++ b/hack/0001-Patch-to-add-bazel-support.patch @@ -0,0 +1,335 @@ +From ad7dd3c9756a024051fd7464a5f4a807791eb8e8 Mon Sep 17 00:00:00 2001 +From: Kurt Kartaltepe +Date: Thu, 24 Sep 2020 11:36:11 -0700 +Subject: [PATCH] Patch to add bazel support + +--- + .gitignore | 1 + + BUILD.bazel | 157 ++++++++++++++++++++++++++++++++++++++++++++++++ + WORKSPACE | 32 ++++++++++ + deps.bzl | 27 +++++++++ + drv.go | 4 +- + 
dsn/BUILD.bazel | 26 ++++++++ + sid/BUILD.bazel | 15 +++++ + 7 files changed, 260 insertions(+), 2 deletions(-) + create mode 100644 BUILD.bazel + create mode 100644 WORKSPACE + create mode 100644 deps.bzl + create mode 100644 dsn/BUILD.bazel + create mode 100644 sid/BUILD.bazel + +diff --git a/.gitignore b/.gitignore +index 43efe71..ac0505d 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -5,3 +5,4 @@ + *.swp + env.sh + ? ++bazel-* +diff --git a/BUILD.bazel b/BUILD.bazel +new file mode 100644 +index 0000000..bc33f73 +--- /dev/null ++++ b/BUILD.bazel +@@ -0,0 +1,157 @@ ++load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") ++load("@bazel_gazelle//:def.bzl", "gazelle") ++ ++# gazelle:prefix github.com/godror/godror ++gazelle(name = "gazelle") ++ ++go_library( ++ name = "godror", ++ srcs = [ ++ "conn.go", ++ "conn_go15.go", ++ "data.go", ++ "drv.go", ++ "drv_posix.go", ++ "lob.go", ++ "obj.go", ++ "orahlp.go", ++ "queue.go", ++ "require.go", ++ "rows.go", ++ "stmt.go", ++ "stmt_go11.go", ++ "stmt_go13.go", ++ "subscr.c", ++ "subscr.go", ++ "version.go", ++ ], ++ cdeps = [ ++ ":odpi", ++ ], ++ cgo = True, ++ clinkopts = select({ ++ "@io_bazel_rules_go//go/platform:aix": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:android": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:darwin": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:dragonfly": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:freebsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:illumos": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:ios": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:js": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:linux": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:nacl": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:netbsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:openbsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:plan9": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:solaris": [ ++ "-ldl -lpthread", ++ ], ++ "//conditions:default": [], ++ }), ++ copts = ["-Iodpi/include -Iodpi/src -Iodpi/embed"], ++ importpath = "github.com/godror/godror", ++ visibility = ["//visibility:public"], ++ deps = [ ++ "//dsn", ++ "@com_github_go_logfmt_logfmt//:go_default_library", ++ "@org_golang_x_xerrors//:go_default_library", ++ ], ++) ++ ++cc_library( ++ name = "odpi", ++ srcs = [ ++ "odpi/src/dpiConn.c", ++ "odpi/src/dpiContext.c", ++ "odpi/src/dpiData.c", ++ "odpi/src/dpiDebug.c", ++ "odpi/src/dpiDeqOptions.c", ++ "odpi/src/dpiEnqOptions.c", ++ "odpi/src/dpiEnv.c", ++ "odpi/src/dpiError.c", ++ "odpi/src/dpiGen.c", ++ "odpi/src/dpiGlobal.c", ++ "odpi/src/dpiHandleList.c", ++ "odpi/src/dpiHandlePool.c", ++ "odpi/src/dpiImpl.h", ++ "odpi/src/dpiLob.c", ++ "odpi/src/dpiMsgProps.c", ++ "odpi/src/dpiObject.c", ++ "odpi/src/dpiObjectAttr.c", ++ "odpi/src/dpiObjectType.c", ++ "odpi/src/dpiOci.c", ++ "odpi/src/dpiOracleType.c", ++ "odpi/src/dpiPool.c", ++ "odpi/src/dpiQueue.c", ++ "odpi/src/dpiRowid.c", ++ "odpi/src/dpiSodaColl.c", ++ "odpi/src/dpiSodaCollCursor.c", ++ "odpi/src/dpiSodaDb.c", ++ "odpi/src/dpiSodaDoc.c", ++ "odpi/src/dpiSodaDocCursor.c", ++ "odpi/src/dpiStmt.c", ++ "odpi/src/dpiSubscr.c", ++ "odpi/src/dpiUtils.c", ++ "odpi/src/dpiVar.c", ++ ], ++ hdrs = [ ++ "odpi/include/dpi.h", ++ "odpi/src/dpiErrorMessages.h", ++ "odpi/src/dpiImpl.h", ++ ], ++ includes = [ 
++ "odpi/include", ++ "odpi/src", ++ ], ++) ++ ++go_test( ++ name = "godror_test", ++ srcs = [ ++ "conn_test.go", ++ "data_test.go", ++ "drv_test.go", ++ "example_shutdown_test.go", ++ "orahlp_test.go", ++ "queue_test.go", ++ "z_bench_test.go", ++ "z_conncut_test.go", ++ "z_heterogeneous_test.go", ++ "z_lob_test.go", ++ "z_plsql_types_test.go", ++ "z_qrcn_test.go", ++ "z_test.go", ++ ], ++ embed = [":godror"], ++ deps = [ ++ "@com_github_go_logfmt_logfmt//:go_default_library", ++ "@com_github_google_go_cmp//cmp:go_default_library", ++ "@org_golang_x_sync//errgroup:go_default_library", ++ ], ++) +diff --git a/WORKSPACE b/WORKSPACE +new file mode 100644 +index 0000000..3bba372 +--- /dev/null ++++ b/WORKSPACE +@@ -0,0 +1,32 @@ ++load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") ++ ++http_archive( ++ name = "io_bazel_rules_go", ++ sha256 = "b725e6497741d7fc2d55fcc29a276627d10e43fa5d0bb692692890ae30d98d00", ++ urls = [ ++ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz", ++ "https://github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz", ++ ], ++) ++ ++http_archive( ++ name = "bazel_gazelle", ++ sha256 = "72d339ff874a382f819aaea80669be049069f502d6c726a07759fdca99653c48", ++ urls = [ ++ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.1/bazel-gazelle-v0.22.1.tar.gz", ++ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.1/bazel-gazelle-v0.22.1.tar.gz", ++ ], ++) ++ ++load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") ++load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") ++load("//:deps.bzl", "go_dependencies") ++ ++# gazelle:repository_macro deps.bzl%go_dependencies ++go_dependencies() ++ ++go_rules_dependencies() ++ ++go_register_toolchains() ++ ++gazelle_dependencies() +diff --git a/deps.bzl b/deps.bzl +new file mode 100644 +index 0000000..0f4f5ee +--- /dev/null ++++ b/deps.bzl +@@ -0,0 +1,27 @@ ++load("@bazel_gazelle//:deps.bzl", "go_repository") ++ ++def go_dependencies(): ++ go_repository( ++ name = "com_github_go_logfmt_logfmt", ++ importpath = "github.com/go-logfmt/logfmt", ++ sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=", ++ version = "v0.5.0", ++ ) ++ go_repository( ++ name = "com_github_google_go_cmp", ++ importpath = "github.com/google/go-cmp", ++ sum = "h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=", ++ version = "v0.4.0", ++ ) ++ go_repository( ++ name = "org_golang_x_sync", ++ importpath = "golang.org/x/sync", ++ sum = "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=", ++ version = "v0.0.0-20190911185100-cd5d95a43a6e", ++ ) ++ go_repository( ++ name = "org_golang_x_xerrors", ++ importpath = "golang.org/x/xerrors", ++ sum = "h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=", ++ version = "v0.0.0-20191204190536-9bdfabe68543", ++ ) +diff --git a/drv.go b/drv.go +index 8b03df0..6e674d0 100644 +--- a/drv.go ++++ b/drv.go +@@ -46,9 +46,9 @@ + package godror + + /* +-#cgo CFLAGS: -I./odpi/include -I./odpi/src -I./odpi/embed ++#include + +-#include "dpi.c" ++#include "dpiImpl.h" + */ + import "C" + +diff --git a/dsn/BUILD.bazel b/dsn/BUILD.bazel +new file mode 100644 +index 0000000..3b114ea +--- /dev/null ++++ b/dsn/BUILD.bazel +@@ -0,0 +1,26 @@ ++load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") ++ ++go_library( ++ name = "dsn", ++ srcs = [ ++ "dsn.go", ++ "dsn_fuzz.go", ++ ], ++ importpath = "github.com/godror/godror/dsn", ++ visibility = 
["//visibility:public"], ++ deps = [ ++ "@com_github_go_logfmt_logfmt//:go_default_library", ++ "@org_golang_x_xerrors//:go_default_library", ++ ], ++) ++ ++go_test( ++ name = "dsn_test", ++ srcs = ["dsn_test.go"], ++ embed = [":dsn"], ++ deps = [ ++ "@com_github_google_go_cmp//cmp:go_default_library", ++ "@com_github_google_go_cmp//cmp/cmpopts:go_default_library", ++ "@org_golang_x_xerrors//:go_default_library", ++ ], ++) +diff --git a/sid/BUILD.bazel b/sid/BUILD.bazel +new file mode 100644 +index 0000000..ad16633 +--- /dev/null ++++ b/sid/BUILD.bazel +@@ -0,0 +1,15 @@ ++load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") ++ ++go_library( ++ name = "sid", ++ srcs = ["sid.go"], ++ importpath = "github.com/godror/godror/sid", ++ visibility = ["//visibility:public"], ++ deps = ["@org_golang_x_xerrors//:go_default_library"], ++) ++ ++go_test( ++ name = "sid_test", ++ srcs = ["sid_test.go"], ++ embed = [":sid"], ++) +-- +2.28.0.681.g6f77f65b4e-goog + diff --git a/hack/0002-Patch-to-add-bazel-support.patch b/hack/0002-Patch-to-add-bazel-support.patch new file mode 100644 index 0000000..02cb47b --- /dev/null +++ b/hack/0002-Patch-to-add-bazel-support.patch @@ -0,0 +1,300 @@ +diff --git .gitignore .gitignore +index 43efe71..ac0505d 100644 +--- .gitignore ++++ .gitignore +@@ -5,3 +5,4 @@ + *.swp + env.sh + ? ++bazel-* +diff --git BUILD.bazel BUILD.bazel +new file mode 100644 +index 0000000..bc33f73 +--- /dev/null ++++ BUILD.bazel +@@ -0,0 +1,156 @@ ++load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") ++load("@bazel_gazelle//:def.bzl", "gazelle") ++ ++# gazelle:prefix github.com/godror/godror ++gazelle(name = "gazelle") ++ ++go_library( ++ name = "godror", ++ srcs = [ ++ "conn.go", ++ "conn_go15.go", ++ "data.go", ++ "drv.go", ++ "drv_posix.go", ++ "lob.go", ++ "obj.go", ++ "orahlp.go", ++ "queue.go", ++ "require.go", ++ "rows.go", ++ "stmt.go", ++ "stmt_go11.go", ++ "stmt_go13.go", ++ "subscr.c", ++ "subscr.go", ++ "version.go", ++ ], ++ cdeps = [ ++ ":odpi", ++ ], ++ cgo = True, ++ clinkopts = select({ ++ "@io_bazel_rules_go//go/platform:aix": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:android": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:darwin": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:dragonfly": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:freebsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:illumos": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:ios": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:js": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:linux": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:nacl": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:netbsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:openbsd": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:plan9": [ ++ "-ldl -lpthread", ++ ], ++ "@io_bazel_rules_go//go/platform:solaris": [ ++ "-ldl -lpthread", ++ ], ++ "//conditions:default": [], ++ }), ++ copts = ["-Iodpi/include -Iodpi/src -Iodpi/embed"], ++ importpath = "github.com/godror/godror", ++ visibility = ["//visibility:public"], ++ deps = [ ++ "@com_github_go_logfmt_logfmt//:go_default_library", ++ "@org_golang_x_xerrors//:go_default_library", ++ ], ++) ++ ++cc_library( ++ name = "odpi", ++ srcs = [ ++ "odpi/src/dpiConn.c", ++ "odpi/src/dpiContext.c", ++ "odpi/src/dpiData.c", ++ "odpi/src/dpiDebug.c", ++ 
"odpi/src/dpiDeqOptions.c", ++ "odpi/src/dpiEnqOptions.c", ++ "odpi/src/dpiEnv.c", ++ "odpi/src/dpiError.c", ++ "odpi/src/dpiGen.c", ++ "odpi/src/dpiGlobal.c", ++ "odpi/src/dpiHandleList.c", ++ "odpi/src/dpiHandlePool.c", ++ "odpi/src/dpiImpl.h", ++ "odpi/src/dpiLob.c", ++ "odpi/src/dpiMsgProps.c", ++ "odpi/src/dpiObject.c", ++ "odpi/src/dpiObjectAttr.c", ++ "odpi/src/dpiObjectType.c", ++ "odpi/src/dpiOci.c", ++ "odpi/src/dpiOracleType.c", ++ "odpi/src/dpiPool.c", ++ "odpi/src/dpiQueue.c", ++ "odpi/src/dpiRowid.c", ++ "odpi/src/dpiSodaColl.c", ++ "odpi/src/dpiSodaCollCursor.c", ++ "odpi/src/dpiSodaDb.c", ++ "odpi/src/dpiSodaDoc.c", ++ "odpi/src/dpiSodaDocCursor.c", ++ "odpi/src/dpiStmt.c", ++ "odpi/src/dpiSubscr.c", ++ "odpi/src/dpiUtils.c", ++ "odpi/src/dpiVar.c", ++ ], ++ hdrs = [ ++ "odpi/include/dpi.h", ++ "odpi/src/dpiErrorMessages.h", ++ "odpi/src/dpiImpl.h", ++ ], ++ includes = [ ++ "odpi/include", ++ "odpi/src", ++ ], ++) ++ ++go_test( ++ name = "godror_test", ++ srcs = [ ++ "conn_test.go", ++ "data_test.go", ++ "drv_test.go", ++ "example_shutdown_test.go", ++ "orahlp_test.go", ++ "queue_test.go", ++ "z_bench_test.go", ++ "z_conncut_test.go", ++ "z_heterogeneous_test.go", ++ "z_lob_test.go", ++ "z_plsql_types_test.go", ++ "z_qrcn_test.go", ++ "z_test.go", ++ ], ++ embed = [":godror"], ++ deps = [ ++ "@com_github_go_logfmt_logfmt//:go_default_library", ++ "@com_github_google_go_cmp//cmp:go_default_library", ++ "@org_golang_x_sync//errgroup:go_default_library", ++ ], ++) +diff --git WORKSPACE WORKSPACE +new file mode 100644 +index 0000000..3bba372 +--- /dev/null ++++ WORKSPACE +@@ -0,0 +1,32 @@ ++load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") ++ ++http_archive( ++ name = "io_bazel_rules_go", ++ sha256 = "b725e6497741d7fc2d55fcc29a276627d10e43fa5d0bb692692890ae30d98d00", ++ urls = [ ++ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz", ++ "https://github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz", ++ ], ++) ++ ++http_archive( ++ name = "bazel_gazelle", ++ sha256 = "72d339ff874a382f819aaea80669be049069f502d6c726a07759fdca99653c48", ++ urls = [ ++ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.1/bazel-gazelle-v0.22.1.tar.gz", ++ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.1/bazel-gazelle-v0.22.1.tar.gz", ++ ], ++) ++ ++load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") ++load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") ++load("//:deps.bzl", "go_dependencies") ++ ++# gazelle:repository_macro deps.bzl%go_dependencies ++go_dependencies() ++ ++go_rules_dependencies() ++ ++go_register_toolchains() ++ ++gazelle_dependencies() +diff --git deps.bzl deps.bzl +new file mode 100644 +index 0000000..0f4f5ee +--- /dev/null ++++ deps.bzl +@@ -0,0 +1,27 @@ ++load("@bazel_gazelle//:deps.bzl", "go_repository") ++ ++def go_dependencies(): ++ go_repository( ++ name = "com_github_go_logfmt_logfmt", ++ importpath = "github.com/go-logfmt/logfmt", ++ sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=", ++ version = "v0.5.0", ++ ) ++ go_repository( ++ name = "com_github_google_go_cmp", ++ importpath = "github.com/google/go-cmp", ++ sum = "h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=", ++ version = "v0.4.0", ++ ) ++ go_repository( ++ name = "org_golang_x_sync", ++ importpath = "golang.org/x/sync", ++ sum = "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=", ++ version = 
"v0.0.0-20190911185100-cd5d95a43a6e", ++ ) ++ go_repository( ++ name = "org_golang_x_xerrors", ++ importpath = "golang.org/x/xerrors", ++ sum = "h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=", ++ version = "v0.0.0-20191204190536-9bdfabe68543", ++ ) +diff --git drv.go drv.go +index 8b03df0..6e674d0 100644 +--- drv.go ++++ drv.go +@@ -46,9 +46,9 @@ + package godror + + /* +-#cgo CFLAGS: -I./odpi/include -I./odpi/src -I./odpi/embed ++#include + +-#include "dpi.c" ++#include "dpiImpl.h" + */ + import "C" + +diff --git sid/BUILD.bazel sid/BUILD.bazel +new file mode 100644 +index 0000000..ad16633 +--- /dev/null ++++ sid/BUILD.bazel +@@ -0,0 +1,15 @@ ++load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") ++ ++go_library( ++ name = "sid", ++ srcs = ["sid.go"], ++ importpath = "github.com/godror/godror/sid", ++ visibility = ["//visibility:public"], ++ deps = ["@org_golang_x_xerrors//:go_default_library"], ++) ++ ++go_test( ++ name = "sid_test", ++ srcs = ["sid_test.go"], ++ embed = [":sid"], ++) +diff --git a/orahlp_go12.go b/orahlp_go12.go +index 4475950..0c73a90 100644 +--- orahlp_go12.go ++++ orahlp_go12.go +@@ -1,4 +1,4 @@ +-// +build !go1.13 ++// +build go1.12 go1.11 go1.10 + + // Copyright 2017, 2020 The Godror Authors + // +diff --git a/orahlp_go13.go b/orahlp_go13.go +index a0f0462..1bc6e6a 100644 +--- orahlp_go13.go ++++ orahlp_go13.go +@@ -1,4 +1,4 @@ +-// +build go1.13 ++// +build !go1.12 !go1.11 !go1.10 + + // Copyright 2017, 2020 The Godror Authors + // + diff --git a/hack/bazel_workspace.sh b/hack/bazel_workspace.sh new file mode 100755 index 0000000..5f24d08 --- /dev/null +++ b/hack/bazel_workspace.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +echo "PROW_IMAGE_REPO ${PROW_IMAGE_REPO}" +echo "PROW_IMAGE_TAG ${PROW_IMAGE_TAG}" +echo "PROW_PROJECT ${PROW_PROJECT}" +echo "PROW_CLUSTER ${PROW_CLUSTER}" +echo "PROW_CLUSTER_ZONE ${PROW_CLUSTER_ZONE}" diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..6dd4de7 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2021 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/oracle/BUILD.bazel b/oracle/BUILD.bazel new file mode 100644 index 0000000..b4a3a3a --- /dev/null +++ b/oracle/BUILD.bazel @@ -0,0 +1,92 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push") + +go_binary( + name = "operator", + embed = [":oracle_lib"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "configs", + srcs = glob(["config/**"]), + visibility = ["//visibility:public"], +) + +go_library( + name = "oracle_lib", + srcs = [ + "main.go", + "version.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle", + visibility = ["//visibility:private"], + deps = [ + "//oracle/api/v1alpha1", + "//oracle/controllers", + "//oracle/controllers/backupcontroller", + "//oracle/controllers/backupschedulecontroller", + "//oracle/controllers/configcontroller", + "//oracle/controllers/cronanythingcontroller", + "//oracle/controllers/databasecontroller", + "//oracle/controllers/exportcontroller", + "//oracle/controllers/importcontroller", + "//oracle/controllers/instancecontroller", + "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_client_go//kubernetes/scheme", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_klog_v2//:klog", + "@io_k8s_klog_v2//klogr", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) + +# Read in workspace status values to configure _push targets. +# REGISTRY = "${PROW_IMAGE_REPO}".split("/")[0] +REGISTRY = "gcr.io" + +# PROJECT = "${PROW_IMAGE_REPO}".split("/")[1] +PROJECT = "{PROW_PROJECT}" + +TAG = "{PROW_IMAGE_TAG}" + +container_image( + name = "base_image_with_busybox", + tars = [ + "@distroless//image:000.tar.gz", + "@distroless//image:001.tar.gz", + "@distroless//image:002.tar.gz", + "@busybox//image:000.tar.gz", # duplicated files will be retained from the lowest layer. + ], + user = "nonroot:nonroot", + visibility = ["//visibility:public"], +) + +container_image( + name = "base_image", + base = "@distroless//image", + user = "nonroot:nonroot", + visibility = ["//visibility:public"], +) + +container_image( + name = "operator_image", + base = ":base_image", + files = [ + ":operator", + ], + symlinks = {"/manager": "/operator"}, +) + +container_push( + name = "operator_image_push", + format = "OCI", + image = ":operator_image", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/operator", + tag = TAG, +) diff --git a/oracle/Dockerfile b/oracle/Dockerfile new file mode 100644 index 0000000..d725550 --- /dev/null +++ b/oracle/Dockerfile @@ -0,0 +1,46 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
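This Dockerfile stages the build with the repository root as context: the COPY directives reference go.mod, go.sum, and the common/ directory, which live one level above oracle/. A minimal sketch of an invocation from the repo root (the image tag here is an assumption):

    docker build -f oracle/Dockerfile -t operator:dev .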
+ +# Build the manager binary +FROM docker.io/golang:1.15 as builder + +WORKDIR /build +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY common common +COPY oracle/main.go oracle/main.go +COPY oracle/version.go oracle/version.go +COPY oracle/api/ oracle/api/ +COPY oracle/controllers/ oracle/controllers/ +COPY oracle/pkg/agents oracle/pkg/agents +COPY oracle/pkg/database/common oracle/pkg/database/common +COPY oracle/pkg/k8s oracle/pkg/k8s + +# Build +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager ./oracle + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /build/manager . +USER nonroot:nonroot + +ENTRYPOINT ["/manager"] diff --git a/oracle/Makefile b/oracle/Makefile new file mode 100644 index 0000000..0dea7f8 --- /dev/null +++ b/oracle/Makefile @@ -0,0 +1,269 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Image URL to use for all image-building/pushing targets. +# Override these with environment variables or via the command line. +# For example, to push the operator to autopush manually: +# make buildah-build-operator buildah-push-operator PROW_IMAGE_REPO=gcr.io/ PROW_IMAGE_TAG=latest +SHELL := /bin/bash + +# Use the Bazel-built versions of the tools; it is important not to use quotes so that the commands are expanded. +bazel_noui=--ui_event_filters=-INFO --noshow_loading_progress --show_result=0 +KUSTOMIZE=bazel run --run_under="cd $$PWD &&" $(bazel_noui) -- //:kustomize +CONTROLLER_GEN=bazel run --run_under="cd $$PWD &&" $(bazel_noui) -- //:controller-gen +PROTOC=bazel run --run_under="cd $$PWD/.. &&" $(bazel_noui) -- //:protoc +GAZELLE=bazel run $(bazel_noui) -- //:gazelle + +export PROW_IMAGE_REPO ?= $(shell\ if [ -n "${PROW_JOB_ID}" ]; then \ echo "gcr.io/prow-build-graybox"; \ else echo "gcr.io/${USER}-playground-operator"; \ fi \ ) +# Prefer the Louhi refSha, then check for a general Prow job, and finally assume a dev build. +export PROW_IMAGE_TAG ?= $(shell\ if [ -n "${refSha}" ]; then \ echo "$${refSha:0:8}"; \ elif [ -n "${PROW_JOB_ID}" ]; then \ echo "${PROW_JOB_ID}"; \ else echo ${USER}-dev; \ fi \ ) +export PROW_PROJECT ?= $(shell\ if [ -n "${PROW_JOB_ID}" ]; then \ echo "prow-build-graybox"; \ else echo "${USER}-playground-operator"; \ fi \ ) +export RELEASE_NAME ?= $(shell cat /workspace/louhi_ws/_release_name) +# Name of the cluster for integration tests. +# If this is a Prow job, create a unique 40-char cluster name from the PROW_JOB_ID, +# e.g.
inttests-2808d604-8644-11eb-bc7 +# Otherwise use cluster4 +export PROW_JOB_ID_PART=$(shell echo ${PROW_JOB_ID} | cut -c 1-22) +export PROW_CLUSTER ?= $(shell\ if [ -n "${PROW_JOB_ID}" ]; then \ echo "inttests-${PROW_JOB_ID_PART}"; \ else echo "cluster4"; \ fi \ ) +export PROW_CLUSTER_ZONE ?= $(shell\ if [ -n "${PROW_JOB_ID}" ]; then \ echo "us-central1-a"; \ else echo "us-central1-a"; \ fi \ ) + +# Create a unique service account name for integration tests. +# Service account names can't be more than 30 chars. +# Create a unique ID from the PROW_JOB_ID or use 'sa-local'. +export PROW_INT_TEST_SA=$(shell\ if [ -n "${PROW_JOB_ID}" ]; then \ echo "sa-${PROW_JOB_ID_PART}"; \ else echo "sa-local"; \ fi \ ) + +env: + @echo "export PROW_IMAGE_REPO=${PROW_IMAGE_REPO}" + @echo "export PROW_IMAGE_TAG=${PROW_IMAGE_TAG}" + @echo "export PROW_PROJECT=${PROW_PROJECT}" + @echo "export PROW_CLUSTER=${PROW_CLUSTER}" + @echo "export PROW_CLUSTER_ZONE=${PROW_CLUSTER_ZONE}" + @echo "export PROW_INT_TEST_SA=${PROW_INT_TEST_SA}" + +export _BUILDAH_STARTED_IN_USERNS +export BUILDAH_ISOLATION +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +# CRD_OPTIONS ?= "crd:trivialVersions=true" +CRD_OPTIONS ?= "crd" + +# allow builds outside $GOPATH/src +export GO111MODULE=on + +unit-test: + bazel test --test_tag_filters="-integration" ... + +# Run tests; if we are in Prow and ARTIFACTS is set, copy JUnit XMLs and test logs for upload. +test: + rm -f failed # clear any old flags + # Unit/Functional tests, unlimited jobs + bazel test --test_output=errors --test_tag_filters="-integration" ... || touch failed + # Load oracle image paths from env variables to .bazelrc + envsubst < controllers/inttest/.bazelrc.tmpl > controllers/inttest/.bazelrc + # Integration tests, build unlimited, but limit test jobs. + bazel build --test_tag_filters="integration" ... || touch failed + bazel \ --bazelrc=controllers/inttest/.bazelrc \ test \ --jobs=8 \ --test_tag_filters="integration" \ ... \ --test_output=errors \ --spawn_strategy=local \ --genrule_strategy=local \ --test_env=PROW_IMAGE_REPO=${PROW_IMAGE_REPO} \ --test_env=PROW_IMAGE_TAG=${PROW_IMAGE_TAG} \ --test_env=PROW_PROJECT=${PROW_PROJECT} \ --test_env=PROW_CLUSTER=${PROW_CLUSTER} \ --test_env=PROW_INT_TEST_SA=${PROW_INT_TEST_SA} \ --test_env=PROW_CLUSTER_ZONE=${PROW_CLUSTER_ZONE} || touch failed + if [[ -n "$$ARTIFACTS" ]]; then find ../bazel-testlogs/ -name '*.xml' -o -name '*.log' | xargs cp --parents -t "$$ARTIFACTS/" ; fi + if [[ -f "failed" ]]; then rm failed; exit 100; fi + +# Run go fmt against code +fmt: + scripts/fmt_fixup.sh + +# Run go vet against code +vet: + go vet ./... + +glaze: + $(GAZELLE) fix + +glaze-deps: + $(GAZELLE) update-repos -from_file=go.mod -to_macro=deps.bzl%go_dependencies + go mod tidy # glaze dirties the go.sum file. + +# Static checks to be run on presubmit. +# Not parallel, to prevent output interleaving. +# We cannot generate protobufs in Prow because we don't have equivalent compilers available. +check: generate-config generate-go + # Can't use the vet/fmt targets, because the ordering between generate and vet/fmt would be unknown. + go vet ./... + $(MAKE) glaze-deps glaze + scripts/fmt_fixup.sh + scripts/fmt_check.sh + # Check bazel builds + bazel build ... + +# Generate all code for building +generate-go: generate-proto + $(CONTROLLER_GEN) object:headerFile=../hack/boilerplate.go.txt paths="../common/..." + $(CONTROLLER_GEN) object:headerFile=../hack/boilerplate.go.txt paths="./..."
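For a local run with no PROW_JOB_ID set, the defaults above resolve from $USER; a sketch of what `make env` should print, assuming USER=alice:

    $ make env
    export PROW_IMAGE_REPO=gcr.io/alice-playground-operator
    export PROW_IMAGE_TAG=alice-dev
    export PROW_PROJECT=alice-playground-operator
    export PROW_CLUSTER=cluster4
    export PROW_CLUSTER_ZONE=us-central1-a
    export PROW_INT_TEST_SA=sa-local

Running `eval "$(make env)"` reproduces these settings in an interactive shell before invoking, e.g., `make unit-test`.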
+ +# Generate config manifests for k8s, e.g. CRD, RBAC, and controller definitions. +generate-config: + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:rbac:artifacts:config=config/rbac output:webhook:artifacts:config=config/webhook output:crd:artifacts:config=config/crd/bases + cd config/manager && $(KUSTOMIZE) edit set image controller="gcr.io/elcarro/oracle.db.anthosapis.com/operator:latest" + $(KUSTOMIZE) build config/default > operator.yaml + +# Generate proto/gRPC code. +# Prerequisite: google-cloud-sdk. To install it, execute: +# sudo apt install google-cloud-sdk +protoc-gen-go-target="@com_github_golang_protobuf//protoc-gen-go:protoc-gen-go" +protoc-gen-go="bazel-bin/external/com_github_golang_protobuf/protoc-gen-go/protoc-gen-go_/protoc-gen-go" +protoc-gen-go-grpc-target="@org_golang_google_grpc_cmd_protoc_gen_go_grpc//:protoc-gen-go-grpc" +protoc-gen-go-grpc="bazel-bin/external/org_golang_google_grpc_cmd_protoc_gen_go_grpc/protoc-gen-go-grpc_/protoc-gen-go-grpc" +protos := $(shell cd ../ && find ./ -name '*.proto') +generate-proto: + # Make sure our tools are available. + bazel build $(bazel_noui) $(protoc-gen-go-target) $(protoc-gen-go-grpc-target) + $(PROTOC) \ --plugin=$(protoc-gen-go) \ --plugin=$(protoc-gen-go-grpc) \ -I /usr/lib/google-cloud-sdk/lib/third_party \ -I bazel-$(shell basename $$(readlink -f ../))/external/com_google_protobuf/src \ -I . \ --go_out=. --go_opt=paths=source_relative \ --go-grpc_out=. --go-grpc_opt=paths=source_relative \ $(protos) + +buildah-push-operator: + bazel run operator_image_push + +buildah-push-dbinit: + bazel run //oracle/build:dbinit_push + +buildah-push-configagent: + bazel run //oracle/build:configagent_push + +buildah-push-dbdaemon-client: + bazel run //oracle/build:dbdaemonclient_push + +buildah-push-logging: + bazel run //oracle/build:loggingsidecar_push + +buildah-push-monitoring: + bazel run //oracle/build:monitoring_push + +# Build and push everything except the db image for integration tests. +buildah-push-all: buildah-push-operator buildah-push-dbinit buildah-push-configagent buildah-push-logging buildah-push-monitoring + +# Install CRDs into a cluster +install: generate-config + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +# Uninstall CRDs from a cluster +uninstall: generate-config + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +# Deploy the controller to the Kubernetes cluster configured in ~/.kube/config +deploy: generate-config + $(MAKE) buildah-push-all -j8 + cd config/manager && $(KUSTOMIZE) edit set image controller=${PROW_IMAGE_REPO}/oracle.db.anthosapis.com/operator:${PROW_IMAGE_TAG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +# Install all the required dependencies for Prow.
+prepare-prow: + scripts/install_prow_deps.sh || (echo "*** Retrying install prow deps*** "; scripts/install_prow_deps.sh) + touch prepare-prow + +# Prow job entry point: operator-checks +operator-checks: prepare-prow + $(MAKE) check + +# Prow presubmit job entry point: operator-presubmit +operator-presubmit: prepare-prow + $(MAKE) -j8 buildah-push-all + # Remove stale clusters from the project + scripts/integration_test_cluster/cleanup_integration_test_clusters.sh + # Create a new GKE cluster for int tests + scripts/integration_test_cluster/create_integration_test_cluster.sh + # Run tests, remove temp cluster in case of failure + $(MAKE) test || (scripts/integration_test_cluster/delete_integration_test_cluster.sh; exit 100) + # Delete the GKE cluster + scripts/integration_test_cluster/delete_integration_test_cluster.sh + +# Prow canary test job entry point: operator-canary +operator-canary: prepare-prow + $(MAKE) operator-presubmit + +# Louhi test job entry point: louhi-operator-canary +louhi-operator-canary: prepare-prow + # Remove stale clusters from the project + scripts/integration_test_cluster/cleanup_integration_test_clusters.sh + # Create a new GKE cluster for int tests (can also be flaky, retry this step once more) + scripts/integration_test_cluster/create_integration_test_cluster.sh || (echo "*** Retrying cluster creation ***"; scripts/integration_test_cluster/create_integration_test_cluster.sh) + # Run tests, remove temp cluster in case of failure + $(MAKE) test || (scripts/integration_test_cluster/delete_integration_test_cluster.sh; exit 100) + # Delete the GKE cluster + scripts/integration_test_cluster/delete_integration_test_cluster.sh + +# Louhi step to prepare for a release. +prepare-release: + echo "Generating version" + ./scripts/generate_version.sh "main" > version.go + echo "updating projects" + sed -i "s/elcarro/${PROW_PROJECT}/g" operator.yaml + sed -i "s/elcarro/${PROW_PROJECT}/g" main.go + echo "updating image tags for operator.yaml..." + sed -i "s/operator:latest/operator:${RELEASE_NAME}/g" operator.yaml + echo "updating image tags for main.go..." 
+ sed -i "s/dbinit:latest/dbinit:${RELEASE_NAME}/g" main.go + sed -i "s/configagent:latest/configagent:${RELEASE_NAME}/g" main.go + sed -i "s/monitoring:latest/monitoring:${RELEASE_NAME}/g" main.go + sed -i "s/loggingsidecar:latest/loggingsidecar:${RELEASE_NAME}/g" main.go diff --git a/oracle/PROJECT b/oracle/PROJECT new file mode 100644 index 0000000..ae781e3 --- /dev/null +++ b/oracle/PROJECT @@ -0,0 +1,25 @@ +domain: db.anthosapis.com +repo: github.com/GoogleCloudPlatform/elcarro-oracle-operator +resources: +- group: oracle + kind: Instance + version: v1alpha1 +- group: oracle + kind: Database + version: v1alpha1 +- group: oracle + kind: Backup + version: v1alpha1 +- group: oracle + kind: Config + version: v1alpha1 +- group: oracle + kind: Release + version: v1alpha1 +- group: oracle + kind: CronAnything + version: v1alpha1 +- group: oracle + kind: BackupSchedule + version: v1alpha1 +version: "2" diff --git a/oracle/api/v1alpha1/BUILD.bazel b/oracle/api/v1alpha1/BUILD.bazel new file mode 100644 index 0000000..3360805 --- /dev/null +++ b/oracle/api/v1alpha1/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "v1alpha1", + srcs = [ + "backup_types.go", + "backupschedule_types.go", + "config_types.go", + "cronanything_types.go", + "database_types.go", + "export_types.go", + "groupversion_info.go", + "import_types.go", + "instance_types.go", + "release_types.go", + "zz_generated.deepcopy.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1", + deps = [ + "//common/api/v1alpha1", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/schema", + "@io_k8s_sigs_controller_runtime//pkg/scheme", + ], +) diff --git a/oracle/api/v1alpha1/backup_types.go b/oracle/api/v1alpha1/backup_types.go new file mode 100644 index 0000000..35fbc59 --- /dev/null +++ b/oracle/api/v1alpha1/backup_types.go @@ -0,0 +1,147 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// BackupSpec defines the desired state of Backup. +type BackupSpec struct { + // Backup specs that are common across all database engines. + commonv1alpha1.BackupSpec `json:",inline"` + + // Backup sub-type, which is only relevant for a Physical backup type + // (e.g. RMAN). If omitted, the default of Instance(Level) is assumed. + // Supported options at this point are: Instance or Database level backups. + // +kubebuilder:validation:Enum=Instance;Database;Tablespace;Datafile + // +optional + Subtype string `json:"subType,omitempty"` + + // VolumeSnapshotClass points to a particular CSI driver and is used + // for taking a volume snapshot. 
If requested here at the Backup + // level, this setting overrides the platform default as well + // as the default set via the Config (global user preferences). + VolumeSnapshotClass string `json:"volumeSnapshotClass,omitempty"` + + // For a Physical backup this slice can be used to indicate what + // PDBs, schemas, tablespaces or tables to back up. + // +optional + BackupItems []string `json:"backupItems,omitempty"` + + // For a Physical backup the choices are Backupset and Image Copies. + // Backupset is the default, but if Image Copies are required, + // flip this flag to false. + // +optional + Backupset *bool `json:"backupset,omitempty"` + + // For a Physical backup, optionally turn on compression + // by flipping this flag to true. The default is false. + Compressed bool `json:"compressed,omitempty"` + + // For a Physical backup, optionally turn on an additional "check + // logical" option. The default is off. + // +optional + CheckLogical bool `json:"checkLogical,omitempty"` + + // For a Physical backup, optionally indicate a degree of parallelism, + // also known as DOP. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=100 + Dop int32 `json:"dop,omitempty"` + + // For a Physical backup, optionally specify an incremental level. + // The default is 0 (the whole database). + // +optional + Level int32 `json:"level,omitempty"` + + // For a Physical backup, optionally specify filesperset. + // The default depends on the type of backup; it is generally 64. + // +optional + Filesperset int32 `json:"filesperset,omitempty"` + + // For a Physical backup, optionally specify a section size in MB. + // Don't include the unit (MB), just the integer. + // +optional + SectionSize int32 `json:"sectionSize,omitempty"` + + // For a Physical backup, optionally specify the time threshold. + // If the threshold is reached, the backup request times out and + // errors out. The threshold is expressed in minutes. + // Don't include the unit (minutes), just the integer. + // +optional + TimeLimitMinutes int32 `json:"timeLimitMinutes,omitempty"` + + // For a Physical backup, optionally specify a local backup dir. + // If omitted, /u03/app/oracle/rman is assumed. + // +optional + LocalPath string `json:"localPath,omitempty"` + + // If set up ahead of time, the backup sets of a physical backup can + // optionally be transferred to a GCS bucket. + // The user must ensure proper write access to the bucket from within the + // Oracle Operator. + // +optional + GcsPath string `json:"gcsPath,omitempty"` +} + +// BackupStatus defines the observed state of Backup. +type BackupStatus struct { + // Backup status that is common across all database engines.
+ commonv1alpha1.BackupStatus `json:",inline"` + + BackupID string `json:"backupid,omitempty"` + BackupTime string `json:"backuptime,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.instance",name="Instance Name",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.type",name="Backup Type",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.subType",name="Backup SubType",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.dop",name="DOP",type="integer" +// +kubebuilder:printcolumn:JSONPath=".spec.backupset",name="BS/IC",type="boolean" +// +kubebuilder:printcolumn:JSONPath=".spec.gcsPath",name="GCS Path",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Phase",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.backupid",name="Backup ID",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.backuptime",name="Backup Time",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="ReadyStatus",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,name="ReadyReason",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].message`,name="ReadyMessage",type="string",priority=1 + +// Backup is the Schema for the backups API. +type Backup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupSpec `json:"spec,omitempty"` + Status BackupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupList contains a list of Backup. +type BackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Backup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Backup{}, &BackupList{}) +} diff --git a/oracle/api/v1alpha1/backupschedule_types.go b/oracle/api/v1alpha1/backupschedule_types.go new file mode 100644 index 0000000..fc695c2 --- /dev/null +++ b/oracle/api/v1alpha1/backupschedule_types.go @@ -0,0 +1,123 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// BackupRetentionPolicy is a policy used to trigger automatic deletion of +// backups produced by a particular schedule. Deletion will be triggered by +// count (keeping a maximum number of backups around). +type BackupRetentionPolicy struct { + // BackupRetention is the number of successful backups to keep around. + // The default is 7. + // A value of 0 means "do not delete backups based on count". Max of 512 + // allows for ~21 days of hourly backups or ~1.4 years of daily backups. 
+ // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=512 + // +optional + BackupRetention *int32 `json:"backupRetention,omitempty"` +} + +// BackupHistoryRecord is a historical record of a Backup. +type BackupHistoryRecord struct { + // BackupName is the name of the Backup that gets created. + // +nullable + BackupName string `json:"backupName"` + + // CreationTime is the time that the Backup gets created. + // +nullable + CreationTime metav1.Time `json:"creationTime"` + + // Phase tells the state of the Backup. + // +optional + Phase commonv1alpha1.BackupPhase `json:"phase,omitempty"` +} + +// BackupScheduleSpec defines the desired state of BackupSchedule. +type BackupScheduleSpec struct { + // BackupSpec defines the Backup that will be created on the provided schedule. + BackupSpec BackupSpec `json:"backupSpec"` + + // Schedule is a cron-style expression of the schedule on which Backup will + // be created. For allowed syntax, see en.wikipedia.org/wiki/Cron and + // godoc.org/github.com/robfig/cron. + Schedule string `json:"schedule"` + + // Suspend tells the controller to suspend operations - both creation of new + // Backup and retention actions. This will not have any effect on backups + // currently in progress. Default is false. + // +optional + Suspend *bool `json:"suspend,omitempty"` + + // StartingDeadlineSeconds is an optional deadline in seconds for starting the + // backup creation if it misses the scheduled time for any reason. + // The default is 30 seconds. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` + + // BackupRetentionPolicy is the policy used to trigger automatic deletion of + // backups produced from this BackupSchedule. + // +optional + BackupRetentionPolicy *BackupRetentionPolicy `json:"backupRetentionPolicy,omitempty"` +} + +// BackupScheduleStatus defines the observed state of BackupSchedule. +type BackupScheduleStatus struct { + // LastBackupTime is the time the last Backup was created for this + // BackupSchedule. + // +optional + // +nullable + LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` + + // Conditions of the BackupSchedule. + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // BackupTotal stores the total number of currently existing backups created + // by this backupSchedule. + BackupTotal *int32 `json:"backupTotal,omitempty"` + + // BackupHistory stores the records for up to 7 of the latest backups. + // +optional + BackupHistory []BackupHistoryRecord `json:"backupHistory,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupSchedule is the Schema for the backupschedules API. +type BackupSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupScheduleSpec `json:"spec,omitempty"` + Status BackupScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupScheduleList contains a list of BackupSchedule.
+type BackupScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupSchedule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupSchedule{}, &BackupScheduleList{}) +} diff --git a/oracle/api/v1alpha1/config_types.go b/oracle/api/v1alpha1/config_types.go new file mode 100644 index 0000000..a1c0792 --- /dev/null +++ b/oracle/api/v1alpha1/config_types.go @@ -0,0 +1,100 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// ConfigSpec defines the desired state of Config. +type ConfigSpec struct { + // Service agent and other data plane agent images. + // This is an optional map that allows a customer to specify agent images + // different from those chosen/provided by the Oracle Operator by default. + // See an example of how this map can be used in + // config/samples/v1alpha1_config_gcp1.yaml + // +optional + Images map[string]string `json:"images,omitempty"` + + // Deployment platform. + // Presently supported values are: GCP (default), BareMetal. + // +optional + // +kubebuilder:validation:Enum=GCP;BareMetal;Minikube + Platform string `json:"platform,omitempty"` + + // Disks slice describes at minimum two disks: + // data and log (archive log), and optionally a backup disk. + Disks []commonv1alpha1.DiskSpec `json:"disks,omitempty"` + + // Storage class to use for dynamic provisioning. + // This value varies depending on a platform. + // For GCP (and the default) it is "csi-gce-pd". + // +optional + StorageClass string `json:"storageClass,omitempty"` + + // Volume Snapshot class to use for storage snapshots. + // This value varies depending on a platform. + // For GCP (and the default) it is "csi-gce-pd-snapshot-class". + // +optional + VolumeSnapshotClass string `json:"volumeSnapshotClass,omitempty"` + + // Log Levels for the various components. + // This is an optional map for component -> log level + // See an example of how this map can be used in + // config/samples/v1alpha1_config_gcp1.yaml + // +optional + LogLevel map[string]string `json:"logLevel,omitempty"` + + // HostAntiAffinityNamespaces is an optional list of namespaces that need + // to be included in anti-affinity by hostname rule. The effect of the rule + // is forbidding scheduling a database pod in the current namespace on a host + // that already runs a database pod in any of the listed namespaces. + // +optional + HostAntiAffinityNamespaces []string `json:"hostAntiAffinityNamespaces,omitempty"` +} + +// ConfigStatus defines the observed state of Config. 
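+//
+// An illustrative ConfigSpec sketch (added as a review aid, not part of this
+// change; the storage and snapshot classes are the GCP defaults documented
+// above, while the image key and value are hypothetical):
+//
+//	spec := ConfigSpec{
+//		Platform:            "GCP",
+//		StorageClass:        "csi-gce-pd",
+//		VolumeSnapshotClass: "csi-gce-pd-snapshot-class",
+//		Images:              map[string]string{"service": "gcr.io/example/oracle:latest"},
+//	}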
+type ConfigStatus struct { +} + +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:JSONPath=".spec.platform",name="Platform",type="string" +// +kubebuilder:printcolumn:name="Disk Sizes",type="string",JSONPath=".spec.diskSizes" +// +kubebuilder:printcolumn:JSONPath=".spec.storageClass",name="Storage Class",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.volumeSnapshotClass",name="Volume Snapshot Class",type="string" + +// Config is the Schema for the configs API. +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ConfigSpec `json:"spec,omitempty"` + Status ConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigList contains a list of Config. +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Config `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Config{}, &ConfigList{}) +} diff --git a/oracle/api/v1alpha1/cronanything_types.go b/oracle/api/v1alpha1/cronanything_types.go new file mode 100644 index 0000000..6383e57 --- /dev/null +++ b/oracle/api/v1alpha1/cronanything_types.go @@ -0,0 +1,379 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // CronAnythingCreatedByLabel is the name of the label used by CronAnything to + // denote the entity which created the resource. + CronAnythingCreatedByLabel = "oracle.db.anthosapis.com/created-by" + + // CronAnythingScheduleTimeLabel is the name of the label used by CronAnything + // to denote the schedule time. + CronAnythingScheduleTimeLabel = "oracle.db.anthosapis.com/schedule-time" + + // TriggerHistoryMaxLength defines the maximum number of trigger history to + // keep track of by CronAnything. + TriggerHistoryMaxLength = 10 +) + +// CronAnythingSpec defines the desired state of CronAnything. +type CronAnythingSpec struct { + // Schedule defines a time-based schedule, e.g., a standard cron schedule such + // as “@every 10m”. This field is mandatory and mutable. If it is changed, + // resources will simply be created at the new interval from then on. + Schedule string `json:"schedule"` + + // TriggerDeadlineSeconds defines Deadline in seconds for creating the + // resource if it missed the scheduled time. If no deadline is provided, the + // resource will be created no matter how far after the scheduled time. + // If multiple triggers were missed, only the last will be triggered and only + // one resource will be created. This field is mutable and changing it + // will affect the creation of new resources from that point in time. + // +optional + TriggerDeadlineSeconds *int64 `json:"triggerDeadlineSeconds,omitempty"` + + // ConcurrencyPolicy specifies how to treat concurrent resources if the + // resource provides a status path that exposes completion. 
+	// The default policy if not provided is to allow a new resource to be created
+	// even if an active resource already exists.
+	// If the resource doesn’t have an active/completed status, the only supported
+	// concurrency policy is to allow creating new resources.
+	// This field is mutable. If the policy is changed to a more stringent policy
+	// while multiple resources are active, it will not delete any existing
+	// resources. The exception is if a creation of a new resource is triggered
+	// and the policy has been changed to Replace. If multiple resources are
+	// active, they will all be deleted and replaced by a new resource.
+	// +optional
+	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+
+	// Suspend tells the controller to suspend creation of additional resources.
+	// The default value is false. This field is mutable. It will not affect any
+	// existing resources, but only affect creation of additional resources.
+	// +optional
+	Suspend *bool `json:"suspend,omitempty"`
+
+	// FinishableStrategy defines how the CronAnything controller can decide if a
+	// resource has completed.
+	// Some resources will do some work after they have been created and at some
+	// point be finished. Jobs are the most common example.
+	// If no strategy is defined, it is assumed that the resources never finish.
+	// +optional
+	FinishableStrategy *FinishableStrategy `json:"finishableStrategy,omitempty"`
+
+	// Template is a template of a resource type for which instances are to
+	// be created on the given schedule.
+	// This field is mandatory and it must contain a valid template for an
+	// existing apiVersion and kind in the cluster.
+	// It is immutable, so if the template needs to change, the whole CronAnything
+	// resource should be replaced.
+	Template runtime.RawExtension `json:"template"`
+
+	// TotalResourceLimit specifies the total number of children allowed for a
+	// particular CronAnything resource. If this limit is reached, no additional
+	// resources will be created.
+	// This limit is mostly meant to avoid runaway creation of resources that
+	// could bring down the cluster. Both finished and unfinished resources count
+	// against this limit.
+	// This field is mutable. If it is changed to a lower value than the existing
+	// number of resources, none of the existing resources will be deleted as a
+	// result, but no additional resources will be created until the number of
+	// child resources goes below the limit.
+	// The field is optional with a default value of 100.
+	// +optional
+	TotalResourceLimit *int32 `json:"totalResourceLimit,omitempty"`
+
+	// Retention defines the retention policy for resources created by
+	// CronAnything. If no retention policy is defined, CronAnything will never
+	// delete resources, so cleanup must be handled through some other process.
+	// +optional
+	Retention *ResourceRetention `json:"retention,omitempty"`
+
+	// CascadeDelete tells CronAnything to set up owner references from the
+	// created resources to the CronAnything resource. This means that if the
+	// CronAnything resource is deleted, all resources created by it will also be
+	// deleted. This is an optional field that defaults to false.
+	// +optional
+	CascadeDelete *bool `json:"cascadeDelete,omitempty"`
+
+	// ResourceBaseName specifies the base name for the resources created by
+	// CronAnything, which will be named using the format
+	// <ResourceBaseName>-<Timestamp>.
+	// This field is optional, and the default
+	// is to use the name of the CronAnything resource as the ResourceBaseName.
+	// +optional
+	ResourceBaseName *string `json:"resourceBaseName,omitempty"`
+
+	// ResourceTimestampFormat defines the format of the timestamp in the name of
+	// resources created by CronAnything: <ResourceBaseName>-<Timestamp>.
+	// This field is optional, and the default is to format the timestamp as unix
+	// time. If provided, it must be compatible with time.Format in golang.
+	// +optional
+	ResourceTimestampFormat *string `json:"resourceTimestampFormat,omitempty"`
+}
+
+// ResourceRetention specifies the retention policy for resources.
+type ResourceRetention struct {
+	// The number of completed resources to keep before deleting them. This
+	// only affects finishable resources and the default value is 3.
+	// This field is mutable and if it is changed to a number lower than
+	// the current number of finished resources, the oldest ones will
+	// eventually be deleted until the number of finished resources matches
+	// the limit.
+	// +optional
+	HistoryCountLimit *int32 `json:"historyCountLimit,omitempty"`
+
+	// The time since completion that a resource is kept before deletion. This
+	// only affects finishable resources. This does not have any default value and
+	// if it is not provided, HistoryCountLimit will be used to prune completed
+	// resources.
+	// If both HistoryCountLimit and HistoryTimeLimitSeconds are set, it is treated
+	// as an OR operation.
+	// +optional
+	HistoryTimeLimitSeconds *uint64 `json:"historyTimeLimitSeconds,omitempty"`
+
+	// ResourceTimestampStrategy specifies how the CronAnything controller
+	// can find the age of a resource. This is needed to support retention.
+	ResourceTimestampStrategy ResourceTimestampStrategy `json:"resourceTimestampStrategy"`
+}
+
+// FinishableStrategyType specifies the type of the field which tells whether
+// a resource is finished.
+type FinishableStrategyType string
+
+const (
+	// FinishableStrategyTimestampField specifies deriving whether a resource is
+	// finished from a timestamp field.
+	FinishableStrategyTimestampField FinishableStrategyType = "TimestampField"
+
+	// FinishableStrategyStringField specifies deriving whether a resource is
+	// finished from a string field.
+	FinishableStrategyStringField FinishableStrategyType = "StringField"
+)
+
+// FinishableStrategy specifies how the CronAnything controller can decide
+// whether a created resource has completed. This is needed for any concurrency
+// policies other than AllowConcurrent.
+type FinishableStrategy struct {
+
+	// Type tells which strategy should be used.
+	Type FinishableStrategyType `json:"type"`
+
+	// TimestampField contains the details for how the CronAnything controller
+	// can find the timestamp field on the resource in order to decide if the
+	// resource has completed.
+	// +optional
+	TimestampField *TimestampFieldStrategy `json:"timestampField,omitempty"`
+
+	// StringField contains the details for how the CronAnything controller
+	// can find the string field on the resource needed to decide if the resource
+	// has completed. It also lists the values that mean the resource has completed.
+	// +optional
+	StringField *StringFieldStrategy `json:"stringField,omitempty"`
+}
+
+// TimestampFieldStrategy defines how the CronAnything controller can find
+// a field on the resource that contains a timestamp. The contract here is that
+// if the field contains a valid timestamp the resource is considered finished.
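+// For example (an illustrative sketch added for review, not part of this
+// change), a strategy that treats a Job-like resource as finished once a
+// completion timestamp appears; the field path value is an assumption about
+// the supported syntax, not taken from this patch:
+//
+//	strategy := FinishableStrategy{
+//		Type: FinishableStrategyTimestampField,
+//		TimestampField: &TimestampFieldStrategy{
+//			FieldPath: "{.status.completionTime}", // hypothetical path
+//		},
+//	}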
+type TimestampFieldStrategy struct {
+
+	// The path to the field on the resource that contains the timestamp.
+	FieldPath string `json:"fieldPath"`
+}
+
+// StringFieldStrategy defines how the CronAnything controller can find and
+// use the value of a field on the resource to decide if it has finished.
+type StringFieldStrategy struct {
+
+	// The path to the field on the resource that contains a string value.
+	FieldPath string `json:"fieldPath"`
+
+	// The values of the field that mean the resource has completed.
+	FinishedValues []string `json:"finishedValues"`
+}
+
+// ResourceTimestampStrategyType specifies the strategy to use for getting
+// the resource timestamp.
+type ResourceTimestampStrategyType string
+
+const (
+	// ResourceTimestampStrategyField specifies getting the timestamp for the
+	// resource from a field on the resource.
+	ResourceTimestampStrategyField ResourceTimestampStrategyType = "Field"
+)
+
+// ResourceTimestampStrategy specifies how the CronAnything controller can find
+// the timestamp on the resource, which in turn decides the order in which
+// resources are deleted based on the retention policy.
+type ResourceTimestampStrategy struct {
+
+	// Type tells which strategy should be used.
+	Type ResourceTimestampStrategyType `json:"type"`
+
+	// FieldResourceTimestampStrategy specifies how the CronAnything controller
+	// can find the timestamp for the resource from a field.
+	// +optional
+	FieldResourceTimestampStrategy *FieldResourceTimestampStrategy `json:"field,omitempty"`
+}
+
+// FieldResourceTimestampStrategy defines how the CronAnything controller can
+// find the timestamp for a resource.
+type FieldResourceTimestampStrategy struct {
+
+	// The path to the field on the resource that contains the timestamp.
+	FieldPath string `json:"fieldPath"`
+}
+
+// ConcurrencyPolicy specifies the policy to use for concurrency control.
+type ConcurrencyPolicy string
+
+const (
+	// AllowConcurrent policy specifies allowing creation of new resources
+	// regardless of how many other currently active resources exist.
+	AllowConcurrent ConcurrencyPolicy = "Allow"
+
+	// ForbidConcurrent policy specifies not allowing creation of a new resource
+	// if any existing resources are active.
+	ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+	// ReplaceConcurrent policy specifies deleting any existing, active resources
+	// before creating a new one.
+	ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// CronAnythingStatus defines the observed state of CronAnything.
+type CronAnythingStatus struct {
+
+	// LastScheduleTime keeps track of the scheduled time for the last
+	// successfully completed creation of a resource.
+	// This is used by the controller to determine when the next resource creation
+	// should happen. If creation of a resource is delayed for any reason but
+	// eventually does happen, this value will still be updated to the time when
+	// it was originally scheduled to happen.
+	// +optional
+	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
+
+	// TriggerHistory keeps track of the status for the last 10 triggers. This
+	// allows users of CronAnything to see whether any triggers failed. It is
+	// important to know that this only keeps track of whether a trigger was
+	// successfully executed (as in creating the given resource), not whether the
+	// created resource was itself successful. For this information, any users
+	// of CronAnything should observe the resources created.
+	// +optional
+	TriggerHistory []TriggerHistoryRecord `json:"triggerHistory,omitempty"`
+
+	// PendingTrigger keeps track of any triggers that are past their trigger time,
+	// but for some reason have not been completed yet. This is typically a result
+	// of the create operation failing.
+	// +optional
+	PendingTrigger *PendingTrigger `json:"pendingTrigger,omitempty"`
+}
+
+// PendingTrigger keeps information about triggers that should have been
+// completed but, due to some kind of error, are still pending. They will
+// typically remain in this state until either the issue has been resolved and
+// the resource in question can be created, the triggerDeadlineSeconds is
+// reached and we stop trying, or the next trigger time is reached at which time
+// we consider the previous trigger as failed.
+type PendingTrigger struct {
+
+	// ScheduleTime is the time when this trigger was scheduled to be executed.
+	ScheduleTime metav1.Time `json:"scheduleTime"`
+
+	// Result tells why this trigger is in the pending state, i.e. what prevented
+	// it from completing successfully.
+	Result TriggerResult `json:"result"`
+}
+
+// TriggerHistoryRecord contains information about the result of a trigger. It
+// can either have completed successfully or, if it did not, the record will
+// provide information about the cause of the failure.
+type TriggerHistoryRecord struct {
+
+	// ScheduleTime is the time when this trigger was scheduled to be executed.
+	ScheduleTime metav1.Time `json:"scheduleTime"`
+
+	// CreationTimestamp is the time when this record was created. This is thus
+	// also the time at which the final result of the trigger was decided.
+	CreationTimestamp metav1.Time `json:"creationTimestamp"`
+
+	// Result contains the outcome of a trigger. It can either be CreateSucceeded,
+	// which means the given resource was created as intended, or it can be one
+	// of several error messages.
+	Result TriggerResult `json:"result"`
+}
+
+// TriggerResult specifies the result of a trigger.
+type TriggerResult string
+
+const (
+	// TriggerResultMissed means the trigger was not able to complete until the
+	// next trigger fired. Thus the trigger missed its window for being executed.
+	TriggerResultMissed TriggerResult = "MissedSchedule"
+
+	// TriggerResultCreateFailed means the create operation for a resource failed.
+	// This itself doesn't cause the trigger to fail, but this status will be
+	// reported if failing create operations are the reason a trigger misses its
+	// window for being executed.
+	TriggerResultCreateFailed TriggerResult = "CreateFailed"
+
+	// TriggerResultCreateSucceeded means the trigger was successful.
+	TriggerResultCreateSucceeded TriggerResult = "CreateSucceeded"
+
+	// TriggerResultResourceLimitReached means the trigger could not be completed
+	// as the resource limit was reached and it is not possible to create
+	// additional resources.
+	TriggerResultResourceLimitReached TriggerResult = "ResourceLimitReached"
+
+	// TriggerResultForbidConcurrent means the trigger could not be completed as
+	// there is already an unfinished resource and the concurrency policy forbids
+	// any concurrently running resources.
+	TriggerResultForbidConcurrent TriggerResult = "ForbidConcurrent"
+
+	// TriggerResultDeadlineExceeded means the trigger could not be completed as
+	// the deadline for how delayed a trigger can be was reached.
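+	//
+	// As an illustrative review note (not part of this change), a consumer of
+	// the trigger history might branch on these values; rec here is a
+	// hypothetical TriggerHistoryRecord:
+	//
+	//	switch rec.Result {
+	//	case TriggerResultCreateSucceeded:
+	//		// the scheduled resource was created
+	//	case TriggerResultMissed, TriggerResultDeadlineExceeded:
+	//		// the trigger window was lost; see Status.PendingTrigger
+	//	}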
+ TriggerResultDeadlineExceeded TriggerResult = "DeadlineExceeded" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronAnything is the Schema for the cronanythings API. +// +k8s:openapi-gen=true +type CronAnything struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CronAnythingSpec `json:"spec,omitempty"` + Status CronAnythingStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronAnythingList contains a list of CronAnything. +type CronAnythingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CronAnything `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CronAnything{}, &CronAnythingList{}) +} diff --git a/oracle/api/v1alpha1/database_types.go b/oracle/api/v1alpha1/database_types.go new file mode 100644 index 0000000..3ca96bc --- /dev/null +++ b/oracle/api/v1alpha1/database_types.go @@ -0,0 +1,113 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// DatabaseSpec defines the desired state of Database. +type DatabaseSpec struct { + // Database specs that are common across all database engines. + commonv1alpha1.DatabaseSpec `json:",inline"` + + // AdminPassword is the password for the sys admin of the database. + // +optional + // +kubebuilder:validation:MaxLength=30 + // +kubebuilder:validation:MinLength=5 + AdminPassword string `json:"admin_password,omitempty"` + + // AdminPasswordGsmSecretRef is a reference to the secret object containing + // sensitive information to pass to config agent. + // This field is optional, and may be empty if plaintext password is used. + // +optional + AdminPasswordGsmSecretRef *commonv1alpha1.GsmSecretReference `json:"adminPasswordGsmSecretRef,omitempty"` + + // Users specifies an optional list of users to be created in this database. + // +optional + Users []UserSpec `json:"users"` +} + +// UserSpec defines the desired state of the Database Users. +type UserSpec struct { + // User specs that are common across all database engines. + commonv1alpha1.UserSpec `json:",inline"` + + // Privileges specifies an optional list of privileges to grant to the user. + // +optional + Privileges []PrivilegeSpec `json:"privileges"` +} + +// PrivilegeSpec defines the desired state of roles and privileges. +type PrivilegeSpec string + +// DatabaseStatus defines the observed state of Database. +type DatabaseStatus struct { + // Database status that is common across all database engines. + commonv1alpha1.DatabaseStatus `json:",inline"` + + // List of user names. + UserNames []string `json:"usernames,omitempty"` + + // UserResourceVersions is a map of username to user resource version + // (plaintext or GSM). 
For GSM Resource version, use format: + // "projects/{ProjectId}/secrets/{SecretId}/versions/{Version}". + UserResourceVersions map[string]string `json:"UserResourceVersions,omitempty"` + + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // IsChangeApplied indicates whether database changes have been applied + // +optional + IsChangeApplied metav1.ConditionStatus `json:"isChangeApplied,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=genericdatabases,shortName=gdb +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.instance",name="Instance",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.usernames",name="Users",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Phase",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="DatabaseReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,name="DatabaseReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].message`,name="DatabaseReadyMessage",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="UserReady")].status`,name="UserReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="UserReady")].reason`,name="UserReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="UserReady")].message`,name="UserReadyMessage",type="string",priority=1 + +// Database is the Schema for the databases API. +type Database struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseSpec `json:"spec,omitempty"` + Status DatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatabaseList contains a list of Database. +type DatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Database `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Database{}, &DatabaseList{}) +} diff --git a/oracle/api/v1alpha1/export_types.go b/oracle/api/v1alpha1/export_types.go new file mode 100644 index 0000000..13b36a9 --- /dev/null +++ b/oracle/api/v1alpha1/export_types.go @@ -0,0 +1,111 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ExportSpec defines the desired state of Export +type ExportSpec struct { + // Instance is the resource name within namespace to export from. + // +required + Instance string `json:"instance"` + + // DatabaseName is the database resource name within Instance to export from. + // +required + DatabaseName string `json:"databaseName"` + + // Type of the Export. If omitted, the default of DataPump is assumed. 
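+	//
+	// An illustrative ExportSpec sketch (hypothetical names and bucket, added
+	// as a review aid; the remaining fields are defined below):
+	//
+	//	spec := ExportSpec{
+	//		Instance:         "mydb",
+	//		DatabaseName:     "pdb1",
+	//		ExportObjectType: "Schemas",
+	//		ExportObjects:    []string{"scott"},
+	//		GcsPath:          "gs://example-bucket/exports",
+	//	}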
+ // +kubebuilder:validation:Enum=DataPump + // +optional + Type string `json:"type,omitempty"` + + // ExportObjectType is the type of objects to export. If omitted, the default + // of Schemas is assumed. + // Supported options at this point are: Schemas or Tables. + // +kubebuilder:validation:Enum=Schemas;Tables + // +optional + ExportObjectType string `json:"exportObjectType,omitempty"` + + // ExportObjects are objects, schemas or tables, exported by DataPump. + // +required + ExportObjects []string `json:"exportObjects,omitempty"` + + // GcsPath is a full path in GCS bucket to transfer exported files to. + // A user is to ensure proper write access to the bucket from within the + // Oracle Operator. + // +required + GcsPath string `json:"gcsPath,omitempty"` + + // GcsLogPath is an optional full path in GCS. If set up ahead of time, export + // logs can be optionally transferred to set GCS bucket. A user is to ensure + // proper write access to the bucket from within the Oracle Operator. + // +optional + GcsLogPath string `json:"gcsLogPath,omitempty"` + + // FlashbackTime is an optional time. If this time is set, the SCN that most + // closely matches the time is found, and this SCN is used to enable the + // Flashback utility. The export operation is performed with data that is + // consistent up to this SCN. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + // +optional + FlashbackTime *metav1.Time `json:"flashbackTime,omitempty"` +} + +// ExportStatus defines the observed state of Export. +type ExportStatus struct { + // Conditions represents the latest available observations + // of the export's current state. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.instance",name="Instance Name",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.databaseName",name="Database Name",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.exportObjectType",name="Export Object Type",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.exportObjects",name="Export Objects",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.gcsPath",name="GCS Path",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.gcsLogPath",name="GCS Log Path",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="ReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,name="ReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].message`,name="ReadyMessage",type="string",priority=1 + +// Export is the Schema for the exports API. +type Export struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExportSpec `json:"spec,omitempty"` + Status ExportStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExportList contains a list of Export. 
+type ExportList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Export `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Export{}, &ExportList{}) +} diff --git a/oracle/api/v1alpha1/groupversion_info.go b/oracle/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..04b27f4 --- /dev/null +++ b/oracle/api/v1alpha1/groupversion_info.go @@ -0,0 +1,35 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 contains API Schema definitions for the Oracle v1alpha1 API +// group. +// +kubebuilder:object:generate=true +// +groupName=oracle.db.anthosapis.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "oracle.db.anthosapis.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/oracle/api/v1alpha1/import_types.go b/oracle/api/v1alpha1/import_types.go new file mode 100644 index 0000000..eee4011 --- /dev/null +++ b/oracle/api/v1alpha1/import_types.go @@ -0,0 +1,90 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ImportSpec defines the desired state of Import. +type ImportSpec struct { + // Instance is the resource name within same namespace to import into. + // +required + Instance string `json:"instance,omitempty"` + + // DatabaseName is the database resource name within Instance to import into. + // +required + DatabaseName string `json:"databaseName,omitempty"` + + // Type of the Import. If not specified, the default of DataPump is assumed, + // which is the only supported option currently. + // +kubebuilder:validation:Enum=DataPump + // +optional + Type string `json:"type,omitempty"` + + // GcsPath is a full path to the input file in GCS containing import data. + // A user is to ensure proper write access to the bucket from within the + // Oracle Operator. + // +required + GcsPath string `json:"gcsPath,omitempty"` + + // GcsLogPath is an optional path in GCS to copy import log to. 
+ // A user is to ensure proper write access to the bucket from within the + // Oracle Operator. + // +optional + GcsLogPath string `json:"gcsLogPath,omitempty"` +} + +// ImportStatus defines the observed state of Import. +type ImportStatus struct { + // Conditions represents the latest available observations + // of the import's current state. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.instance",name="Instance Name",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.databaseName",name="Database Name",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.gcsPath",name="GCS Path",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="ReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,name="ReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].message`,name="ReadyMessage",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".spec.gcsLogPath",name="GCS Log Path",type="string" + +// Import is the Schema for the imports API. +type Import struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ImportSpec `json:"spec,omitempty"` + Status ImportStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImportList contains a list of Import. +type ImportList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Import `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Import{}, &ImportList{}) +} diff --git a/oracle/api/v1alpha1/instance_types.go b/oracle/api/v1alpha1/instance_types.go new file mode 100644 index 0000000..0a4f69d --- /dev/null +++ b/oracle/api/v1alpha1/instance_types.go @@ -0,0 +1,221 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" +) + +// This is the contract. This Instance is Anthos DB Operator compliant. +var _ commonv1alpha1.GenericInstance = &Instance{} + +// GenericInstanceSpec defines the common specifications for a Generic Instance. +func (i *Instance) GenericInstanceSpec() commonv1alpha1.GenericInstanceSpec { + return i.Spec.GenericInstanceSpec +} + +// GenericInstanceStatus defines the common status for a Generic Instance. +func (i *Instance) GenericInstanceStatus() commonv1alpha1.GenericInstanceStatus { + return i.Status.GenericInstanceStatus +} + +// Service is an Oracle Operator provided service. +type Service string + +// InstanceSpec defines the desired state of Instance. 
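+//
+// A minimal illustrative sketch of an InstanceSpec (hypothetical values,
+// added as a review aid; the defaults noted on the fields below apply when a
+// value is omitted):
+//
+//	spec := InstanceSpec{
+//		CDBName:       "GCLOUD",   // hypothetical CDB name
+//		CharacterSet:  "AL32UTF8", // the documented default
+//		MemoryPercent: 25,         // the documented default
+//	}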
+type InstanceSpec struct {
+	// GenericInstanceSpec represents the database engine agnostic
+	// part of the spec describing the desired state of an Instance.
+	commonv1alpha1.GenericInstanceSpec `json:",inline"`
+
+	// Restore and recovery request details.
+	// This section should normally be commented out unless an actual
+	// restore/recovery is required.
+	// +optional
+	Restore *RestoreSpec `json:"restore,omitempty"`
+
+	// DatabaseUID represents an OS UID of a user running a database.
+	// +optional
+	DatabaseUID *int64 `json:"databaseUID,omitempty"`
+
+	// DatabaseGID represents an OS group ID of a user running a database.
+	// +optional
+	DatabaseGID *int64 `json:"databaseGID,omitempty"`
+
+	// DBDomain is an optional attribute to set a database domain.
+	// +optional
+	DBDomain string `json:"dbDomain,omitempty"`
+
+	// CDBName is the intended name of the CDB attribute. If the CDBName is
+	// different from the original name (with which the CDB was created) the
+	// CDB will be renamed.
+	// +optional
+	CDBName string `json:"cdbName,omitempty"`
+
+	// DBUniqueName represents a unique database name that would be
+	// set for a database (if not provided, as a default,
+	// the [_generic|_] will be appended to a DatabaseName).
+	// +optional
+	DBUniqueName string `json:"dbUniqueName,omitempty"`
+
+	// CharacterSet used to create a database (the default is AL32UTF8).
+	// +optional
+	CharacterSet string `json:"characterSet,omitempty"`
+
+	// MemoryPercent represents the percentage of memory that should be allocated
+	// for Oracle SGA (default is 25%).
+	// +optional
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=100
+	MemoryPercent int `json:"memoryPercent,omitempty"`
+
+	// DBNetworkServiceOptions allows overriding some details of the Kubernetes
+	// Service created to expose a connection to the database.
+	// +optional
+	DBNetworkServiceOptions *DBNetworkServiceOptions `json:"dbNetworkServiceOptions,omitempty"`
+}
+
+// RestoreSpec defines optional restore and recovery attributes.
+type RestoreSpec struct {
+	// Backup type to restore from.
+	// Oracle only supports: Snapshot or Physical.
+	// +optional
+	// +kubebuilder:validation:Enum=Snapshot;Physical
+	BackupType commonv1alpha1.BackupType `json:"backupType,omitempty"`
+
+	// Backup name to restore from.
+	// +required
+	BackupID string `json:"backupId,omitempty"`
+
+	// Similar to a (physical) backup, optionally indicate a degree
+	// of parallelism, also known as DOP.
+	// +optional
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=100
+	Dop int32 `json:"dop,omitempty"`
+
+	// Restore time limit.
+	// Optional field defaulting to three times the backup time limit.
+	// Don't include the unit (minutes), just the integer.
+	// +optional
+	// +kubebuilder:validation:Minimum=0
+	TimeLimitMinutes int32 `json:"timeLimitMinutes,omitempty"`
+
+	// To overwrite an existing, up and running instance,
+	// an explicit authorization is required. This is a safeguard to avoid
+	// accidentally destroying a perfectly healthy (status=Ready) instance.
+	// +kubebuilder:validation:Enum=true;false
+	// +optional
+	Force bool `json:"force,omitempty"`
+
+	// Request version as a date-time to avoid accidental triggering of
+	// a restore operation when reapplying an older version of a resource file.
+	// If at least one restore operation has occurred, any further restore
+	// operation that has the same RequestTime as, or an earlier one than, the
+	// last restore operation will be ignored.
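+	// For example (illustrative): if a restore already ran with requestTime
+	// 2021-04-01T10:00:00Z, reapplying a manifest with that same or an earlier
+	// requestTime is ignored, while any later value triggers a new restore.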
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Format=date-time
+	RequestTime metav1.Time `json:"requestTime"`
+}
+
+// DBNetworkServiceOptions contains customization options of the Kubernetes
+// Service exposing a database connection.
+type DBNetworkServiceOptions struct {
+	// GCP contains Google Cloud specific attributes of Service configuration.
+	// +optional
+	GCP DBNetworkServiceOptionsGCP `json:"gcp,omitempty"`
+}
+
+// DBNetworkServiceOptionsGCP contains customization options of the Kubernetes
+// Service created for a database connection that are specific to GCP.
+type DBNetworkServiceOptionsGCP struct {
+	// LoadBalancerType lets you choose the type of load balancer, see
+	// https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+	// +kubebuilder:validation:Enum="";Internal;External
+	// +optional
+	LoadBalancerType string `json:"loadBalancerType,omitempty"`
+}
+
+// InstanceStatus defines the observed state of Instance.
+type InstanceStatus struct {
+	// GenericInstanceStatus represents the database engine agnostic
+	// part of the status describing the observed state of an Instance.
+	commonv1alpha1.GenericInstanceStatus `json:",inline"`
+
+	// List of database names (e.g. PDBs) hosted in the Instance.
+	DatabaseNames []string `json:"databasenames,omitempty"`
+
+	// Last backup ID.
+	BackupID string `json:"backupid,omitempty"`
+
+	// +optional
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Format=date-time
+	LastRestoreTime *metav1.Time `json:"lastRestoreTime,omitempty"`
+
+	// CurrentServiceImage stores the image name used by the database instance.
+	CurrentServiceImage string `json:"currentServiceImage,omitempty"`
+
+	// CurrentParameters stores the last successfully set instance parameters.
+	CurrentParameters map[string]string `json:"currentParameters,omitempty"`
+
+	// LastFailedParameterUpdate is used to avoid getting stuck in a loop of
+	// failed parameter updates.
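+	// For example (an illustrative reading, not part of this change): if
+	// setting the hypothetical parameter open_cursors to "300" failed,
+	// recording {"open_cursors": "300"} here lets the controller avoid
+	// retrying that same value indefinitely.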
+ LastFailedParameterUpdate map[string]string `json:"lastFailedParameterUpdate,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=genericinstances,shortName=ginst +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.type",name="DB Engine",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.version",name="Version",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.edition",name="Edition",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.endpoint",name="Endpoint",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.url",name="URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databasenames",name="DB Names",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.backupid",name="Backup ID",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="ReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,name="ReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].message`,name="ReadyMessage",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="DatabaseInstanceReady")].status`,name="DBReadyStatus",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="DatabaseInstanceReady")].reason`,name="DBReadyReason",type="string" +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="DatabaseInstanceReady")].message`,name="DBReadyMessage",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.isChangeApplied",name="IsChangeApplied",type="string",priority=1 + +// Instance is the Schema for the instances API. +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec InstanceSpec `json:"spec,omitempty"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instance. +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/oracle/api/v1alpha1/release_types.go b/oracle/api/v1alpha1/release_types.go new file mode 100644 index 0000000..f6d2bad --- /dev/null +++ b/oracle/api/v1alpha1/release_types.go @@ -0,0 +1,54 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ReleaseSpec defines the desired state of Release. +type ReleaseSpec struct { + Version string `json:"version"` +} + +// ReleaseStatus defines the observed state of Release. +type ReleaseStatus struct { + // Intentionally left empty. 
+} + +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:JSONPath=".spec.version",name="Release",type="string" + +// Release is the Schema for the releases API. +type Release struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ReleaseSpec `json:"spec,omitempty"` + Status ReleaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReleaseList contains a list of Release. +type ReleaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Release `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Release{}, &ReleaseList{}) +} diff --git a/oracle/api/v1alpha1/zz_generated.deepcopy.go b/oracle/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b3c74da --- /dev/null +++ b/oracle/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1314 @@ +// +build !ignore_autogenerated + +/* +Copyright 2021 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + apiv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupHistoryRecord) DeepCopyInto(out *BackupHistoryRecord) { + *out = *in + in.CreationTime.DeepCopyInto(&out.CreationTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHistoryRecord. +func (in *BackupHistoryRecord) DeepCopy() *BackupHistoryRecord { + if in == nil { + return nil + } + out := new(BackupHistoryRecord) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRetentionPolicy) DeepCopyInto(out *BackupRetentionPolicy) { + *out = *in + if in.BackupRetention != nil { + in, out := &in.BackupRetention, &out.BackupRetention + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRetentionPolicy. +func (in *BackupRetentionPolicy) DeepCopy() *BackupRetentionPolicy { + if in == nil { + return nil + } + out := new(BackupRetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSchedule) DeepCopyInto(out *BackupSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSchedule. +func (in *BackupSchedule) DeepCopy() *BackupSchedule { + if in == nil { + return nil + } + out := new(BackupSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupScheduleList) DeepCopyInto(out *BackupScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleList. +func (in *BackupScheduleList) DeepCopy() *BackupScheduleList { + if in == nil { + return nil + } + out := new(BackupScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupScheduleSpec) DeepCopyInto(out *BackupScheduleSpec) { + *out = *in + in.BackupSpec.DeepCopyInto(&out.BackupSpec) + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.BackupRetentionPolicy != nil { + in, out := &in.BackupRetentionPolicy, &out.BackupRetentionPolicy + *out = new(BackupRetentionPolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleSpec. +func (in *BackupScheduleSpec) DeepCopy() *BackupScheduleSpec { + if in == nil { + return nil + } + out := new(BackupScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupScheduleStatus) DeepCopyInto(out *BackupScheduleStatus) { + *out = *in + if in.LastBackupTime != nil { + in, out := &in.LastBackupTime, &out.LastBackupTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupTotal != nil { + in, out := &in.BackupTotal, &out.BackupTotal + *out = new(int32) + **out = **in + } + if in.BackupHistory != nil { + in, out := &in.BackupHistory, &out.BackupHistory + *out = make([]BackupHistoryRecord, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleStatus. +func (in *BackupScheduleStatus) DeepCopy() *BackupScheduleStatus { + if in == nil { + return nil + } + out := new(BackupScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + out.BackupSpec = in.BackupSpec + if in.BackupItems != nil { + in, out := &in.BackupItems, &out.BackupItems + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Backupset != nil { + in, out := &in.Backupset, &out.Backupset + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + in.BackupStatus.DeepCopyInto(&out.BackupStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]apiv1alpha1.DiskSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.HostAntiAffinityNamespaces != nil { + in, out := &in.HostAntiAffinityNamespaces, &out.HostAntiAffinityNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronAnything) DeepCopyInto(out *CronAnything) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronAnything. +func (in *CronAnything) DeepCopy() *CronAnything { + if in == nil { + return nil + } + out := new(CronAnything) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronAnything) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronAnythingList) DeepCopyInto(out *CronAnythingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronAnything, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronAnythingList. +func (in *CronAnythingList) DeepCopy() *CronAnythingList { + if in == nil { + return nil + } + out := new(CronAnythingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronAnythingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronAnythingSpec) DeepCopyInto(out *CronAnythingSpec) { + *out = *in + if in.TriggerDeadlineSeconds != nil { + in, out := &in.TriggerDeadlineSeconds, &out.TriggerDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.FinishableStrategy != nil { + in, out := &in.FinishableStrategy, &out.FinishableStrategy + *out = new(FinishableStrategy) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.TotalResourceLimit != nil { + in, out := &in.TotalResourceLimit, &out.TotalResourceLimit + *out = new(int32) + **out = **in + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(ResourceRetention) + (*in).DeepCopyInto(*out) + } + if in.CascadeDelete != nil { + in, out := &in.CascadeDelete, &out.CascadeDelete + *out = new(bool) + **out = **in + } + if in.ResourceBaseName != nil { + in, out := &in.ResourceBaseName, &out.ResourceBaseName + *out = new(string) + **out = **in + } + if in.ResourceTimestampFormat != nil { + in, out := &in.ResourceTimestampFormat, &out.ResourceTimestampFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronAnythingSpec. +func (in *CronAnythingSpec) DeepCopy() *CronAnythingSpec { + if in == nil { + return nil + } + out := new(CronAnythingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronAnythingStatus) DeepCopyInto(out *CronAnythingStatus) { + *out = *in + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = (*in).DeepCopy() + } + if in.TriggerHistory != nil { + in, out := &in.TriggerHistory, &out.TriggerHistory + *out = make([]TriggerHistoryRecord, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PendingTrigger != nil { + in, out := &in.PendingTrigger, &out.PendingTrigger + *out = new(PendingTrigger) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronAnythingStatus. +func (in *CronAnythingStatus) DeepCopy() *CronAnythingStatus { + if in == nil { + return nil + } + out := new(CronAnythingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBNetworkServiceOptions) DeepCopyInto(out *DBNetworkServiceOptions) { + *out = *in + out.GCP = in.GCP +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBNetworkServiceOptions. +func (in *DBNetworkServiceOptions) DeepCopy() *DBNetworkServiceOptions { + if in == nil { + return nil + } + out := new(DBNetworkServiceOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBNetworkServiceOptionsGCP) DeepCopyInto(out *DBNetworkServiceOptionsGCP) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBNetworkServiceOptionsGCP. +func (in *DBNetworkServiceOptionsGCP) DeepCopy() *DBNetworkServiceOptionsGCP { + if in == nil { + return nil + } + out := new(DBNetworkServiceOptionsGCP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Database) DeepCopyInto(out *Database) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. +func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Database) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseList) DeepCopyInto(out *DatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Database, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList. +func (in *DatabaseList) DeepCopy() *DatabaseList { + if in == nil { + return nil + } + out := new(DatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { + *out = *in + out.DatabaseSpec = in.DatabaseSpec + if in.AdminPasswordGsmSecretRef != nil { + in, out := &in.AdminPasswordGsmSecretRef, &out.AdminPasswordGsmSecretRef + *out = new(apiv1alpha1.GsmSecretReference) + **out = **in + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]UserSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. +func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { + if in == nil { + return nil + } + out := new(DatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { + *out = *in + in.DatabaseStatus.DeepCopyInto(&out.DatabaseStatus) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserResourceVersions != nil { + in, out := &in.UserResourceVersions, &out.UserResourceVersions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. +func (in *DatabaseStatus) DeepCopy() *DatabaseStatus { + if in == nil { + return nil + } + out := new(DatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Export) DeepCopyInto(out *Export) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Export. +func (in *Export) DeepCopy() *Export { + if in == nil { + return nil + } + out := new(Export) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Export) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportList) DeepCopyInto(out *ExportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Export, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportList. +func (in *ExportList) DeepCopy() *ExportList { + if in == nil { + return nil + } + out := new(ExportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportSpec) DeepCopyInto(out *ExportSpec) { + *out = *in + if in.ExportObjects != nil { + in, out := &in.ExportObjects, &out.ExportObjects + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FlashbackTime != nil { + in, out := &in.FlashbackTime, &out.FlashbackTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportSpec. +func (in *ExportSpec) DeepCopy() *ExportSpec { + if in == nil { + return nil + } + out := new(ExportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportStatus) DeepCopyInto(out *ExportStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportStatus. +func (in *ExportStatus) DeepCopy() *ExportStatus { + if in == nil { + return nil + } + out := new(ExportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldResourceTimestampStrategy) DeepCopyInto(out *FieldResourceTimestampStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldResourceTimestampStrategy. +func (in *FieldResourceTimestampStrategy) DeepCopy() *FieldResourceTimestampStrategy { + if in == nil { + return nil + } + out := new(FieldResourceTimestampStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FinishableStrategy) DeepCopyInto(out *FinishableStrategy) { + *out = *in + if in.TimestampField != nil { + in, out := &in.TimestampField, &out.TimestampField + *out = new(TimestampFieldStrategy) + **out = **in + } + if in.StringField != nil { + in, out := &in.StringField, &out.StringField + *out = new(StringFieldStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FinishableStrategy. +func (in *FinishableStrategy) DeepCopy() *FinishableStrategy { + if in == nil { + return nil + } + out := new(FinishableStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Import) DeepCopyInto(out *Import) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import. +func (in *Import) DeepCopy() *Import { + if in == nil { + return nil + } + out := new(Import) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Import) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportList) DeepCopyInto(out *ImportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Import, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportList. +func (in *ImportList) DeepCopy() *ImportList { + if in == nil { + return nil + } + out := new(ImportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportSpec) DeepCopyInto(out *ImportSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportSpec. +func (in *ImportSpec) DeepCopy() *ImportSpec { + if in == nil { + return nil + } + out := new(ImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportStatus) DeepCopyInto(out *ImportStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportStatus. +func (in *ImportStatus) DeepCopy() *ImportStatus { + if in == nil { + return nil + } + out := new(ImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance) DeepCopyInto(out *Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. +func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceList) DeepCopyInto(out *InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList. 
+func (in *InstanceList) DeepCopy() *InstanceList { + if in == nil { + return nil + } + out := new(InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { + *out = *in + in.GenericInstanceSpec.DeepCopyInto(&out.GenericInstanceSpec) + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreSpec) + (*in).DeepCopyInto(*out) + } + if in.DatabaseUID != nil { + in, out := &in.DatabaseUID, &out.DatabaseUID + *out = new(int64) + **out = **in + } + if in.DatabaseGID != nil { + in, out := &in.DatabaseGID, &out.DatabaseGID + *out = new(int64) + **out = **in + } + if in.DBNetworkServiceOptions != nil { + in, out := &in.DBNetworkServiceOptions, &out.DBNetworkServiceOptions + *out = new(DBNetworkServiceOptions) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec. +func (in *InstanceSpec) DeepCopy() *InstanceSpec { + if in == nil { + return nil + } + out := new(InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.GenericInstanceStatus.DeepCopyInto(&out.GenericInstanceStatus) + if in.DatabaseNames != nil { + in, out := &in.DatabaseNames, &out.DatabaseNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LastRestoreTime != nil { + in, out := &in.LastRestoreTime, &out.LastRestoreTime + *out = (*in).DeepCopy() + } + if in.CurrentParameters != nil { + in, out := &in.CurrentParameters, &out.CurrentParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LastFailedParameterUpdate != nil { + in, out := &in.LastFailedParameterUpdate, &out.LastFailedParameterUpdate + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. +func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PendingTrigger) DeepCopyInto(out *PendingTrigger) { + *out = *in + in.ScheduleTime.DeepCopyInto(&out.ScheduleTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PendingTrigger. +func (in *PendingTrigger) DeepCopy() *PendingTrigger { + if in == nil { + return nil + } + out := new(PendingTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. 
+func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Release) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseList) DeepCopyInto(out *ReleaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Release, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseList. +func (in *ReleaseList) DeepCopy() *ReleaseList { + if in == nil { + return nil + } + out := new(ReleaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReleaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseSpec) DeepCopyInto(out *ReleaseSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseSpec. +func (in *ReleaseSpec) DeepCopy() *ReleaseSpec { + if in == nil { + return nil + } + out := new(ReleaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseStatus) DeepCopyInto(out *ReleaseStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseStatus. +func (in *ReleaseStatus) DeepCopy() *ReleaseStatus { + if in == nil { + return nil + } + out := new(ReleaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRetention) DeepCopyInto(out *ResourceRetention) { + *out = *in + if in.HistoryCountLimit != nil { + in, out := &in.HistoryCountLimit, &out.HistoryCountLimit + *out = new(int32) + **out = **in + } + if in.HistoryTimeLimitSeconds != nil { + in, out := &in.HistoryTimeLimitSeconds, &out.HistoryTimeLimitSeconds + *out = new(uint64) + **out = **in + } + in.ResourceTimestampStrategy.DeepCopyInto(&out.ResourceTimestampStrategy) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRetention. +func (in *ResourceRetention) DeepCopy() *ResourceRetention { + if in == nil { + return nil + } + out := new(ResourceRetention) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceTimestampStrategy) DeepCopyInto(out *ResourceTimestampStrategy) { + *out = *in + if in.FieldResourceTimestampStrategy != nil { + in, out := &in.FieldResourceTimestampStrategy, &out.FieldResourceTimestampStrategy + *out = new(FieldResourceTimestampStrategy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTimestampStrategy. +func (in *ResourceTimestampStrategy) DeepCopy() *ResourceTimestampStrategy { + if in == nil { + return nil + } + out := new(ResourceTimestampStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { + *out = *in + in.RequestTime.DeepCopyInto(&out.RequestTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. +func (in *RestoreSpec) DeepCopy() *RestoreSpec { + if in == nil { + return nil + } + out := new(RestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringFieldStrategy) DeepCopyInto(out *StringFieldStrategy) { + *out = *in + if in.FinishedValues != nil { + in, out := &in.FinishedValues, &out.FinishedValues + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringFieldStrategy. +func (in *StringFieldStrategy) DeepCopy() *StringFieldStrategy { + if in == nil { + return nil + } + out := new(StringFieldStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestampFieldStrategy) DeepCopyInto(out *TimestampFieldStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestampFieldStrategy. +func (in *TimestampFieldStrategy) DeepCopy() *TimestampFieldStrategy { + if in == nil { + return nil + } + out := new(TimestampFieldStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerHistoryRecord) DeepCopyInto(out *TriggerHistoryRecord) { + *out = *in + in.ScheduleTime.DeepCopyInto(&out.ScheduleTime) + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerHistoryRecord. +func (in *TriggerHistoryRecord) DeepCopy() *TriggerHistoryRecord { + if in == nil { + return nil + } + out := new(TriggerHistoryRecord) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.UserSpec.DeepCopyInto(&out.UserSpec) + if in.Privileges != nil { + in, out := &in.Privileges, &out.Privileges + *out = make([]PrivilegeSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. 
+func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} diff --git a/oracle/build/BUILD.bazel b/oracle/build/BUILD.bazel new file mode 100644 index 0000000..c4f9e42 --- /dev/null +++ b/oracle/build/BUILD.bazel @@ -0,0 +1,114 @@ +load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_push") + +# Create the full tarball by building :name.tar, the default target +# is just the name-layer.tar and cannot be imported. +# E.G: +# bazel build //build:dbinit.tar +# buildah pull docker-archive:bazel-bin/build/dbinit.tar + +# Read in workspace status values to configure _push targets. +# REGISTRY = "${PROW_IMAGE_REPO}".split("/")[0] +REGISTRY = "gcr.io" + +# PROJECT = "${PROW_IMAGE_REPO}".split("/")[1] +PROJECT = "{PROW_PROJECT}" + +TAG = "{PROW_IMAGE_TAG}" + +container_image( + name = "dbinit", + base = "//oracle:base_image_with_busybox", + directory = "agent_repo", + files = [ + "//oracle/cmd/dbdaemon", + "//oracle/cmd/dbdaemon_proxy", + "//oracle/cmd/init_oracle", + "//oracle/cmd/init_oracle:init_oracle_files", + "//oracle/pkg/database/provision:provision_files", + ], +) + +container_push( + name = "dbinit_push", + format = "OCI", + image = ":dbinit", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/dbinit", + tag = TAG, +) + +container_image( + name = "configagent", + base = "//oracle:base_image", + entrypoint = ["/config_agent"], + files = [ + "//oracle/cmd/config_agent", + ], + symlinks = {"/configagent": "/config_agent"}, +) + +container_push( + name = "configagent_push", + format = "OCI", + image = ":configagent", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/configagent", + tag = TAG, +) + +container_image( + name = "dbdaemon_client", + base = "//oracle:base_image_with_busybox", + files = [ + "//oracle/cmd/dbdaemon_client", + ], + symlinks = {"/dbdaemonclient": "/dbdaemon_client"}, +) + +container_push( + name = "dbdaemon_client_push", + format = "OCI", + image = ":dbdaemon_client", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/dbdaemonclient", + tag = TAG, +) + +container_image( + name = "loggingsidecar", + base = "//oracle:base_image", + files = [ + "//oracle/cmd/logging", + ], + symlinks = {"/logging_main": "/logging"}, +) + +container_push( + name = "loggingsidecar_push", + format = "OCI", + image = ":loggingsidecar", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/loggingsidecar", + tag = TAG, +) + +container_image( + name = "monitoring", + base = "//oracle:base_image_with_busybox", + entrypoint = ["/monitoring"], + files = [ + "//oracle/cmd/monitoring", + "//oracle/pkg/agents/monitoring:monitoring_files", + ], + ports = ["9161"], + symlinks = {"/monitoring_agent": "/monitoring"}, +) + +container_push( + name = "monitoring_push", + format = "OCI", + image = ":monitoring", + registry = REGISTRY, + repository = PROJECT + "/oracle.db.anthosapis.com/monitoring", + tag = TAG, +) diff --git a/oracle/build/config_agent/Dockerfile b/oracle/build/config_agent/Dockerfile new file mode 100644 index 0000000..aef00c8 --- /dev/null +++ b/oracle/build/config_agent/Dockerfile @@ -0,0 +1,36 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the config agent binary
+FROM docker.io/golang:1.15 as builder
+
+WORKDIR /build
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Copy the go source
+COPY oracle/pkg/ oracle/pkg/
+COPY oracle/cmd/ oracle/cmd/
+
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o configagent oracle/cmd/config_agent/config_agent.go
+
+# Use distroless as a minimal base image to package the config agent binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/base:nonroot
+WORKDIR /
+COPY --from=builder /build/configagent .
+USER nonroot:nonroot
+
+ENTRYPOINT ["/configagent"]
diff --git a/oracle/build/dbdaemon_client/Dockerfile b/oracle/build/dbdaemon_client/Dockerfile
new file mode 100644
index 0000000..b7b2ebd
--- /dev/null
+++ b/oracle/build/dbdaemon_client/Dockerfile
@@ -0,0 +1,30 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the Database Daemon Client binary.
+FROM docker.io/golang:1.15 as builder
+
+WORKDIR /build
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Copy the go source
+COPY oracle/pkg/ oracle/pkg/
+COPY oracle/cmd/ oracle/cmd/
+
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o dbdaemon_client oracle/cmd/dbdaemon_client/dbdaemon_client.go
+
+FROM docker.io/busybox
+COPY --from=builder /build/dbdaemon_client ./agent_repo/
diff --git a/oracle/build/dbimage/Dockerfile b/oracle/build/dbimage/Dockerfile
new file mode 100644
index 0000000..12909cf
--- /dev/null
+++ b/oracle/build/dbimage/Dockerfile
@@ -0,0 +1,54 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Base image can be either RHEL8 UBI or OEL8 slim
+# BASE_IMAGE=registry.access.redhat.com/ubi8/ubi
+# ARG BASE_IMAGE=oraclelinux:8-slim
+# Oracle 8 does not support 12c.
+# ARG BASE_IMAGE=oraclelinux:7-slim
+
+FROM docker.io/oraclelinux:7-slim as base
+ARG DB_VERSION
+ARG CREATE_CDB
+ARG CDB_NAME
+ARG CHARACTER_SET
+ARG MEM_PCT
+ARG EDITION
+ARG PATCH_VERSION
+ARG INSTALL_SCRIPT
+ENV STAGE_DIR=/tmp/stage/
+
+RUN mkdir $STAGE_DIR && chmod ug+w $STAGE_DIR
+ADD ./* $STAGE_DIR
+RUN /bin/bash -c \
+    'if [ "$DB_VERSION" = "18c" ] && [ "$EDITION" = "xe" ]; then \
+      export INSTALL_SCRIPT=install-oracle-18c-xe.sh; \
+      chmod +x $STAGE_DIR$INSTALL_SCRIPT; \
+      $STAGE_DIR$INSTALL_SCRIPT $CDB_NAME $CHARACTER_SET && \
+      rm -rf $INSTALL_SCRIPT && \
+      rm -rf $STAGE_DIR; \
+    else \
+      export INSTALL_SCRIPT=install-oracle.sh; \
+      chmod +x $STAGE_DIR$INSTALL_SCRIPT; \
+      $STAGE_DIR$INSTALL_SCRIPT $DB_VERSION $EDITION $CREATE_CDB $CDB_NAME $CHARACTER_SET $MEM_PCT $PATCH_VERSION && \
+      rm -rf $INSTALL_SCRIPT && \
+      rm -rf $STAGE_DIR; \
+    fi'
+
+VOLUME ["/u02", "/u03"]
+# TODO: make the port number configurable
+EXPOSE 1521
+
+# Define default command to start Oracle Database.
+CMD exec /bin/bash
diff --git a/oracle/build/dbimage/README.md b/oracle/build/dbimage/README.md
new file mode 100644
index 0000000..0bc7f9a
--- /dev/null
+++ b/oracle/build/dbimage/README.md
@@ -0,0 +1,42 @@
+# Cloud Build for DB Image
+This tooling picks up the Oracle software from a GCS bucket and creates a
+container image with the RDBMS software preinstalled. At present, it supports
+Oracle 19c, Oracle 18c XE and Oracle 12.2. The container image does not
+contain a database; the database needs to be created separately.
+
+The base container OS is Oracle Enterprise Linux (OEL7-slim).
+
+## How to run
+
+Oracle 19c EE
+
+```shell
+$ GCS_PATH=
+$ gcloud builds submit --config=cloudbuild.yaml --substitutions=_INSTALL_PATH=$GCS_PATH,_DB_VERSION=19.3
+```
+
+Oracle 12.2 EE
+
+```shell
+$ GCS_PATH=
+$ gcloud builds submit --config=cloudbuild.yaml --substitutions=_INSTALL_PATH=$GCS_PATH,_DB_VERSION=12.2
+```
+
+Oracle 18c XE
+
+```shell
+$ TAG=
+$ gcloud builds submit --config=cloudbuild-18c-xe.yaml --substitutions=_CDB_NAME=MYDB,_CHARACTER_SET=AL32UTF8,_TAG=$TAG
+```
+
+## Access
+
+When running the commands above, you might see failures if the Cloud Build
+service account does not have 'Storage Object Viewer' access to the GCS
+bucket that stores the software.
+
+```shell
+export PROJECT_NUMBER=
+export BUCKET_NAME=
+gsutil iam ch serviceAccount:$PROJECT_NUMBER@cloudbuild.gserviceaccount.com:roles/storage.objectViewer gs://$BUCKET_NAME
+```
diff --git a/oracle/build/dbimage/cloudbuild-18c-xe.yaml b/oracle/build/dbimage/cloudbuild-18c-xe.yaml
new file mode 100644
index 0000000..aa37f9c
--- /dev/null
+++ b/oracle/build/dbimage/cloudbuild-18c-xe.yaml
@@ -0,0 +1,33 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
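+
+# A sketch of a direct invocation. image_build.sh normally supplies these
+# substitutions; the project and tag values below are illustrative only:
+#   gcloud builds submit --config=cloudbuild-18c-xe.yaml \
+#     --substitutions=_CDB_NAME=GCLOUD,_CHARACTER_SET=AL32UTF8,_TAG=gcr.io/my-project/oracle-database-images/oracle-18c-xe-seeded-gcloud:latest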
+ +options: + diskSizeGb: 1000 +timeout: 7200s +steps: + +- name: 'gcr.io/cloud-builders/docker' + args: + - 'build' + - '--no-cache' + - '--build-arg=DB_VERSION=18c' + - '--build-arg=CDB_NAME=$_CDB_NAME' + - '--build-arg=CHARACTER_SET=$_CHARACTER_SET' + - '--build-arg=EDITION=xe' + - '--tag=$_TAG' + - '--file=Dockerfile' + - '.' + +images: +- '$_TAG' diff --git a/oracle/build/dbimage/cloudbuild.yaml b/oracle/build/dbimage/cloudbuild.yaml new file mode 100644 index 0000000..edbf14c --- /dev/null +++ b/oracle/build/dbimage/cloudbuild.yaml @@ -0,0 +1,44 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +substitutions: + "_DB_VERSION": "12.2" + "_EDITION": "ee" +options: + diskSizeGb: 1000 +timeout: 7200s +steps: +- name: 'gcr.io/cloud-builders/gsutil' + id: 'install_zip' + args: ['-m', 'cp', "-r", '${_INSTALL_PATH}/*', '.'] + +- name: 'gcr.io/cloud-builders/docker' + waitFor: + - 'install_zip' + args: + - 'build' + - '--no-cache' + - '--build-arg=DB_VERSION=$_DB_VERSION' + - '--build-arg=CREATE_CDB=$_CREATE_CDB' + - '--build-arg=CDB_NAME=$_CDB_NAME' + - '--build-arg=CHARACTER_SET=$_CHARACTER_SET' + - '--build-arg=MEM_PCT=$_MEM_PCT' + - '--build-arg=EDITION=$_EDITION' + - '--build-arg=PATCH_VERSION=$_PATCH_VERSION' + - '--tag=$_TAG' + - '--file=Dockerfile' + - '.' + +images: +- '$_TAG' diff --git a/oracle/build/dbimage/image_build.sh b/oracle/build/dbimage/image_build.sh new file mode 100755 index 0000000..7a14d92 --- /dev/null +++ b/oracle/build/dbimage/image_build.sh @@ -0,0 +1,273 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
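+
+# Builds an Oracle database container image, either with Google Cloud Build
+# (the default) or locally with Docker (--local_build true). The assembled
+# build command is printed, and only executed when --no_dry_run is passed.
+# Illustrative invocations (the bucket path and flag values are examples only):
+#   ./image_build.sh --db_version 19.3 --create_cdb true --cdb_name GCLOUD \
+#     --install_path gs://my-bucket/install --no_dry_run
+#   ./image_build.sh --db_version 18c --create_cdb true --local_build true --no_dry_run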
+
+readonly ORACLE_12="12.2"
+readonly ORACLE_19="19.3"
+readonly ORACLE_18="18c"
+readonly DUMMY_VALUE="-1"
+
+DB_VERSION=''
+EDITION='ee'
+CREATE_CDB=false
+CDB_NAME=''
+CHARACTER_SET='AL32UTF8'
+MEM_PCT=25
+IMAGE_NAME_SUFFIX=''
+INSTALL_PATH=''
+NO_DRY_RUN=false
+PROJECT_ID=''
+LOCAL_BUILD=false
+TAG=''
+
+sanity_check_params() {
+  if [[ "${CREATE_CDB}" == true ]]; then
+    if [ -z "${CDB_NAME}" ]; then
+      CDB_NAME="GCLOUD"
+    fi
+    db_name_len="${#CDB_NAME}"
+    if [[ "${db_name_len}" -le 0 || "${db_name_len}" -gt 8 ]]; then
+      echo "CDB_NAME must be between 1 and 8 characters"
+      usage
+    fi
+  else
+    db_name_len="${#CDB_NAME}"
+    if [[ "${db_name_len}" -gt 0 ]]; then
+      echo "CDB_NAME is set but CREATE_CDB is not"
+      usage
+    fi
+  fi
+
+  if [[ -z "${DB_VERSION}" ]]; then
+    echo "The --db_version parameter is required to create images"
+    usage
+  fi
+
+  if [[ "${DB_VERSION}" != "${ORACLE_12}" && "${DB_VERSION}" != "${ORACLE_18}" && "${DB_VERSION}" != "${ORACLE_19}" ]]; then
+    echo "${DB_VERSION} is not supported, the supported versions are ${ORACLE_12}, ${ORACLE_18} and ${ORACLE_19}"
+    usage
+  fi
+
+  if [ -z "${INSTALL_PATH}" ] && [ "${DB_VERSION}" != "${ORACLE_18}" ] && [ "${LOCAL_BUILD}" != true ]; then
+    echo "GCS path containing Oracle installation files is not provided"
+    usage
+  fi
+
+  if [[ "${MEM_PCT}" -le 0 || "${MEM_PCT}" -gt 100 ]]; then
+    echo "MEM_PCT must be between 1 and 100"
+    usage
+  fi
+
+  if [[ "${DB_VERSION}" = "${ORACLE_18}" ]]; then
+    EDITION="xe"
+  fi
+}
+
+usage() {
+  echo "------USAGE------
+  This tool allows you to build Oracle database container images.
+  You can build with Google Cloud Build (the default) or perform a local build by setting the --local_build flag to true.
+  Sanity checks are conducted on your inputs and safe defaults are used as necessary.
+
+  image_build.sh --db_version [12.2, 19.3 or 18c] --create_cdb [true or false] --cdb_name [CDB_NAME] --install_path [INSTALL_PATH]
+
+  REQUIRED FLAGS
+     --install_path
+       GCS path containing Oracle Database EE installation files.
+       This flag is only required when using Google Cloud Build.
+       You do not need to specify this parameter for Oracle 18c XE.
+
+     --db_version
+       Version of the Oracle database.
+
+     --create_cdb
+       Specifies whether a CDB should be created. Must be set to 'true' if using Oracle 18c.
+
+  OPTIONAL FLAGS
+     --cdb_name
+       Name of the CDB to create. Defaults to 'GCLOUD' if unspecified.
+
+     --edition
+       Edition of the Oracle database. 'ee' is used if unspecified.
+       This flag is not supported for Oracle 18c and will be ignored.
+
+     --patch_version
+       Version of the Oracle database PSU.
+       If unspecified, 31312468 is used as the default for 12.2 and
+       31281355 is used as the default for 19.3.
+       This flag is not supported for Oracle 18c and will be ignored.
+
+     --local_build
+       If true, Docker is used to build the image locally. If false or unspecified, Google Cloud Build is used to build the image.
+
+     --project_id
+       GCP project to use for the image build. If unspecified, your default gcloud project will be used.
+       For local builds, this flag can be set to 'local-build'.
+
+     --mem_pct
+       Percentage of memory to allocate to the database. Must be between 1 and 100.
+       This flag is not supported for Oracle 18c and will be ignored.
+
+     --character_set
+       Character set for the newly created CDB.
+
+     --tag
+       Tag that should be applied to the image.
+       If a tag is not specified, 'gcr.io/\$GCR_PROJECT_ID/oracle-database-images/oracle-\${DB_VERSION}-\${EDITION}-\${IMAGE_NAME_SUFFIX}:latest' is used.
+
+     --no_dry_run
+       Run the command in full mode and actually execute the build. Without
+       this flag the script runs in dry-run mode and only prints the command.
+  "
+  exit 1
+}
+
+function parse_arguments() {
+  opts=$(getopt -o i:v:e:c:n:m:t:h \
+    --longoptions install_path:,db_version:,edition:,create_cdb:,cdb_name:,mem_pct:,character_set:,project_id:,patch_version:,local_build:,tag:,no_dry_run,help \
+    -n "$(basename "$0")" -- "$@")
+  eval set -- "$opts"
+  while true; do
+    case "$1" in
+    -i | --install_path)
+      shift
+      INSTALL_PATH=$1
+      shift
+      ;;
+    -v | --db_version)
+      shift
+      DB_VERSION=$1
+      shift
+      ;;
+    -e | --edition)
+      shift
+      EDITION=$1
+      shift
+      ;;
+    -c | --create_cdb)
+      shift
+      CREATE_CDB=$1
+      shift
+      ;;
+    -n | --cdb_name)
+      shift
+      CDB_NAME=$1
+      shift
+      ;;
+    -m | --mem_pct)
+      shift
+      MEM_PCT=$1
+      shift
+      ;;
+    --character_set)
+      shift
+      CHARACTER_SET=$1
+      shift
+      ;;
+    --patch_version)
+      shift
+      PATCH_VERSION=$1
+      shift
+      ;;
+    --project_id)
+      shift
+      PROJECT_ID=$1
+      shift
+      ;;
+    --local_build)
+      shift
+      LOCAL_BUILD=$1
+      shift
+      ;;
+    -t | --tag)
+      shift
+      TAG=$1
+      shift
+      ;;
+    -h | --help)
+      usage
+      return 1
+      ;;
+    --no_dry_run)
+      NO_DRY_RUN=true
+      shift
+      ;;
+    --)
+      shift
+      break
+      ;;
+    *)
+      echo "Invalid argument $1"
+      usage
+      exit 1
+      ;;
+    esac
+  done
+
+}
+
+execute_command() {
+  IMAGE_NAME_SUFFIX=$(echo "$CDB_NAME" | tr '[:upper:]' '[:lower:]')
+  if [ -z "${PROJECT_ID}" ]; then
+    PROJECT_ID=$(gcloud config get-value project 2>/dev/null)
+    echo "Project not specified, falling back on current gcloud default:"
+    echo "$PROJECT_ID"
+  fi
+
+  if [ -z "${PATCH_VERSION}" ]; then
+    PATCH_VERSION="${DUMMY_VALUE}"
+  fi
+
+  GCR_PROJECT_ID=$(echo "$PROJECT_ID" | tr : /)
+
+  if [[ "${CREATE_CDB}" == true ]]; then
+    IMAGE_NAME_SUFFIX="seeded-${IMAGE_NAME_SUFFIX}"
+  else
+    IMAGE_NAME_SUFFIX="unseeded"
+    CDB_NAME="${DUMMY_VALUE}"
+  fi
+
+  if [ -z "${TAG}" ]; then
+    TAG="gcr.io/${GCR_PROJECT_ID}/oracle-database-images/oracle-${DB_VERSION}-${EDITION}-${IMAGE_NAME_SUFFIX}:latest"
+  fi
+
+  if [ "${LOCAL_BUILD}" == true ]; then
+    BUILD_CMD="sudo docker build --no-cache --build-arg=DB_VERSION=${DB_VERSION} --build-arg=CREATE_CDB=${CREATE_CDB} --build-arg=CDB_NAME=${CDB_NAME} --build-arg=CHARACTER_SET=${CHARACTER_SET} --build-arg=MEM_PCT=${MEM_PCT} --build-arg=EDITION=${EDITION} --build-arg=PATCH_VERSION=${PATCH_VERSION} --tag=${TAG} ."
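+    # The docker invocation mirrors the build args declared in
+    # oracle/build/dbimage/Dockerfile; like the gcloud variants below, it is
+    # only executed when --no_dry_run is set.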
+  else
+    if [ "${DB_VERSION}" == "${ORACLE_18}" ]; then
+      BUILD_CMD="gcloud builds submit --project=${PROJECT_ID} --config=cloudbuild-18c-xe.yaml --substitutions=_CDB_NAME=${CDB_NAME},_CHARACTER_SET=${CHARACTER_SET},_TAG=${TAG}"
+    else
+      BUILD_CMD="gcloud builds submit --project=${PROJECT_ID} --config=cloudbuild.yaml --substitutions=_INSTALL_PATH=${INSTALL_PATH},_DB_VERSION=${DB_VERSION},_EDITION=${EDITION},_CREATE_CDB=${CREATE_CDB},_CDB_NAME=${CDB_NAME},_CHARACTER_SET=${CHARACTER_SET},_MEM_PCT=${MEM_PCT},_TAG=${TAG},_PATCH_VERSION=${PATCH_VERSION}"
+    fi
+  fi
+
+  if [[ "$NO_DRY_RUN" == true ]]; then
+    echo "Executing the following command:"
+    echo "$BUILD_CMD"
+    ${BUILD_CMD}
+  else
+    echo "Dry run mode: the command would have executed as follows:"
+    echo "$BUILD_CMD"
+  fi
+}
+
+main() {
+  parse_arguments "$@"
+  sanity_check_params
+  date
+  time execute_command
+}
+
+main "$@"
diff --git a/oracle/build/dbimage/install-oracle-18c-xe.sh b/oracle/build/dbimage/install-oracle-18c-xe.sh
new file mode 100644
index 0000000..d5e93c2
--- /dev/null
+++ b/oracle/build/dbimage/install-oracle-18c-xe.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=2153
+
+set -x
+set -e
+set -u
+export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+readonly CDB_NAME=${1:-GCLOUD}
+readonly CHARACTER_SET=${2:-AL32UTF8}
+readonly USER="oracle"
+readonly GROUP="dba"
+readonly OHOME="/opt/oracle/product/18c/dbhomeXE"
+readonly DB_VERSION="18c"
+
+setup_directories() {
+  mkdir -p "/home/${USER}"
+}
+
+install_debug_utilities() {
+  yum install -y shadow-utils openssl sudo
+  yum install -y nmap-ncat.x86_64
+  yum install -y strace.x86_64
+  yum install -y net-tools.x86_64
+  yum install -y lsof.x86_64
+}
+
+write_pam_files() {
+  echo "#%PAM-1.0
+auth include system-auth
+account include system-auth
+password include system-auth
+" >/etc/pam.d/sudo
+
+  echo "#%PAM-1.0
+auth sufficient pam_rootok.so
+auth substack system-auth
+auth include postlogin
+account sufficient pam_succeed_if.so uid = 0 use_uid quiet
+account include system-auth
+password include system-auth
+session include postlogin
+session optional pam_xauth.so
+" >/etc/pam.d/su
+}
+
+set_environment() {
+  export ORACLE_DOCKER_INSTALL=true
+  echo "export ORACLE_HOME=${OHOME}" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export ORACLE_BASE=/opt/oracle" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export ORACLE_SID=${CDB_NAME}" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export PATH=${OHOME}/bin:${OHOME}/OPatch:/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export LD_LIBRARY_PATH=${OHOME}/lib" >>"/home/oracle/${CDB_NAME}.env"
+  source "/home/oracle/${CDB_NAME}.env"
+}
+
+install_oracle() {
+  yum -y localinstall https://download.oracle.com/otn-pub/otn_software/db-express/oracle-database-xe-18c-1.0-1.x86_64.rpm
+}
+
+write_oracle_config() {
+  echo "\
+CHARSET=${CHARACTER_SET}
+ORACLE_SID=${CDB_NAME}
+SKIP_VALIDATIONS=FALSE" > /etc/sysconfig/oracle-xe-18c.conf
+}
+
+create_cdb() {
+  local syspass="$(openssl rand -base64 16 | tr -dc a-zA-Z0-9)"
+  (echo "${syspass}"; echo "${syspass}";) | /etc/init.d/oracle-xe-18c configure
+}
+
+set_file_ownership() {
+  chown -R "${USER}:${GROUP}" "${OHOME}"
+  chown -R "${USER}:${GROUP}" "/home/${USER}"
+  chown "${USER}:${GROUP}" /etc/oraInst.loc
+  chown -R "${USER}:${GROUP}" /opt
+}
+
+shutdown_oracle() {
+  run_sql "shutdown immediate;"
+  echo "Oracle Database Shutdown"
+}
+
+delete_xe_pdb() {
+  run_sql "ALTER PLUGGABLE DATABASE XEPDB1 CLOSE;"
+  run_sql "DROP PLUGGABLE DATABASE XEPDB1 INCLUDING DATAFILES;"
+}
+
+run_sql() {
+  echo "${1}" | sudo -E -u oracle "${ORACLE_HOME}/bin/sqlplus" -S / as sysdba
+}
+
+create_metadata_file() {
+  echo "ORACLE_HOME=${OHOME}" >>"/home/oracle/.metadata"
+  echo "ORACLE_SID=${CDB_NAME}" >>"/home/oracle/.metadata"
+  echo "VERSION=${DB_VERSION}" >>"/home/oracle/.metadata"
+}
+
+main() {
+  echo "Running Oracle 18c XE install script..."
+  install_debug_utilities
+  write_pam_files
+  setup_directories
+  set_environment
+  install_oracle
+  write_oracle_config
+  create_cdb
+  set_file_ownership
+  delete_xe_pdb
+  shutdown_oracle
+  create_metadata_file
+  echo "Oracle 18c XE installation succeeded!"
+}
+
+main
\ No newline at end of file
diff --git a/oracle/build/dbimage/install-oracle.sh b/oracle/build/dbimage/install-oracle.sh
new file mode 100755
index 0000000..4310404
--- /dev/null
+++ b/oracle/build/dbimage/install-oracle.sh
@@ -0,0 +1,385 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=2153
+
+set -x
+set -e
+set -u
+export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+readonly DB_VERSION="$1"
+readonly EDITION="$2"
+readonly CREATE_CDB=${3:-true}
+readonly CDB_NAME=${4:-GCLOUD}
+readonly CHARACTER_SET=${5:-AL32UTF8}
+readonly MEM_PCT=${6:-25}
+PATCH_VERSION="$7"
+readonly INIT_PARAMS="log_archive_dest_1='LOCATION=USE_DB_RECOVERY_FILE_DEST',enable_pluggable_database=TRUE,common_user_prefix='gcsql$'"
+readonly USER='oracle'
+readonly GROUP='dba'
+readonly OCM_FILE="ocm.rsp"
+readonly OHOME="/u01/app/oracle/product/${DB_VERSION}/db"
+readonly ORACLE_12="12.2"
+readonly ORACLE_19="19.3"
+
+setup_patching() {
+  if [[ "${PATCH_VERSION}" == "-1" ]]; then
+    if [[ "${DB_VERSION}" == "${ORACLE_12}" ]]; then
+      PATCH_VERSION="31312468"
+    elif [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
+      PATCH_VERSION="31281355"
+    fi
+  fi
+  local patch_suffix
+  if [[ "${DB_VERSION}" == "${ORACLE_12}" ]]; then
+    patch_suffix="_122010_Linux-x86-64.zip"
+  elif [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
+    patch_suffix="_190000_Linux-x86-64.zip"
+  fi
+  PATCH_ZIP="p${PATCH_VERSION}${patch_suffix}"
+
+  if [[ ! -f "${STAGE_DIR}"/"${PATCH_ZIP}" ]]; then
+    echo "could not find the PSU zip at ${STAGE_DIR}/${PATCH_ZIP}; possible fixes:
+if '--local_build' is enabled, stage ${PATCH_ZIP} in the same directory as image_build.sh.
+if '--local_build' is disabled, stage ${PATCH_ZIP} in the GCS bucket.
+Also check '--patch_version' when running image_build.sh; the script assumes the file is named 'p<patch_version>${patch_suffix}'."
+    exit 1
+  fi
+}
+
+setup_installers() {
+  if [[ "${DB_VERSION}" == "${ORACLE_12}" ]]; then
+    local -g INSTALL_CONFIG="ora12-config.sh"
+    # shellcheck source=ora12-config.sh
+    source "${STAGE_DIR}"/"${INSTALL_CONFIG}"
+    local -g PREINSTALL_RPM="oracle-database-server-12cR2-preinstall.x86_64"
+    local -g CHECKSUM_FILE="checksum.sha256.12"
+  elif [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
+    local -g INSTALL_CONFIG="ora19-config.sh"
+    # shellcheck source=ora19-config.sh
+    source "${STAGE_DIR}"/"${INSTALL_CONFIG}"
+    local -g PREINSTALL_RPM="oracle-database-preinstall-19c.x86_64"
+    local -g CHECKSUM_FILE="checksum.sha256.19"
+  else
+    echo "DB version ${DB_VERSION} not supported"
+    exit 1
+  fi
+  _fallback_install_file
+  _fallback_opatch_file
+}
+
+_fallback_install_file() {
+  if [[ -f "${STAGE_DIR}"/"${INSTALL_ZIP}" ]]; then
+    echo "found DB installer zip ${STAGE_DIR}/${INSTALL_ZIP}, install will use ${INSTALL_ZIP}"
+    return
+  fi
+  # The installer zip can be downloaded either from OTN or edelivery.
+  # Default file name for edelivery: V839960-01.zip
+  # Default file name for OTN: linuxx64_12201_database.zip
+  local candidates=("linuxx64_12201_database.zip")
+  if [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
+    candidates=("LINUX.X64_193000_db_home.zip")
+  fi
+  echo "could not find the specified DB installer zip; will try falling back to one of these possible names:" "${candidates[@]}"
+
+  for f in "${candidates[@]}"; do
+    if [[ -f "${STAGE_DIR}"/"${f}" ]]; then
+      echo "found DB installer zip ${STAGE_DIR}/${f}, install will use ${f}"
+      INSTALL_ZIP="${f}"
+      break
+    else
+      echo "could not find fallback DB installer zip ${STAGE_DIR}/${f}; try staging an installer with one of these possible names:" "${candidates[@]}"
+    fi
+  done
+}
+
+_fallback_opatch_file() {
+  if [[ -f "${STAGE_DIR}"/"${OPATCH_ZIP}" ]]; then
+    echo "found opatch zip ${STAGE_DIR}/${OPATCH_ZIP}, install will use ${OPATCH_ZIP}"
+    return
+  fi
+  # OPATCH_ZIP can be p6880880_200000_Linux-x86-64.zip, p6880880_190000_Linux-x86-64.zip,
+  # p6880880_180000_Linux-x86-64.zip or p6880880_122010_Linux-x86-64.zip;
+  # for the 04302020 OPatch release their sha256 sums are identical:
+  # "B08320195434559D9662729C5E02ABC8436A5C602B4355CC33A673F24D9D174"
+  local candidates=(
+    "p6880880_200000_Linux-x86-64.zip"
+    "p6880880_190000_Linux-x86-64.zip"
+    "p6880880_180000_Linux-x86-64.zip"
+    "p6880880_122010_Linux-x86-64.zip"
+  )
+  echo "could not find the opatch zip named in the config; will try one of these possible names:" "${candidates[@]}"
+
+  for f in "${candidates[@]}"; do
+    if [[ -f "${STAGE_DIR}"/"${f}" ]]; then
+      echo "found opatch zip ${STAGE_DIR}/${f}, install will use ${f}"
+      OPATCH_ZIP="${f}"
+      break
+    else
+      echo "could not find opatch zip ${STAGE_DIR}/${f}, falling back to other possible names"
+    fi
+  done
+}
+
+setup_package() {
+  yum install -y shadow-utils openssl sudo
+  yum install -y nmap-ncat.x86_64
+  yum install -y strace.x86_64
+  yum install -y net-tools.x86_64
+  yum install -y lsof.x86_64
+  yum install -y "${PREINSTALL_RPM}"
+  # Microdnf on RHEL 8
+  # microdnf install -y "${PREINSTALL_RPM}"
+  # microdnf install -y shadow-utils openssl sudo
+  echo "#%PAM-1.0
+auth include system-auth
+account include system-auth
+password include system-auth
+" >/etc/pam.d/sudo
+}
+
+setup_ocm() {
+  if [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
"oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v19.0.0" >"${STAGE_DIR}/${OCM_FILE}" + elif [[ "${DB_VERSION}" == "${ORACLE_12}" ]]; then + echo "oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v12.2.0" >"${STAGE_DIR}/${OCM_FILE}" + else + echo "Unsupported version ${DB_VERSION}" + exit 1 + fi + echo "oracle.install.option=INSTALL_DB_SWONLY" >>"${STAGE_DIR}/${OCM_FILE}" + echo "UNIX_GROUP_NAME=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "INVENTORY_LOCATION=/u01/app/oracle/oraInventory" >>"${STAGE_DIR}/${OCM_FILE}" + echo "ORACLE_HOME=${OHOME}" >>"${STAGE_DIR}/${OCM_FILE}" + echo "ORACLE_BASE=/u01/app/oracle" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.InstallEdition=${EDITION}" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSDBA_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSOPER_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSBACKUPDBA_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSDGDBA_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSKMDBA_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "oracle.install.db.OSRACDBA_GROUP=dba" >>"${STAGE_DIR}/${OCM_FILE}" + echo "SECURITY_UPDATES_VIA_MYORACLESUPPORT=false" >>"${STAGE_DIR}/${OCM_FILE}" + echo "DECLINE_SECURITY_UPDATES=true" >>"${STAGE_DIR}/${OCM_FILE}" +} + +setup_directories() { + mkdir -p "${STAGE_DIR}/patches" + mkdir -p "/home/${USER}" + mkdir -p "${OHOME}" + chown "${USER}:${GROUP}" "${OHOME}" + chown -R "${USER}:${GROUP}" "/home/${USER}" + chown "${USER}:${GROUP}" "${STAGE_DIR}/patches" +} + +patch_oracle() { + cd "${STAGE_DIR}/patches/${PATCH_VERSION}/${PATCH_VERSION}" + sudo -E -u oracle "${OHOME}/OPatch/opatch" \ + apply -silent -ocmrf "${STAGE_DIR}/${OCM_FILE}" +} + +checksum_files() { + cd "${STAGE_DIR}" + cat >"${CHECKSUM_FILE}" <>/etc/oraInst.loc + chown "${USER}:${GROUP}" /etc/oraInst.loc + cd "${OHOME}" + export CV_ASSUME_DISTID=OL7 + sudo -E -u oracle "${OHOME}/runInstaller" \ + -silent \ + -waitforcompletion \ + -ignorePrereqFailure \ + INVENTORY_LOCATION=/etc/oraInst.loc \ + UNIX_GROUP_NAME=dba \ + ORACLE_HOME="${OHOME}" \ + ORACLE_BASE=/u01/app/oracle \ + oracle.install.db.InstallEdition="${EDITION}" \ + oracle.install.db.OSDBA_GROUP="${GROUP}" \ + oracle.install.db.OSOPER_GROUP="${GROUP}" \ + oracle.install.db.OSBACKUPDBA_GROUP="${GROUP}" \ + oracle.install.db.OSDGDBA_GROUP="${GROUP}" \ + oracle.install.db.OSKMDBA_GROUP="${GROUP}" \ + oracle.install.db.OSRACDBA_GROUP="${GROUP}" \ + oracle.install.option=INSTALL_DB_SWONLY || + (($? == 6)) # Check for successful install with warning suppressed. + + "${OHOME}/root.sh" +} + +install_oracle12() { + printf "inventory_loc=/u01/app/oraInventory\ninst_group=dba\n" \ + >>/etc/oraInst.loc + chown "${USER}:${GROUP}" /etc/oraInst.loc + sudo -u oracle "${STAGE_DIR}/database/runInstaller" \ + -silent \ + -force \ + -invptrloc /etc/oraInst.loc \ + -waitforcompletion \ + -ignoresysprereqs \ + -ignoreprereq \ + UNIX_GROUP_NAME=dba \ + ORACLE_HOME="${OHOME}" \ + ORACLE_BASE=/u01/app/oracle \ + oracle.install.db.InstallEdition="${EDITION}" \ + oracle.install.db.OSDBA_GROUP="${GROUP}" \ + oracle.install.db.OSOPER_GROUP="${GROUP}" \ + oracle.install.db.OSBACKUPDBA_GROUP="${GROUP}" \ + oracle.install.db.OSDGDBA_GROUP="${GROUP}" \ + oracle.install.db.OSKMDBA_GROUP="${GROUP}" \ + oracle.install.db.OSRACDBA_GROUP="${GROUP}" \ + oracle.install.option=INSTALL_DB_SWONLY || + (($? 
+    (($? == 6)) # Exit code 6 means success with warnings; treat it as success.
+
+  cd "${OHOME}/rdbms/lib"
+  sudo -u oracle \
+    make -f ins_rdbms.mk uniaud_on ioracle ORACLE_HOME="${OHOME}"
+
+  "${OHOME}/root.sh"
+}
+
+create_cdb() {
+  local syspass="$(openssl rand -base64 16 | tr -dc a-zA-Z0-9)"
+  sudo -u oracle "${OHOME}/bin/dbca" \
+    -silent \
+    -createDatabase \
+    -templateName General_Purpose.dbc \
+    -gdbname "${CDB_NAME}" \
+    -createAsContainerDatabase true \
+    -sid "${CDB_NAME}" \
+    -responseFile NO_VALUE \
+    -characterSet "${CHARACTER_SET}" \
+    -memoryPercentage "${MEM_PCT}" \
+    -emConfiguration NONE \
+    -datafileDestination "/u01/app/oracle/oradata" \
+    -storageType FS \
+    -initParams "${INIT_PARAMS}" \
+    -databaseType MULTIPURPOSE \
+    -recoveryAreaDestination /u01/app/oracle/fast_recovery_area \
+    -sysPassword "${syspass}" \
+    -systemPassword "${syspass}"
+}
+
+set_environment() {
+  echo "export ORACLE_HOME=${OHOME}" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export ORACLE_BASE=/u01/app/oracle" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export ORACLE_SID=${CDB_NAME}" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export PATH=${OHOME}/bin:${OHOME}/OPatch:/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" >>"/home/oracle/${CDB_NAME}.env"
+  echo "export LD_LIBRARY_PATH=${OHOME}/lib" >>"/home/oracle/${CDB_NAME}.env"
+  source "/home/oracle/${CDB_NAME}.env"
+  chown "${USER}:${GROUP}" "/home/oracle/${CDB_NAME}.env"
+}
+
+create_metadata_file() {
+  echo "ORACLE_HOME=${OHOME}" >>"/home/oracle/.metadata"
+  if [[ "${CREATE_CDB}" == true ]]; then
+    echo "ORACLE_SID=${CDB_NAME}" >>"/home/oracle/.metadata"
+  fi
+  echo "VERSION=${DB_VERSION}" >>"/home/oracle/.metadata"
+}
+
+cleanup_post_success() {
+  # ORDS
+  rm -rf "${OHOME}/ords" &&
+    # SQL Developer
+    rm -rf "${OHOME}/sqldeveloper" &&
+    # UCP connection pool
+    rm -rf "${OHOME}/ucp" &&
+    # All installer files (the globs must stay outside the quotes to expand)
+    rm -rf "${OHOME}"/lib/*.zip &&
+    # OUI backup
+    rm -rf "${OHOME}"/inventory/backup/* &&
+    # Network tools help
+    rm -rf "${OHOME}/network/tools/help" &&
+    # Database upgrade assistant
+    rm -rf "${OHOME}/assistants/dbua" &&
+    # Database migration assistant
+    rm -rf "${OHOME}/dmu" &&
+    # Pilot workflow installer
+    rm -rf "${OHOME}/install/pilot" &&
+    # Support tools
+    rm -rf "${OHOME}/suptools" &&
+    # Temp location
+    rm -rf /tmp/* &&
+    # Install files
+    rm -rf "${STAGE_DIR}"
+}
+
+unzip_binaries() {
+  if [[ "${DB_VERSION}" == "${ORACLE_12}" ]]; then
+    chown -R "${USER}:${GROUP}" "${STAGE_DIR}"
+    sudo -u oracle unzip "${STAGE_DIR}/${INSTALL_ZIP}" -d "${STAGE_DIR}"
+  elif [[ "${DB_VERSION}" == "${ORACLE_19}" ]]; then
+    sudo -u oracle unzip "${STAGE_DIR}/${INSTALL_ZIP}" -d "${OHOME}"
+  fi
+  sudo -u oracle unzip "${STAGE_DIR}/${PATCH_ZIP}" -d "${STAGE_DIR}/patches/${PATCH_VERSION}"
+  rm "${STAGE_DIR}/${INSTALL_ZIP}"
+  rm "${STAGE_DIR}/${PATCH_ZIP}"
+}
+
+shutdown_oracle() {
+  run_sql "shutdown immediate;"
+  echo "Oracle Database Shutdown"
+}
+
+run_sql() {
+  echo "${1}" | sudo -E -u oracle "${OHOME}/bin/sqlplus" -S / as sysdba
+}
+
+main() {
+  setup_patching
+  setup_installers
+  checksum_files
+  setup_package
+  setup_directories
+  setup_ocm
+  if [[ "${CREATE_CDB}" == true ]]; then
+    set_environment
+  fi
+  chown -R "${USER}:${GROUP}" /u01
+  unzip_binaries
+  install_oracle
+  rm -rf "${OHOME}/OPatch"
+  sudo -u oracle unzip "${STAGE_DIR}/${OPATCH_ZIP}" -d "${OHOME}"
+  patch_oracle
+  if [[ "${CREATE_CDB}" == true ]]; then
+    create_cdb
+    shutdown_oracle
+  fi
+  chown "${USER}:${GROUP}" /etc/oratab
+  cleanup_post_success
+  create_metadata_file
+ echo "Oracle installation success" +} +main diff --git a/oracle/build/dbimage/ora12-config.sh b/oracle/build/dbimage/ora12-config.sh new file mode 100644 index 0000000..7e530b0 --- /dev/null +++ b/oracle/build/dbimage/ora12-config.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INSTALL_ZIP="V839960-01.zip" +INSTALL_ZIP_SHA256="96ed97d21f15c1ac0cce3749da6c3dac7059bb60672d76b008103fc754d22dde" +OPATCH_ZIP="p6880880_122010_Linux-x86-64.zip" +OPATCH_ZIP_SHA256="b08320195434559d9662729c5e02abc8436a5c602b4355cc33a673f24d9d1740" diff --git a/oracle/build/dbimage/ora19-config.sh b/oracle/build/dbimage/ora19-config.sh new file mode 100644 index 0000000..27656e8 --- /dev/null +++ b/oracle/build/dbimage/ora19-config.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INSTALL_ZIP="V982063-01.zip" +INSTALL_ZIP_SHA256="ba8329c757133da313ed3b6d7f86c5ac42cd9970a28bf2e6233f3235233aa8d8" +OPATCH_ZIP="p6880880_190000_Linux-x86-64.zip" +OPATCH_ZIP_SHA256="b08320195434559d9662729c5e02abc8436a5c602b4355cc33a673f24d9d1740" diff --git a/oracle/build/dbinit/Dockerfile b/oracle/build/dbinit/Dockerfile new file mode 100644 index 0000000..ef5c78b --- /dev/null +++ b/oracle/build/dbinit/Dockerfile @@ -0,0 +1,44 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the Database Daemon binary. 
+FROM docker.io/golang:1.15 as builder
+
+WORKDIR /build
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Copy the go source
+COPY oracle/pkg/ oracle/pkg/
+COPY oracle/cmd/ oracle/cmd/
+
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o dbdaemon oracle/cmd/dbdaemon/dbdaemon.go
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o dbdaemon_proxy oracle/cmd/dbdaemon_proxy/dbdaemon_proxy.go
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o init_oracle oracle/cmd/init_oracle/init_oracle.go
+
+FROM docker.io/busybox
+COPY --from=builder /build/dbdaemon \
+/build/dbdaemon_proxy \
+/build/init_oracle \
+/build/oracle/cmd/init_oracle/init_oracle.sh \
+/build/oracle/cmd/init_oracle/stop_oracle.sh \
+/build/oracle/pkg/database/provision/bootstrap-database-initfile.template \
+/build/oracle/pkg/database/provision/bootstrap-database-initfile-oracle-xe.template \
+/build/oracle/pkg/database/provision/bootstrap-database-crcf.template \
+/build/oracle/pkg/database/provision/bootstrap-database-listener.template \
+/build/oracle/pkg/database/provision/bootstrap-database-tnsnames.template \
+/build/oracle/pkg/database/provision/sqlnet.ora \
+./agent_repo/
+RUN chmod -R 755 ./agent_repo/
diff --git a/oracle/build/loggingsidecar/Dockerfile b/oracle/build/loggingsidecar/Dockerfile
new file mode 100644
index 0000000..c2e17d8
--- /dev/null
+++ b/oracle/build/loggingsidecar/Dockerfile
@@ -0,0 +1,34 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the logging sidecar binary.
+FROM docker.io/golang:1.15 as builder
+
+WORKDIR /build
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Copy the go source
+COPY oracle/pkg/ oracle/pkg/
+COPY oracle/cmd/ oracle/cmd/
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o logging_main oracle/cmd/logging/logging_main.go
+
+# Build the container image
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY --from=builder /build/logging_main /
+
+USER nonroot:nonroot
diff --git a/oracle/build/monitoring/Dockerfile b/oracle/build/monitoring/Dockerfile
new file mode 100644
index 0000000..54c3981
--- /dev/null
+++ b/oracle/build/monitoring/Dockerfile
@@ -0,0 +1,41 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
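+
+# Build the monitoring agent binary.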
+FROM docker.io/golang:1.15 AS builder
+
+WORKDIR /build
+
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Copy the go source
+COPY third_party/ third_party/
+COPY oracle/pkg/ oracle/pkg/
+COPY oracle/cmd/ oracle/cmd/
+# Without this, the monitoring agent would fail because it cannot access the file.
+RUN chmod a+r /build/oracle/pkg/agents/monitoring/default-metrics.yaml
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o monitoring_agent oracle/cmd/monitoring/monitoring_agent.go
+
+# Use distroless as a minimal base image to package the monitoring agent binary.
+# Refer to https://github.com/GoogleContainerTools/distroless for more details.
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY --from=builder /build/monitoring_agent .
+COPY --from=builder /build/oracle/pkg/agents/monitoring/default-metrics.yaml .
+
+EXPOSE 9161
+
+ENTRYPOINT ["/monitoring_agent"]
diff --git a/oracle/cmd/config_agent/BUILD.bazel b/oracle/cmd/config_agent/BUILD.bazel
new file mode 100644
index 0000000..9050f59
--- /dev/null
+++ b/oracle/cmd/config_agent/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "config_agent_lib",
+    srcs = ["config_agent.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/config_agent",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//oracle/pkg/agents/config_agent/protos",
+        "//oracle/pkg/agents/config_agent/server",
+        "//oracle/pkg/agents/consts",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "config_agent",
+    embed = [":config_agent_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/oracle/cmd/config_agent/config_agent.go b/oracle/cmd/config_agent/config_agent.go
new file mode 100644
index 0000000..bf2fc41
--- /dev/null
+++ b/oracle/cmd/config_agent/config_agent.go
@@ -0,0 +1,60 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"net"
+	"os"
+
+	"google.golang.org/grpc"
+	"k8s.io/klog/v2"
+
+	pb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos"
+	ca "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/server"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+)
+
+var port = flag.Int("port", consts.DefaultConfigAgentPort, "The tcp port of a Config Agent server.")
+var dbservice = flag.String("dbservice", "", "The DB service.")
+var dbport = flag.Int("dbport", 0, "The DB service port.")
+
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+	// Start the gRPC service of the Config Agent.
+	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
+	if err != nil {
+		klog.ErrorS(err, "Config Agent failed to listen")
+		os.Exit(1)
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		klog.ErrorS(err, "Config Agent failed to get a hostname")
+		os.Exit(1)
+	}
+
+	grpcSvr := grpc.NewServer()
+
+	pb.RegisterConfigAgentServer(grpcSvr, &ca.ConfigServer{DBService: *dbservice, DBPort: *dbport})
+
+	klog.InfoS("Starting Config Agent", "hostname", hostname, "port", *port)
+	if err := grpcSvr.Serve(lis); err != nil {
+		klog.ErrorS(err, "Config Agent failed to start")
+		os.Exit(1)
+	}
+}
diff --git a/oracle/cmd/dbdaemon/BUILD.bazel b/oracle/cmd/dbdaemon/BUILD.bazel
new file mode 100644
index 0000000..0bf72a8
--- /dev/null
+++ b/oracle/cmd/dbdaemon/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "dbdaemon_lib",
+    srcs = ["dbdaemon.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/dbdaemon",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//oracle/pkg/agents/consts",
+        "//oracle/pkg/agents/oracle",
+        "//oracle/pkg/database/dbdaemon",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "dbdaemon",
+    embed = [":dbdaemon_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/oracle/cmd/dbdaemon/dbdaemon.go b/oracle/cmd/dbdaemon/dbdaemon.go
new file mode 100644
index 0000000..36b15f8
--- /dev/null
+++ b/oracle/cmd/dbdaemon/dbdaemon.go
@@ -0,0 +1,134 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Database Daemon listens on the consts.DomainSocketFile domain socket file
+// (or optionally on a specified TCP port) and accepts requests from other
+// data plane agents running in containers.
+//
+// Usage:
+//   dbdaemon
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"net"
+	"os"
+	"os/user"
+	"syscall"
+
+	"google.golang.org/grpc"
+	"k8s.io/klog/v2"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/dbdaemon"
+)
+
+const (
+	lockFile      = "/var/tmp/dbdaemon.lock"
+	exitErrorCode = consts.DefaultExitErrorCode
+)
+
+var cdbNameFromYaml = flag.String("cdb_name", "GCLOUD", "Name of the CDB to create")
+
+// A user running this program should not be root and
+// their primary group should be either dba or oinstall.
+func userCheck(skipChecking bool) error {
+	u, err := user.Current()
+	if err != nil {
+		return fmt.Errorf("userCheck: could not determine the current user: %v", err)
+	}
+	if skipChecking {
+		klog.InfoS("skipped by request, setting user", "username", u.Username)
+		return nil
+	}
+
+	if u.Username == "root" {
+		return fmt.Errorf("userCheck: this program is designed to be run by the Oracle software installation owner (e.g. oracle), not %q", u.Username)
+	}
+
+	groups := []string{"dba", "oinstall"}
+	var gIDs []string
+	for _, group := range groups {
+		g, err := user.LookupGroup(group)
+		// Not every group has to exist, e.g. oinstall may be missing.
+		klog.InfoS("checking groups", "group", group, "g", g)
+		if err != nil {
+			continue
+		}
+		gIDs = append(gIDs, g.Gid)
+	}
+	for _, g := range gIDs {
+		if u.Gid == g {
+			return nil
+		}
+	}
+	return fmt.Errorf("userCheck: current user's primary group (GID=%q) is not dba|oinstall (GID=%q)", u.Gid, gIDs)
+}
+
+func agentInit() error {
+	lock, err := os.Create(lockFile)
+	if err != nil {
+		klog.ErrorS(err, "failed to access lock file", "lockFile", lockFile)
+		return err
+	}
+	if err = syscall.Flock(int(lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+		klog.ErrorS(err, "failed to obtain a lock on lock file. Another instance of Database Daemon may be running", "lockFile", lockFile)
+		return err
+	}
+
+	return nil
+}
+
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+
+	var (
+		lis net.Listener
+		err error
+	)
+
+	if err := agentInit(); err != nil {
+		os.Exit(exitErrorCode)
+	}
+
+	lis, err = net.Listen("tcp", fmt.Sprintf(":%d", consts.DefaultDBDaemonPort))
+	if err != nil {
+		klog.ErrorS(err, "listen call failed")
+		os.Exit(exitErrorCode)
+	}
+	defer lis.Close()
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		klog.ErrorS(err, "failed to get hostname")
+		os.Exit(exitErrorCode)
+	}
+
+	grpcSvr := grpc.NewServer()
+	dbdaemonServer, err := dbdaemon.New(context.Background(), *cdbNameFromYaml)
+	if err != nil {
+		klog.ErrorS(err, "failed to execute dbdaemon.New")
+		os.Exit(exitErrorCode)
+	}
+	dbdpb.RegisterDatabaseDaemonServer(grpcSvr, dbdaemonServer)
+
+	klog.InfoS("Starting a Database Daemon...", "host", hostname, "listenerAddr", lis.Addr())
+	if err := grpcSvr.Serve(lis); err != nil {
+		klog.ErrorS(err, "Database Daemon failed to serve")
+		os.Exit(exitErrorCode)
+	}
+}
diff --git a/oracle/cmd/dbdaemon_client/BUILD.bazel b/oracle/cmd/dbdaemon_client/BUILD.bazel
new file mode 100644
index 0000000..5968993
--- /dev/null
+++ b/oracle/cmd/dbdaemon_client/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "dbdaemon_client_lib",
+    srcs = ["dbdaemon_client.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/dbdaemon_client",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//oracle/pkg/agents/common",
+        "//oracle/pkg/agents/consts",
+        "//oracle/pkg/agents/oracle",
+        "@go_googleapis//google/longrunning:longrunning_go_proto",
+        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "dbdaemon_client",
+    embed = [":dbdaemon_client_lib"],
+    visibility = ["//visibility:public"],
+)
diff --git a/oracle/cmd/dbdaemon_client/dbdaemon_client.go b/oracle/cmd/dbdaemon_client/dbdaemon_client.go
new file mode 100644
index 0000000..bc788fa
--- /dev/null
+++ b/oracle/cmd/dbdaemon_client/dbdaemon_client.go
@@ -0,0 +1,331 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// A client program to interactively test Database Daemon functionality by +// simulating one of the calls issued from the Agent. +// Supported calls are: +// - CheckDatabase[CDB|PDB] +// - [Stop|Start]Database +// - [Stop|Start]Listeners +// - RunSQLPlus[Formatted] +// - DataPump[Import|Export]Async +package main + +import ( + "context" + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/golang/protobuf/ptypes/empty" + lropb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "k8s.io/klog/v2" + + dbdaemonlib "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +const exitErrorCode = consts.DefaultExitErrorCode + +var ( + action = flag.String("action", "", "Action to check: CheckDatabase[CDB|PDB], [Start|Stop]Database, [Start|Stop]Listeners, RunSQLPlus[Formatted], KnownPDBs, GetDatabaseType, GetDatabaseName") + databaseName = flag.String("database_name", "", "PDB database name") + operationId = flag.String("operation_id", "", "Operation id") + commands = flag.String("commands", "", "A list of SQL statements delimited by a semicolon") + reqTimeoutDefault = 10 * time.Minute + reqTimeout = flag.Duration("request_timeout", reqTimeoutDefault, "Maximum amount of time allowed to complete a request (default is 10 min)") + cdbName = flag.String("cdb_name", "GCLOUD", "CDB database name") + exportObjectTypeDefault = "SCHEMAS" + exportObjectType = flag.String("export_object_type", exportObjectTypeDefault, "Data pump export object type") + exportObjects = flag.String("export_objects", "", "A list of data pump export objects delimited by a comma") + gcsPath = flag.String("gcs_path", "", "GCS URI") + gcsLogPath = flag.String("gcs_log_path", "", "GCS URI for log upload") + flashbackTime = flag.String("flashback_time", "", "Flashback_time used in data pump export") + newDatabaseDaemonClient = func(cc *grpc.ClientConn) databaseDaemonStub { return dbdpb.NewDatabaseDaemonClient(cc) } +) + +// databaseDaemonStub is set up for dependency injection. 
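+// Tests can substitute a fake client by overriding the newDatabaseDaemonClient
+// variable defined above.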
+type databaseDaemonStub interface {
+	RunSQLPlus(context.Context, *dbdpb.RunSQLPlusCMDRequest, ...grpc.CallOption) (*dbdpb.RunCMDResponse, error)
+	RunSQLPlusFormatted(context.Context, *dbdpb.RunSQLPlusCMDRequest, ...grpc.CallOption) (*dbdpb.RunCMDResponse, error)
+	CheckDatabaseState(context.Context, *dbdpb.CheckDatabaseStateRequest, ...grpc.CallOption) (*dbdpb.CheckDatabaseStateResponse, error)
+	BounceDatabase(context.Context, *dbdpb.BounceDatabaseRequest, ...grpc.CallOption) (*dbdpb.BounceDatabaseResponse, error)
+	BounceListener(context.Context, *dbdpb.BounceListenerRequest, ...grpc.CallOption) (*dbdpb.BounceListenerResponse, error)
+	KnownPDBs(context.Context, *dbdpb.KnownPDBsRequest, ...grpc.CallOption) (*dbdpb.KnownPDBsResponse, error)
+	GetDatabaseType(context.Context, *dbdpb.GetDatabaseTypeRequest, ...grpc.CallOption) (*dbdpb.GetDatabaseTypeResponse, error)
+	GetDatabaseName(context.Context, *dbdpb.GetDatabaseNameRequest, ...grpc.CallOption) (*dbdpb.GetDatabaseNameResponse, error)
+	DataPumpImportAsync(ctx context.Context, req *dbdpb.DataPumpImportAsyncRequest, opts ...grpc.CallOption) (*lropb.Operation, error)
+	DataPumpExportAsync(ctx context.Context, req *dbdpb.DataPumpExportAsyncRequest, opts ...grpc.CallOption) (*lropb.Operation, error)
+	ListOperations(ctx context.Context, req *lropb.ListOperationsRequest, opts ...grpc.CallOption) (*lropb.ListOperationsResponse, error)
+	GetOperation(ctx context.Context, req *lropb.GetOperationRequest, opts ...grpc.CallOption) (*lropb.Operation, error)
+	DeleteOperation(ctx context.Context, in *lropb.DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	DownloadDirectoryFromGCS(ctx context.Context, req *dbdpb.DownloadDirectoryFromGCSRequest, opts ...grpc.CallOption) (*dbdpb.DownloadDirectoryFromGCSResponse, error)
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "Usage: %s -action [CheckDatabase[CDB|PDB],"+
+		" [Start|Stop]Database, [Start|Stop]Listeners, RunSQLPlus[Formatted],"+
+		" DataPump[Import|Export]Async,"+
+		" DownloadDirectoryFromGCS]"+
+		" [-database_name -request_timeout -export_object_type -export_objects -gcs_path -gcs_log_path] [-port|-socket]\n", os.Args[0])
+	flag.PrintDefaults()
+}
+
+func main() {
+	klog.InitFlags(nil)
+	// Set the custom usage function before parsing so that bad flags print it.
+	flag.Usage = usage
+	flag.Parse()
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		klog.ErrorS(err, "Failed to retrieve the hostname")
+		os.Exit(exitErrorCode)
+	}
+
+	// There's a 5 min default timeout on a Dial and an overall
+	// total default timeout of 10 min, all configurable.
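+	// Example (illustrative): a long-running import can be given extra headroom:
+	//   dbdaemon_client --action=DataPumpImportAsync --gcs_path=gs://... --request_timeout=30m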
+	ctx, cancel := context.WithTimeout(context.Background(), *reqTimeout)
+	defer cancel()
+
+	conn, err := dbdaemonlib.DatabaseDaemonDialLocalhost(ctx, consts.DefaultDBDaemonPort, grpc.WithBlock())
+	if err != nil {
+		klog.ErrorS(err, "Failed to dial the Database Daemon")
+		os.Exit(exitErrorCode)
+	}
+	defer conn.Close()
+
+	client := newDatabaseDaemonClient(conn)
+
+	switch *action {
+	case "CheckDatabaseCDB":
+		klog.InfoS("checking the state of the CDB...", "container/host", hostname)
+		_, err := client.CheckDatabaseState(ctx, &dbdpb.CheckDatabaseStateRequest{IsCdb: true, DatabaseName: *cdbName})
+		if err != nil {
+			klog.ErrorS(err, "failed to check the state of the CDB")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: CDB is healthy", "action", *action, "CDB", *cdbName, "container/host", hostname)
+
+	case "CheckDatabasePDB":
+		klog.InfoS("checking the state of the PDB...", "container/host", hostname, "PDB", *databaseName)
+		_, err := client.CheckDatabaseState(ctx, &dbdpb.CheckDatabaseStateRequest{DatabaseName: *databaseName})
+		if err != nil {
+			klog.ErrorS(err, "failed to check the state of the PDB")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: PDB is healthy", "action", *action, "container/host", hostname, "PDB", *databaseName)
+
+	case "RunSQLPlus", "RunSQLPlusFormatted":
+		klog.InfoS("Executing a SQL statement...", "action", *action, "container/host", hostname)
+		if *commands == "" {
+			klog.Errorf("--action=RunSQLPlus requires the --commands parameter, but none was provided")
+			os.Exit(exitErrorCode)
+		}
+
+		var resp *dbdpb.RunCMDResponse
+		if *action == "RunSQLPlus" {
+			resp, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: strings.Split(*commands, ";")})
+		} else {
+			resp, err = client.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: strings.Split(*commands, ";")})
+		}
+		if err != nil {
+			klog.ErrorS(err, "failed to run SQL statement", "sql", *commands)
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: SQL statement successfully executed", "action", *action, "sql", *commands, "response", resp)
+
+	case "StopDatabase":
+		klog.InfoS("stopping a database...", "container/host", hostname, "CDB", *cdbName)
+
+		resp, err := client.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+			Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+			DatabaseName: *cdbName,
+			Option:       "immediate",
+		})
+		if err != nil {
+			klog.ErrorS(err, "failed to stop a database")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: Successfully stopped database", "action", *action, "CDB", *cdbName, "container/host", hostname, "response", resp)
+
+	case "StartDatabase":
+		klog.InfoS("starting a database...", "container/host", hostname, "CDB", *cdbName)
+		resp, err := client.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+			Operation:    dbdpb.BounceDatabaseRequest_STARTUP,
+			DatabaseName: *cdbName,
+			Option:       "open",
+		})
+		if err != nil {
+			klog.ErrorS(err, "failed to start a database")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: Successfully started database", "action", *action, "CDB", *cdbName, "host", hostname, "response", resp)
+
+	case "StopListeners":
+		for listenerName := range consts.ListenerNames {
+			klog.InfoS("stopping listeners...", "container/host", hostname, "listener", listenerName)
+			resp, err := client.BounceListener(ctx, &dbdpb.BounceListenerRequest{
+				ListenerName: listenerName,
+				TnsAdmin:     filepath.Join(fmt.Sprintf(consts.ListenerDir, consts.DataMount), listenerName),
+				Operation:    dbdpb.BounceListenerRequest_STOP,
+			})
+			if err != nil {
+				klog.ErrorS(err, "failed to stop a listener", "listener", listenerName)
+				os.Exit(exitErrorCode)
+			}
+			klog.InfoS("action succeeded: Successfully stopped a listener", "action", *action, "listener", listenerName, "container/host", hostname, "response", resp)
+		}
+
+	case "StartListeners":
+		for listenerName := range consts.ListenerNames {
+			klog.InfoS("starting listeners...", "container/host", hostname, "listener", listenerName)
+			resp, err := client.BounceListener(ctx, &dbdpb.BounceListenerRequest{
+				ListenerName: listenerName,
+				TnsAdmin:     filepath.Join(fmt.Sprintf(consts.ListenerDir, consts.DataMount), listenerName),
+				Operation:    dbdpb.BounceListenerRequest_START,
+			})
+			if err != nil {
+				klog.ErrorS(err, "failed to start a listener", "listener", listenerName)
+				os.Exit(exitErrorCode)
+			}
+			klog.InfoS("action succeeded: Successfully started a listener", "action", *action, "listener", listenerName, "container/host", hostname, "response", resp)
+		}
+
+	case "KnownPDBs":
+		klog.InfoS("getting a list of known PDBs...", "container/host", hostname)
+
+		resp, err := client.KnownPDBs(ctx, &dbdpb.KnownPDBsRequest{})
+		if err != nil {
+			klog.ErrorS(err, "failed to get a list of known PDBs")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: Successfully retrieved list of known PDBs", "action", *action, "container/host", hostname, "response", resp)
+
+	case "GetDatabaseType":
+		klog.InfoS("retrieving database type...", "container/host", hostname)
+
+		resp, err := client.GetDatabaseType(ctx, &dbdpb.GetDatabaseTypeRequest{})
+		if err != nil {
+			klog.ErrorS(err, "failed to retrieve database type")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: Successfully retrieved database type", "action", *action, "container/host", hostname, "response", resp)
+
+	case "GetDatabaseName":
+		klog.InfoS("retrieving database name...", "container/host", hostname)
+
+		resp, err := client.GetDatabaseName(ctx, &dbdpb.GetDatabaseNameRequest{})
+		if err != nil {
+			klog.ErrorS(err, "failed to retrieve database name")
+			os.Exit(exitErrorCode)
+		}
+		klog.InfoS("action succeeded: Successfully retrieved database name", "action", *action, "container/host", hostname, "response", resp)
+
+	case "DataPumpImportAsync":
+		klog.InfoS("starting data pump import...", "container/host", hostname, "PDB", *databaseName)
+
+		resp, err := client.DataPumpImportAsync(ctx, &dbdpb.DataPumpImportAsyncRequest{
+			SyncRequest: &dbdpb.DataPumpImportRequest{
+				PdbName:    *databaseName,
+				DbDomain:   "gke",
+				GcsPath:    *gcsPath,
+				GcsLogPath: *gcsLogPath,
+				CommandParams: []string{
+					"FULL=YES",
+					"METRICS=YES",
+					"LOGTIME=ALL",
+				},
+			},
+		})
+		if err != nil {
+			klog.ErrorS(err, "failed to start data pump import")
+			os.Exit(exitErrorCode)
+		}
+
+		klog.InfoS("action succeeded: Successfully started Data Pump import", "action", *action, "container/host", hostname, "response", resp)
+
+	case "DataPumpExportAsync":
+		klog.InfoS("starting data pump export...", "container/host", hostname, "PDB", *databaseName, "objectType", *exportObjectType, "exportObjects", *exportObjects)
+
+		resp, err := client.DataPumpExportAsync(ctx, &dbdpb.DataPumpExportAsyncRequest{
+			SyncRequest: &dbdpb.DataPumpExportRequest{
+				PdbName:       *databaseName,
+				DbDomain:      "gke",
+				ObjectType:    *exportObjectType,
+				Objects:       *exportObjects,
+				FlashbackTime: *flashbackTime,
+				GcsPath:       *gcsPath,
+				GcsLogPath:    *gcsLogPath,
+				CommandParams: []string{
+					"METRICS=YES",
+					"LOGTIME=ALL",
+				},
+			},
+		})
+		if err != nil {
klog.ErrorS(err, "failed to start data pump export") + os.Exit(exitErrorCode) + } + klog.InfoS("action succeeded: Successfully started data pump export", "action", *action, "container/host", hostname, "response", resp) + + case "ListOperations": + klog.InfoS("running ListOperations...") + resp, err := client.ListOperations(ctx, &lropb.ListOperationsRequest{}) + if err != nil { + klog.ErrorS(err, "failed listing operations") + os.Exit(exitErrorCode) + } + + klog.InfoS("action succeeded: Successfully listed operations", "action", *action, "container/host", hostname, "response", resp) + + case "GetOperation": + klog.InfoS("running GetOperation...") + resp, err := client.GetOperation(ctx, &lropb.GetOperationRequest{Name: *operationId}) + if err != nil { + klog.ErrorS(err, "failed getting operation", "id", operationId) + os.Exit(exitErrorCode) + } + + klog.InfoS("action succeeded: Successfully retrieved operation", "action", *action, "container/host", hostname, "response", resp) + + case "DeleteOperation": + klog.InfoS("running DeleteOperation...") + resp, err := client.DeleteOperation(ctx, &lropb.DeleteOperationRequest{Name: *operationId}) + if err != nil { + klog.ErrorS(err, "failed deleting operation", "id", operationId) + os.Exit(exitErrorCode) + } + + klog.InfoS("action succeeded: Successfully deleted operation", "action", *action, "container/host", hostname, "response", resp) + case "DownloadDirectoryFromGCS": + klog.InfoS("download from GCS bucket", "gcsPath", *gcsPath) + _, err := client.DownloadDirectoryFromGCS(ctx, &dbdpb.DownloadDirectoryFromGCSRequest{GcsPath: *gcsPath, LocalPath: consts.DefaultRMANDir}) + if err != nil { + klog.ErrorS(err, "failed downloading directory from gcs bucket", "gcs", *gcsPath, "local path", consts.DefaultRMANDir) + os.Exit(exitErrorCode) + } + klog.InfoS("download succeeded") + case "": + flag.Usage() + + default: + klog.Errorf("Unknown action: %q", *action) + os.Exit(exitErrorCode) + } +} diff --git a/oracle/cmd/dbdaemon_proxy/BUILD.bazel b/oracle/cmd/dbdaemon_proxy/BUILD.bazel new file mode 100644 index 0000000..43fe18b --- /dev/null +++ b/oracle/cmd/dbdaemon_proxy/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "dbdaemon_proxy_lib", + srcs = ["dbdaemon_proxy.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/dbdaemon_proxy", + visibility = ["//visibility:private"], + deps = [ + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "//oracle/pkg/database/dbdaemonproxy", + "@io_k8s_klog_v2//:klog", + "@org_golang_google_grpc//:go_default_library", + ], +) + +go_binary( + name = "dbdaemon_proxy", + embed = [":dbdaemon_proxy_lib"], + visibility = ["//visibility:public"], +) diff --git a/oracle/cmd/dbdaemon_proxy/dbdaemon_proxy.go b/oracle/cmd/dbdaemon_proxy/dbdaemon_proxy.go new file mode 100644 index 0000000..114d12a --- /dev/null +++ b/oracle/cmd/dbdaemon_proxy/dbdaemon_proxy.go @@ -0,0 +1,155 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Database Daemon Proxy.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"net"
+	"os"
+	"os/user"
+	"syscall"
+
+	"google.golang.org/grpc"
+	"k8s.io/klog/v2"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/dbdaemonproxy"
+)
+
+const (
+	lockFile      = "/var/tmp/dbdaemon_proxy.lock"
+	exitErrorCode = consts.DefaultExitErrorCode
+)
+
+var (
+	sockFile        = flag.String("socket", consts.ProxyDomainSocketFile, "Path to the domain socket file for a Database Daemon Proxy.")
+	port            = flag.Int("port", 0, "Optional port to bind a Database Daemon Proxy to.")
+	skipUserCheck   = flag.Bool("skip_user_check", false, "Optionally skip the check of the user who runs the Database Daemon Proxy (by default it should be the database software owner)")
+	cdbNameFromYaml = flag.String("cdb_name", "GCLOUD", "Name of the CDB to create")
+)
+
+// A user running this program should not be root and
+// their primary group should be either dba or oinstall.
+func userCheck(skipChecking bool) error {
+	u, err := user.Current()
+	if err != nil {
+		return fmt.Errorf("dbdaemonproxy/userCheck: failed to determine the current user: %v", err)
+	}
+	if skipChecking {
+		klog.InfoS("dbdaemonproxy/userCheck: skipped by request", "username", u.Username)
+		return nil
+	}
+
+	if u.Username == "root" {
+		return fmt.Errorf("dbdaemonproxy/userCheck: this program is designed to be run by the Oracle software installation owner (e.g. oracle), not %q", u.Username)
+	}
+
+	groups := []string{"dba", "oinstall"}
+	var gIDs []string
+	for _, group := range groups {
+		g, err := user.LookupGroup(group)
+		// Not every group has to exist, e.g. oinstall may be missing.
+		klog.InfoS("dbdaemonproxy/userCheck", "group", group, "g", g)
+		if err != nil {
+			continue
+		}
+		gIDs = append(gIDs, g.Gid)
+	}
+	for _, g := range gIDs {
+		if u.Gid == g {
+			return nil
+		}
+	}
+	return fmt.Errorf("dbdaemonproxy/userCheck: current user's primary group (GID=%q) is not dba|oinstall (GID=%q)", u.Gid, gIDs)
+}
+
+func agentInit() error {
+	lock, err := os.Create(lockFile)
+	if err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/agentInit: failed to access the lock file", "lockFile", lockFile)
+		return err
+	}
+	if err = syscall.Flock(int(lock.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/agentInit: failed to obtain a lock. Another instance of the Database Daemon may be running", "lockFile", lockFile)
+		return err
+	}
+	// If the domain socket file exists, remove it before the listener tries to create it.
+	if err = os.Remove(*sockFile); err != nil && !os.IsNotExist(err) {
+		klog.ErrorS(err, "dbdaemonproxy/agentInit: failed to remove socket file", "sockFile", *sockFile)
+		return err
+	}
+
+	return nil
+}
+
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+
+	var (
+		lis net.Listener
+		err error
+	)
+
+	if err := userCheck(*skipUserCheck); err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/main: the requested user check failed")
+		os.Exit(exitErrorCode)
+	}
+
+	if err := agentInit(); err != nil {
+		os.Exit(exitErrorCode)
+	}
+
+	if *port == 0 {
+		lis, err = net.Listen("unix", *sockFile)
+	} else {
+		lis, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
+	}
+	if err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/main: listen call failed")
+		os.Exit(exitErrorCode)
+	}
+	defer lis.Close()
+
+	if *port == 0 {
+		// Only root or the Database Daemon user id is allowed to communicate with
+		// the Database Daemon via the socket file.
+		if err = os.Chmod(*sockFile, 0700); err != nil {
+			klog.ErrorS(err, "dbdaemonproxy/main: failed to set permissions on socket file", "sockFile", *sockFile)
+			os.Exit(exitErrorCode)
+		}
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/main: failed to get a hostname")
+		os.Exit(exitErrorCode)
+	}
+
+	grpcSvr := grpc.NewServer()
+	s, err := dbdaemonproxy.New(hostname, *cdbNameFromYaml)
+	if err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/main: failed to execute New")
+		os.Exit(exitErrorCode)
+	}
+	dbdpb.RegisterDatabaseDaemonProxyServer(grpcSvr, s)
+
+	klog.InfoS("Starting a Database Daemon Proxy...", "host", hostname, "address", lis.Addr())
+	if err := grpcSvr.Serve(lis); err != nil {
+		klog.ErrorS(err, "dbdaemonproxy/main: failed to serve")
+		os.Exit(exitErrorCode)
+	}
+}
diff --git a/oracle/cmd/init_oracle/BUILD.bazel b/oracle/cmd/init_oracle/BUILD.bazel
new file mode 100644
index 0000000..c2b0880
--- /dev/null
+++ b/oracle/cmd/init_oracle/BUILD.bazel
@@ -0,0 +1,32 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+    name = "init_oracle_lib",
+    srcs = ["init_oracle.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/init_oracle",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//oracle/pkg/agents/common",
+        "//oracle/pkg/agents/consts",
+        "//oracle/pkg/agents/oracle",
+        "//oracle/pkg/database/provision",
+        "@com_google_cloud_go//compute/metadata",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "init_oracle",
+    embed = [":init_oracle_lib"],
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "init_oracle_files",
+    srcs = [
+        "init_oracle.sh",
+        "stop_oracle.sh",
+    ],
+    visibility = ["//visibility:public"],
+)
diff --git a/oracle/cmd/init_oracle/init_oracle.go b/oracle/cmd/init_oracle/init_oracle.go
new file mode 100644
index 0000000..858c9f6
--- /dev/null
+++ b/oracle/cmd/init_oracle/init_oracle.go
@@ -0,0 +1,252 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
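+
+// init_oracle is the entrypoint of the database init process: it validates
+// the image metadata and available memory, bootstraps the CDB through the
+// Database Daemon on first start, and on later starts reopens the database,
+// its PDBs and the SECURE listener.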
+ +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" + "google.golang.org/grpc" + "k8s.io/klog/v2" + + dbdaemonlib "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision" +) + +const ( + bootstrapTimeout = 19 * time.Minute + minRequiredFreeMemInKB = 6 * 1000 * 1000 // At least 6 Gigs is required for consistently successful bootstrapping +) + +var ( + supportedVersions = map[string]bool{"12.2": true, "18.3": true, "18c": true, "19.2": true, "19.3": true} + pgaMB = flag.Uint64("pga", consts.DefaultPGAMB, "Oracle Database PGA memory sizing in MB") + sgaMB = flag.Uint64("sga", consts.DefaultSGAMB, "Oracle Database SGA memory sizing in MB") + dbDomain = flag.String("db_domain", "", "Oracle db_domain init parameter") + cdbNameFromYaml = flag.String("cdb_name", "GCLOUD", "Name of the CDB to create") + zoneName string + zoneNameOnce sync.Once +) + +type task interface { + GetName() string + Call(ctx context.Context) error +} + +var newBootstrapDatabaseTask = func(ctx context.Context, isCDB bool, cdbNameFromImage, cdbNameFromYaml, version string, pgaMB, sgaMB uint64, p bool, dbdClient dbdpb.DatabaseDaemonClient) (task, error) { + host, err := os.Hostname() + if err != nil { + return nil, err + } + return provision.NewBootstrapDatabaseTask(ctx, isCDB, true, cdbNameFromImage, cdbNameFromYaml, version, zone(), host, *dbDomain, pgaMB, sgaMB, p, dbdClient) +} + +var newDBDClient = func(ctx context.Context) (dbdpb.DatabaseDaemonClient, func() error, error) { + conn, err := dbdaemonlib.DatabaseDaemonDialLocalhost(ctx, consts.DefaultDBDaemonPort, grpc.WithBlock()) + if err != nil { + return nil, func() error { return nil }, err + } + return dbdpb.NewDatabaseDaemonClient(conn), conn.Close, nil +} + +func zone() string { + zone, err := retrieveZoneName() + if err != nil { + klog.InfoS("failed to retrieve a zone. Running outside of GCP?", "err", err) + zone = "generic" + } + + return zone +} + +// retrieveZoneName returns the zone of the GCE VM. It caches the value since the zone will never +// change. 
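+// Note: only the first call can observe an error from metadata.Zone();
+// subsequent calls return whatever value sync.Once cached.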
+func retrieveZoneName() (string, error) {
+	var err error
+	zoneNameOnce.Do(func() {
+		zoneName, err = metadata.Zone()
+		klog.InfoS("zoneName", "zoneName", zoneName)
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return zoneName, nil
+}
+
+func provisionHost(ctx context.Context, cdbNameFromImage string, cdbNameFromYaml string, version string) error {
+	p, err := hostProvisioned(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to determine host provision state: %v", err)
+	}
+	dbdClient, closeConn, err := newDBDClient(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+
+	task, err := newBootstrapDatabaseTask(ctx, true, cdbNameFromImage, cdbNameFromYaml, version, *pgaMB, *sgaMB, p, dbdClient)
+	if err != nil {
+		return fmt.Errorf("failed to create bootstrap task: %v", err)
+	}
+
+	if err := task.Call(ctx); err != nil {
+		return fmt.Errorf("failed to bootstrap database: %v", err)
+	}
+
+	if !p {
+		if err := markProvisioned(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// hostProvisioned returns true if the provisioner already ran.
+func hostProvisioned(ctx context.Context) (bool, error) {
+	_, err := os.Stat(consts.ProvisioningDoneFile)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	// Something is wrong. Do not start provisioning.
+	return false, fmt.Errorf("failed to determine host status: %v", err)
+}
+
+// markProvisioned creates a flag file to indicate that provisioning completed successfully.
+func markProvisioned() error {
+	f, err := os.Create(consts.ProvisioningDoneFile)
+	if err != nil {
+		return fmt.Errorf("could not create %s file: %v", consts.ProvisioningDoneFile, err)
+	}
+	defer f.Close()
+	return nil
+}
+
+func postProvision(ctx context.Context, oracleHome, cdbName string) error {
+	if err := provision.RelinkConfigFiles(oracleHome, cdbName); err != nil {
+		return err
+	}
+	dbdClient, closeConn, err := newDBDClient(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	if _, err := dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+		DatabaseName: cdbName,
+		Operation:    dbdpb.BounceDatabaseRequest_STARTUP,
+	}); err != nil {
+		klog.ErrorS(err, "startup failed")
+	}
+	if _, err := dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+		Commands: []string{"alter pluggable database all open"},
+	}); err != nil {
+		klog.ErrorS(err, "open pdb failed")
+	}
+	if _, err := dbdClient.BounceListener(ctx, &dbdpb.BounceListenerRequest{
+		ListenerName: "SECURE",
+		TnsAdmin:     filepath.Join(fmt.Sprintf(consts.ListenerDir, consts.DataMount), consts.SECURE),
+		Operation:    dbdpb.BounceListenerRequest_START,
+	}); err != nil {
+		klog.ErrorS(err, "start listener failed")
+	}
+	return nil
+}
+
+func main() {
+	klog.InitFlags(nil)
+	flag.Parse()
+	ctx, cancel := context.WithTimeout(context.Background(), bootstrapTimeout)
+	defer cancel()
+
+	oracleHome, cdbNameFromImage, version, err := provision.FetchMetaDataFromImage(provision.MetaDataFile)
+	if err != nil {
+		klog.ErrorS(err, "error while parsing the image's metadata file")
+		os.Exit(consts.DefaultExitErrorCode)
+	}
+
+	if !supportedVersions[version] {
+		klog.InfoS("preflight check", "unsupported version", version)
+		os.Exit(consts.DefaultExitErrorCode)
+	}
+
+	if freeMem, err := getFreeMemInfoFromProc(); err != nil || freeMem < minRequiredFreeMemInKB {
+		klog.InfoS("Unable to determine free memory or not enough memory available to initiate bootstrapping", "available free memory", freeMem, "required free mem", minRequiredFreeMemInKB)
+		os.Exit(consts.DefaultExitErrorCode)
+	}
+
+	klog.InfoS("metadata is as follows", "home", oracleHome, "cdbNameFromYaml", *cdbNameFromYaml, "version", version)
+	if cdbNameFromImage == "" {
+		if _, err := os.Stat(consts.ProvisioningDoneFile); err == nil {
+			if err := postProvision(ctx, oracleHome, *cdbNameFromYaml); err != nil {
+				klog.ErrorS(err, "postProvision failed")
+			}
+		} else {
+			klog.InfoS("CDB provisioning skipped")
+		}
+		os.Exit(consts.DefaultExitErrorCode)
+	}
+	klog.InfoS("image contains a CDB, starting provisioning")
+
+	if err := provisionHost(ctx, cdbNameFromImage, *cdbNameFromYaml, version); err != nil {
+		klog.ErrorS(err, "CDB provisioning failed")
+		os.Exit(consts.DefaultExitErrorCode)
+	}
+	klog.InfoS("CDB provisioning: DONE")
+}
+
+func getFreeMemInfoFromProc() (int, error) {
+	content, err := ioutil.ReadFile("/proc/meminfo")
+	if err != nil {
+		return -1, fmt.Errorf("unable to read /proc/meminfo file")
+	}
+	buffer := bytes.NewBuffer(content)
+	for {
+		line, err := buffer.ReadString('\n')
+		if err != nil && err != io.EOF {
+			break
+		}
+		// An example MemAvailable info line looks as follows:
+		// MemAvailable:    1094508 kB
+		if ndx := strings.Index(line, "MemAvailable:"); ndx >= 0 {
+			s := strings.Split(line, ":")
+			if len(s) != 2 {
+				return -1, fmt.Errorf("error while parsing available memory info")
+			}
+			line = strings.TrimSpace(s[1])
+			// Discard the last 3 characters (" kB") in the line.
+			if mem, err := strconv.Atoi(line[:len(line)-3]); err == nil {
+				klog.InfoS("Available memory size", "MemAvailable in KB", mem)
+				return mem, nil
+			}
+		}
+		// Break once the buffer is drained; otherwise this loop would spin on io.EOF forever.
+		if err == io.EOF {
+			break
+		}
+	}
+	return -1, fmt.Errorf("unable to determine available memory")
+}
diff --git a/oracle/cmd/init_oracle/init_oracle.sh b/oracle/cmd/init_oracle/init_oracle.sh
new file mode 100644
index 0000000..5a99ede
--- /dev/null
+++ b/oracle/cmd/init_oracle/init_oracle.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+SCRIPTS_DIR="/agents"
+
+function term_handler() {
+  echo "$(date +%Y-%m-%d.%H:%M:%S) SIGTERM received, stopping a database container..." >> "${SCRIPTS_DIR}/init_oracle.log"
+  ${SCRIPTS_DIR}/stop_oracle.sh abort
+}
+
+function kill_handler() {
+  echo "$(date +%Y-%m-%d.%H:%M:%S) SIGKILL received..." >> "${SCRIPTS_DIR}/init_oracle.log"
+}
+
+function int_handler() {
+  echo "$(date +%Y-%m-%d.%H:%M:%S) SIGINT received..." >> "${SCRIPTS_DIR}/init_oracle.log"
+}
+
+get_sga_pga() {
+  local tot=$(free -m | awk '/Mem/ {print $2}')
+  sga=$(( ${tot} * 1 / 2 ))
+  pga=$(( ${tot} * 1 / 8 ))
+}
+
+trap term_handler SIGTERM
+# Note: SIGKILL cannot actually be caught; this trap is kept for documentation purposes.
+trap kill_handler SIGKILL
+trap int_handler SIGINT
+
+${SCRIPTS_DIR}/dbdaemon_proxy --cdb_name="$1" &
+childPID=$!
+echo "$(date +%Y-%m-%d.%H:%M:%S) Initializing database daemon proxy with PID $childPID" >> "${SCRIPTS_DIR}/init_oracle.log" + +get_sga_pga +echo "$(date +%Y-%m-%d.%H:%M:%S) Initializing CDB database with PGA ${pga} and SGA ${sga} version:${VERSION}" >> "${SCRIPTS_DIR}/init_oracle.log" +${SCRIPTS_DIR}/init_oracle --pga="${pga}" --sga="${sga}" --cdb_name="$1" --db_domain="$2" --logtostderr=true +rc=$? +if (( ${rc} != 0 )); then + echo "$(date +%Y-%m-%d.%H:%M:%S) Error initializing CDB database: ${rc}" >> "${SCRIPTS_DIR}/init_oracle.log" +fi +echo "$(date +%Y-%m-%d.%H:%M:%S) Create CDB database done." >> "${SCRIPTS_DIR}/init_oracle.log" +wait $childPID diff --git a/oracle/cmd/init_oracle/stop_oracle.sh b/oracle/cmd/init_oracle/stop_oracle.sh new file mode 100644 index 0000000..c342fee --- /dev/null +++ b/oracle/cmd/init_oracle/stop_oracle.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +(( $# != 1 )) && { echo "Usage: $(basename "$0") "; exit 1; } + +OPT=${1} +if [[ "${OPT}" != "immediate" && "${OPT}" != "abort" && "${OPT}" != "force" ]]; then + echo "wrong stop option: ${OPT}" + exit 1 +fi + +ORACLE_SID=`grep ORACLE_SID= ~/.metadata | cut -d "=" -f2` +source /home/oracle/${ORACLE_SID}.env +if [[ "${OPT}" == "force" ]]; then + echo "Killing all ${ORACLE_SID} processes..." + ps -ef -u "oracle"|grep "${ORACLE_SID}"| grep -v grep|awk '{print $2}'|xargs kill -9 + exit $? +fi + +echo "Shutting down the database (${OPT}) ..." +sqlplus / as sysdba< 1 { + return "", fmt.Errorf("query returned more than one value, got=%d values", len(row)) + } + + var queryVal string + for _, value := range row { + queryVal = value + } + return queryVal, nil +} diff --git a/oracle/cmd/monitoring/BUILD.bazel b/oracle/cmd/monitoring/BUILD.bazel new file mode 100644 index 0000000..b06e970 --- /dev/null +++ b/oracle/cmd/monitoring/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "monitoring_lib", + srcs = ["monitoring_agent.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/cmd/monitoring", + visibility = ["//visibility:private"], + deps = [ + "//third_party/monitoring", + "@com_github_prometheus_client_golang//prometheus", + "@com_github_prometheus_client_golang//prometheus/promhttp", + "@io_k8s_apimachinery//pkg/util/wait", + "@io_k8s_klog_v2//:klog", + ], +) + +go_binary( + name = "monitoring", + embed = [":monitoring_lib"], + visibility = ["//visibility:public"], +) diff --git a/oracle/cmd/monitoring/monitoring_agent.go b/oracle/cmd/monitoring/monitoring_agent.go new file mode 100644 index 0000000..1a92622 --- /dev/null +++ b/oracle/cmd/monitoring/monitoring_agent.go @@ -0,0 +1,105 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "flag" + "log" + "net/http" + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + //Required for debugging + //_ "net/http/pprof" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/third_party/monitoring" +) + +var ( + listenAddress = flag.String("listen_address", ":9161", "Address to listen on for web interface and telemetry") + metricPath = flag.String("telemetry_path", "/metrics", "Path under which to expose metrics") + defaultFileMetrics = flag.String("default_metrics", "default-metrics.yaml", "File with default metrics in a YAML file") + queryTimeout = flag.String("query_timeout", "5", "Query timeout in seconds") + customMetrics = flag.String("custom_metrics", "", "File that may contain various custom metrics in a YAML file") + dbservice = flag.String("dbservice", "", "The DB service.") + dbport = flag.Int("dbport", 0, "The DB service port.") + initTimeoutMin = flag.Int("init_timeout_min", 10, "The monitor agent initialization timeout in minutes, which includes the time to wait for the DB ready.") + logFlushFreq = 5 * time.Second +) + +func main() { + klog.InitFlags(nil) + flag.Parse() + klog.InfoS("Starting oracledb_exporter ") + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*initTimeoutMin)*time.Minute) + defer cancel() + exporter, err := monitoring.NewExporter(ctx, *defaultFileMetrics, *customMetrics, *dbservice, *dbport, *queryTimeout) + if err != nil { + klog.ErrorS(err, "error in starting monitoring agent") + os.Exit(1) + } + prometheus.MustRegister(exporter) + klog.InfoS("new exporter registered") + + InitLogs() + defer FlushLogs() + + opts := promhttp.HandlerOpts{ + ErrorLog: NewLogger("monitor"), + ErrorHandling: promhttp.ContinueOnError, + } + http.Handle(*metricPath, promhttp.HandlerFor(prometheus.DefaultGatherer, opts)) + + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("<html><head><title>Oracle DB Exporter</title></head><body><h1>Oracle DB Exporter</h1><p><a href='" + *metricPath + "'>Metrics</a></p></body></html>
")) + }) + klog.InfoS("Listening on", *listenAddress) + if err = http.ListenAndServe(*listenAddress, nil); err != nil { + klog.ErrorS(err, "error in starting monitoring agent") + os.Exit(1) + } +} + +// KlogWriter serves as a bridge between the standard log package and the glog package. +type KlogWriter struct{} + +// Write implements the io.Writer interface. +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) + return len(data), nil +} + +// InitLogs initializes logs the way we want for kubernetes. +func InitLogs() { + log.SetOutput(KlogWriter{}) + log.SetFlags(0) + // The default glog flush interval is 5 seconds. + go wait.Forever(klog.Flush, logFlushFreq) +} + +// FlushLogs flushes logs immediately. +func FlushLogs() { + klog.Flush() +} + +// NewLogger creates a new log.Logger which sends logs to klog.Info. +func NewLogger(prefix string) *log.Logger { + return log.New(KlogWriter{}, prefix, 0) +} diff --git a/oracle/config/certmanager/certificate.yaml b/oracle/config/certmanager/certificate.yaml new file mode 100644 index 0000000..237c937 --- /dev/null +++ b/oracle/config/certmanager/certificate.yaml @@ -0,0 +1,25 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/oracle/config/certmanager/kustomization.yaml b/oracle/config/certmanager/kustomization.yaml new file mode 100644 index 0000000..bebea5a --- /dev/null +++ b/oracle/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/oracle/config/certmanager/kustomizeconfig.yaml b/oracle/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000..90d7c31 --- /dev/null +++ b/oracle/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_backups.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_backups.yaml new file mode 100644 index 0000000..88d2671 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_backups.yaml @@ -0,0 +1,272 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: 
backups.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance Name + type: string + - JSONPath: .spec.type + name: Backup Type + type: string + - JSONPath: .spec.subType + name: Backup SubType + type: string + - JSONPath: .spec.dop + name: DOP + type: integer + - JSONPath: .spec.backupset + name: BS/IC + type: boolean + - JSONPath: .spec.gcsPath + name: GCS Path + type: string + - JSONPath: .status.phase + name: Phase + type: string + - JSONPath: .status.backupid + name: Backup ID + type: string + - JSONPath: .status.backuptime + name: Backup Time + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Backup is the Schema for the backups API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the desired state of Backup. + properties: + backupItems: + description: For a Physical backup this slice can be used to indicate + what PDBs, schemas, tablespaces or tables to back up. + items: + type: string + type: array + backupset: + description: For a Physical backup the choices are Backupset and Image + Copies. Backupset is the default, but if Image Copies are required, + flip this flag to false. + type: boolean + checkLogical: + description: For a Physical backup, optionally turn on an additional + "check logical" option. The default is off. + type: boolean + compressed: + description: For a Physical backup, optionally turn on compression, + by flipping this flag to true. The default is false. + type: boolean + dop: + description: For a Physical backup, optionally indicate a degree of + parallelism also known as DOP. + format: int32 + maximum: 100 + minimum: 1 + type: integer + filesperset: + description: For a Physical backup, optionally specify filesperset. + The default depends on a type of backup, generally 64. + format: int32 + type: integer + gcsPath: + description: If set up ahead of time, the backup sets of a physical + backup can be optionally transferred to a GCS bucket. A user is to + ensure proper write access to the bucket from within the Oracle Operator. + type: string + instance: + description: Instance is a name of an instance to take a backup for. + type: string + keepDataOnDeletion: + description: KeepDataOnDeletion defines whether to keep backup data + when backup resource is removed. The default value is false. 
+ type: boolean + level: + description: For a Physical backup, optionally specify an incremental + level. The default is 0 (the whole database). + format: int32 + type: integer + localPath: + description: For a Physical backup, optionally specify a local backup + dir. If omitted, /u03/app/oracle/rman is assumed. + type: string + sectionSize: + description: For a Physical backup, optionally specify a section size + in MB. Don't include the unit (MB), just the integer. + format: int32 + type: integer + subType: + description: 'Backup sub-type, which is only relevant for a Physical + backup type (e.g. RMAN). If omitted, the default of Instance(Level) + is assumed. Supported options at this point are: Instance or Database + level backups.' + enum: + - Instance + - Database + - Tablespace + - Datafile + type: string + timeLimitMinutes: + description: For a Physical backup, optionally specify the time threshold. + If a threshold is reached, the backup request would time out and error + out. The threshold is expressed in minutes. Don't include the unit + (minutes), just the integer. + format: int32 + type: integer + type: + description: "Type describes a type of a backup to take. Immutable. + Available options are: - Snapshot: storage level disk snapshot. - + Physical: database engine specific backup that relies on a redo stream + / continuous archiving (WAL) and may allow a PITR. Examples + include pg_backup, pgBackRest, mysqlbackup. A Physical + backup may be file based or database block based \t (e.g. Oracle + RMAN). - Logical: database engine specific backup that relies on running + SQL statements, e.g. mysqldump, pg_dump, expdp. If not + specified, the default of Snapshot is assumed." + enum: + - Snapshot + - Physical + - Logical + type: string + volumeSnapshotClass: + description: VolumeSnapshotClass points to a particular CSI driver and + is used for taking a volume snapshot. If requested here at the Backup + level, this setting overrides the platform default as well as the + default set via the Config (global user preferences). + type: string + type: object + status: + description: BackupStatus defines the observed state of Backup. + properties: + backupid: + type: string + backuptime: + type: string + conditions: + description: Conditions represents the latest available observations + of the backup's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + phase: + description: Phase is a summary of current state of the Backup. + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_backupschedules.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_backupschedules.yaml new file mode 100644 index 0000000..ebfcc89 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_backupschedules.yaml @@ -0,0 +1,296 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: backupschedules.oracle.db.anthosapis.com +spec: + group: oracle.db.anthosapis.com + names: + kind: BackupSchedule + listKind: BackupScheduleList + plural: backupschedules + singular: backupschedule + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: BackupSchedule is the Schema for the backupschedules API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupScheduleSpec defines the desired state of BackupSchedule. + properties: + backupRetentionPolicy: + description: BackupRetentionPolicy is the policy used to trigger automatic + deletion of backups produced from this BackupSchedule. + properties: + backupRetention: + description: BackupRetention is the number of successful backups + to keep around. The default is 7. A value of 0 means "do not delete + backups based on count". Max of 512 allows for ~21 days of hourly + backups or ~1.4 years of daily backups. + format: int32 + maximum: 512 + minimum: 0 + type: integer + type: object + backupSpec: + description: BackupSpec defines the Backup that will be created on the + provided schedule. + properties: + backupItems: + description: For a Physical backup this slice can be used to indicate + what PDBs, schemas, tablespaces or tables to back up. + items: + type: string + type: array + backupset: + description: For a Physical backup the choices are Backupset and + Image Copies. Backupset is the default, but if Image Copies are + required, flip this flag to false. + type: boolean + checkLogical: + description: For a Physical backup, optionally turn on an additional + "check logical" option. The default is off. + type: boolean + compressed: + description: For a Physical backup, optionally turn on compression, + by flipping this flag to true. The default is false. + type: boolean + dop: + description: For a Physical backup, optionally indicate a degree + of parallelism also known as DOP. + format: int32 + maximum: 100 + minimum: 1 + type: integer + filesperset: + description: For a Physical backup, optionally specify filesperset. + The default depends on a type of backup, generally 64. + format: int32 + type: integer + gcsPath: + description: If set up ahead of time, the backup sets of a physical + backup can be optionally transferred to a GCS bucket. A user is + to ensure proper write access to the bucket from within the Oracle + Operator. + type: string + instance: + description: Instance is a name of an instance to take a backup + for. + type: string + keepDataOnDeletion: + description: KeepDataOnDeletion defines whether to keep backup data + when backup resource is removed. The default value is false. + type: boolean + level: + description: For a Physical backup, optionally specify an incremental + level. The default is 0 (the whole database). + format: int32 + type: integer + localPath: + description: For a Physical backup, optionally specify a local backup + dir. If omitted, /u03/app/oracle/rman is assumed. + type: string + sectionSize: + description: For a Physical backup, optionally specify a section + size in MB. Don't include the unit (MB), just the integer. + format: int32 + type: integer + subType: + description: 'Backup sub-type, which is only relevant for a Physical + backup type (e.g. RMAN). If omitted, the default of Instance(Level) + is assumed. Supported options at this point are: Instance or Database + level backups.' + enum: + - Instance + - Database + - Tablespace + - Datafile + type: string + timeLimitMinutes: + description: For a Physical backup, optionally specify the time + threshold. If a threshold is reached, the backup request would + time out and error out. The threshold is expressed in minutes. + Don't include the unit (minutes), just the integer. 
+ format: int32 + type: integer + type: + description: "Type describes a type of a backup to take. Immutable. + Available options are: - Snapshot: storage level disk snapshot. + - Physical: database engine specific backup that relies on a redo + stream / continuous archiving (WAL) and may allow + a PITR. Examples include pg_backup, pgBackRest, mysqlbackup. + \ A Physical backup may be file based or database block + based \t (e.g. Oracle RMAN). - Logical: database engine + specific backup that relies on running SQL statements, + e.g. mysqldump, pg_dump, expdp. If not specified, the default + of Snapshot is assumed." + enum: + - Snapshot + - Physical + - Logical + type: string + volumeSnapshotClass: + description: VolumeSnapshotClass points to a particular CSI driver + and is used for taking a volume snapshot. If requested here at + the Backup level, this setting overrides the platform default + as well as the default set via the Config (global user preferences). + type: string + type: object + schedule: + description: Schedule is a cron-style expression of the schedule on + which Backup will be created. For allowed syntax, see en.wikipedia.org/wiki/Cron + and godoc.org/github.com/robfig/cron. + type: string + startingDeadlineSeconds: + description: StartingDeadlineSeconds is an optional deadline in seconds + for starting the backup creation if it misses scheduled time for any + reason. The default is 30 seconds. + format: int64 + type: integer + suspend: + description: Suspend tells the controller to suspend operations - both + creation of new Backup and retention actions. This will not have any + effect on backups currently in progress. Default is false. + type: boolean + required: + - backupSpec + - schedule + type: object + status: + description: BackupScheduleStatus defines the observed state of BackupSchedule. + properties: + backupHistory: + description: BackupHistory stores the records for up to 7 of the latest + backups. + items: + description: BackupHistoryRecord is a historical record of a Backup. + properties: + backupName: + description: BackupName is the name of the Backup that gets created. + nullable: true + type: string + creationTime: + description: CreationTime is the time that the Backup gets created. + format: date-time + nullable: true + type: string + phase: + description: Phase tells the state of the Backup. + type: string + required: + - backupName + - creationTime + type: object + type: array + backupTotal: + description: BackupTotal stores the total number of current existing + backups created by this backupSchedule. + format: int32 + type: integer + conditions: + description: Conditions of the BackupSchedule. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. 
If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: LastBackupTime is the time the last Backup was created + for this BackupSchedule. + format: date-time + nullable: true + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_configs.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_configs.yaml new file mode 100644 index 0000000..5827fbc --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_configs.yaml @@ -0,0 +1,144 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: configs.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.platform + name: Platform + type: string + - JSONPath: .spec.diskSizes + name: Disk Sizes + type: string + - JSONPath: .spec.storageClass + name: Storage Class + type: string + - JSONPath: .spec.volumeSnapshotClass + name: Volume Snapshot Class + type: string + group: oracle.db.anthosapis.com + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: Config is the Schema for the configs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConfigSpec defines the desired state of Config. + properties: + disks: + description: 'Disks slice describes at minimum two disks: data and log + (archive log), and optionally a backup disk.' + items: + description: DiskSpec defines the desired state of a disk. (the structure + is deliberately designed to be flexible, as a slice, so that if + we change a disk layout for different hosting platforms, the model + can be also adjusted to reflect that). + properties: + name: + description: 'Name of a disk. Allowed values are: DataDisk,LogDisk,BackupDisk' + enum: + - DataDisk + - LogDisk + - BackupDisk + type: string + size: + anyOf: + - type: integer + - type: string + description: 'Disk size. If not specified, the defaults are: DataDisk:"100Gi", + LogDisk:"150Gi",BackupDisk:"100Gi"' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + description: StorageClass points to a particular CSI driver and + is used for disk provisioning. + type: string + type: + description: Disk type. Depending on a deployment platform, DiskType + may take different values. On GCP, support "HDD" and "SSD". + Default to "HDD" if not specified. + type: string + required: + - name + type: object + type: array + hostAntiAffinityNamespaces: + description: HostAntiAffinityNamespaces is an optional list of namespaces + that need to be included in anti-affinity by hostname rule. The effect + of the rule is forbidding scheduling a database pod in the current + namespace on a host that already runs a database pod in any of the + listed namespaces. + items: + type: string + type: array + images: + additionalProperties: + type: string + description: Service agent and other data plane agent images. This is + an optional map that allows a customer to specify agent images different + from those chosen/provided by the Oracle Operator by default. See + an example of how this map can be used in config/samples/v1alpha1_config_gcp1.yaml + type: object + logLevel: + additionalProperties: + type: string + description: Log Levels for the various components. This is an optional + map for component -> log level See an example of how this map can + be used in config/samples/v1alpha1_config_gcp1.yaml + type: object + platform: + description: 'Deployment platform. Presently supported values are: GCP + (default), BareMetal.' + enum: + - GCP + - BareMetal + - Minikube + type: string + storageClass: + description: Storage class to use for dynamic provisioning. This value + varies depending on a platform. For GCP (and the default) it is "csi-gce-pd". + type: string + volumeSnapshotClass: + description: Volume Snapshot class to use for storage snapshots. This + value varies depending on a platform. For GCP (and the default) it + is "csi-gce-pd-snapshot-class". + type: string + type: object + status: + description: ConfigStatus defines the observed state of Config. 
+ type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_cronanythings.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_cronanythings.yaml new file mode 100644 index 0000000..01cff6b --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_cronanythings.yaml @@ -0,0 +1,287 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: cronanythings.oracle.db.anthosapis.com +spec: + group: oracle.db.anthosapis.com + names: + kind: CronAnything + listKind: CronAnythingList + plural: cronanythings + singular: cronanything + scope: Namespaced + validation: + openAPIV3Schema: + description: CronAnything is the Schema for the cronanythings API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CronAnythingSpec defines the desired state of CronAnything. + properties: + cascadeDelete: + description: CascadeDelete tells CronAnything to set up owner references + from the created resources to the CronAnything resource. This means + that if the CronAnything resource is deleted, all resources created + by it will also be deleted. This is an optional field that defaults + to false. + type: boolean + concurrencyPolicy: + description: ConcurrencyPolicy specifies how to treat concurrent resources + if the resource provides a status path that exposes completion. The + default policy if not provided is to allow a new resource to be created + even if an active resource already exists. If the resource doesn’t + have an active/completed status, the only supported concurrency policy + is to allow creating new resources. This field is mutable. If the + policy is changed to a more stringent policy while multiple resources + are active, it will not delete any existing resources. The exception + is if a creation of a new resource is triggered and the policy has + been changed to Replace. If multiple resources are active, they will + all be deleted and replaced by a new resource. + type: string + finishableStrategy: + description: FinishableStrategy defines how the CronAnything controller + can decide if a resource has completed. Some resources will do some + work after they have been created and at some point be finished. Jobs + are the most common example. If no strategy is defined, it is assumed + that the resources never finish. + properties: + stringField: + description: StringField contains the details for how the CronAnything + controller can find the string field on the resource needed to + decide if the resource has completed.
It also lists the values + that mean the resource has completed. + properties: + fieldPath: + description: The path to the field on the resource that contains + a string value. + type: string + finishedValues: + description: The values of the field that means the resource + has completed. + items: + type: string + type: array + required: + - fieldPath + - finishedValues + type: object + timestampField: + description: TimestampField contains the details for how the CronAnything + controller can find the timestamp field on the resource in order + to decide if the resource has completed. + properties: + fieldPath: + description: The path to the field on the resource that contains + the timestamp. + type: string + required: + - fieldPath + type: object + type: + description: Type tells which strategy should be used. + type: string + required: + - type + type: object + resourceBaseName: + description: ResourceBaseName specifies the base name for the resources + created by CronAnything, which will be named using the format <ResourceBaseName>-<Timestamp>. + This field is optional, and the default is to use the name of the + CronAnything resource as the ResourceBaseName. + type: string + resourceTimestampFormat: + description: ResourceTimestampFormat defines the format of the timestamp + in the name of Resources created by CronAnything <ResourceBaseName>-<Timestamp>. + This field is optional, and the default is to format the timestamp + as unix time. If provided, it must be compatible with time.Format + in golang. + type: string + retention: + description: Retention defines the retention policy for resources created + by CronAnything. If no retention policy is defined, CronAnything will + never delete resources, so cleanup must be handled through some other + process. + properties: + historyCountLimit: + description: The number of completed resources to keep before deleting + them. This only affects finishable resources and the default value + is 3. This field is mutable and if it is changed to a number lower + than the current number of finished resources, the oldest ones + will eventually be deleted until the number of finished resources + matches the limit. + format: int32 + type: integer + historyTimeLimitSeconds: + description: The time since completion that a resource is kept before + deletion. This only affects finishable resources. This does not + have any default value and if it is not provided, HistoryCountLimit + will be used to prune completed resources. If both HistoryCountLimit + and HistoryTimeLimitSeconds are set, it is treated as an OR operation. + format: int64 + type: integer + resourceTimestampStrategy: + description: ResourceTimestampStrategy specifies how the CronAnything + controller can find the age of a resource. This is needed to support + retention. + properties: + field: + description: FieldResourceTimestampStrategy specifies how the + CronAnything controller can find the timestamp for the resource + from a field. + properties: + fieldPath: + description: The path to the field on the resource that + contains the timestamp. + type: string + required: + - fieldPath + type: object + type: + description: Type tells which strategy should be used. + type: string + required: + - type + type: object + required: + - resourceTimestampStrategy + type: object + schedule: + description: Schedule defines a time-based schedule, e.g., a standard + cron schedule such as “@every 10m”. This field is mandatory and mutable. + If it is changed, resources will simply be created at the new interval + from then on.
+ type: string + suspend: + description: Suspend tells the controller to suspend creation of additional + resources. The default value is false. This field is mutable. It will + not affect any existing resources, but only affect creation of additional + resources. + type: boolean + template: + description: Template is a template of a resource type for which instances + are to be created on the given schedule. This field is mandatory and + it must contain a valid template for an existing apiVersion and kind + in the cluster. It is immutable, so if the template needs to change, + the whole CronAnything resource should be replaced. + type: object + totalResourceLimit: + description: TotalResourceLimit specifies the total number of children + allowed for a particular CronAnything resource. If this limit is reached, + no additional resources will be created. This limit is mostly meant + to avoid runaway creation of resources that could bring down the cluster. + Both finished and unfinished resources count against this limit. This + field is mutable. If it is changed to a lower value than the existing + number of resources, none of the existing resources will be deleted + as a result, but no additional resources will be created until the + number of child resources goes below the limit. The field is optional + with a default value of 100. + format: int32 + type: integer + triggerDeadlineSeconds: + description: TriggerDeadlineSeconds defines Deadline in seconds for + creating the resource if it missed the scheduled time. If no deadline + is provided, the resource will be created no matter how far after + the scheduled time. If multiple triggers were missed, only the last + will be triggered and only one resource will be created. This field + is mutable and changing it will affect the creation of new resources + from that point in time. + format: int64 + type: integer + required: + - schedule + - template + type: object + status: + description: CronAnythingStatus defines the observed state of CronAnything. + properties: + lastScheduleTime: + description: LastScheduleTime keeps track of the scheduled time for + the last successfully completed creation of a resource. This is used + by the controller to determine when the next resource creation should + happen. If creation of a resource is delayed for any reason but eventually + does happen, this value will still be updated to the time when it + was originally scheduled to happen. + format: date-time + type: string + pendingTrigger: + description: PendingTrigger keeps track of any triggers that are past + their trigger time, but for some reason have not been completed yet. + This is typically a result of the create operation failing. + properties: + result: + description: Result tells why this trigger is in the pending state, + i.e. what prevented it from completing successfully. + type: string + scheduleTime: + description: ScheduleTime is the time when this trigger was scheduled + to be executed. + format: date-time + type: string + required: + - result + - scheduleTime + type: object + triggerHistory: + description: TriggerHistory keeps track of the status for the last 10 + triggers. This allows users of CronAnything to see whether any triggers + failed. It is important to know that this only keeps track of whether + a trigger was successfully executed (as in creating the given resource), + not whether the created resource was itself successful. For this information, + any users of CronAnything should observe the resources created. 
+ items: + description: TriggerHistoryRecord contains information about the result + of a trigger. It can either have completed successfully, and if + it did not, the record will provide information about what is the + cause of the failure. + properties: + creationTimestamp: + description: CreationTimestamp is the time when this record was + created. This is thus also the time at which the final result + of the trigger was decided. + format: date-time + type: string + result: + description: Result contains the outcome of a trigger. It can + either be CreateSucceeded, which means the given resource was + created as intended, or it can be one of several error messages. + type: string + scheduleTime: + description: ScheduleTime is the time when this trigger was scheduled + to be executed. + format: date-time + type: string + required: + - creationTimestamp + - result + - scheduleTime + type: object + type: array + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_databases.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_databases.yaml new file mode 100644 index 0000000..a81850a --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_databases.yaml @@ -0,0 +1,265 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: databases.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance + type: string + - JSONPath: .status.usernames + name: Users + type: string + - JSONPath: .status.phase + name: Phase + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: DatabaseReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: DatabaseReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: DatabaseReadyMessage + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].status + name: UserReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].reason + name: UserReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].message + name: UserReadyMessage + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + categories: + - genericdatabases + kind: Database + listKind: DatabaseList + plural: databases + shortNames: + - gdb + singular: database + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Database is the Schema for the databases API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseSpec defines the desired state of Database. + properties: + admin_password: + description: AdminPassword is the password for the sys admin of the + database. + maxLength: 30 + minLength: 5 + type: string + adminPasswordGsmSecretRef: + description: AdminPasswordGsmSecretRef is a reference to the secret + object containing sensitive information to pass to config agent. This + field is optional, and may be empty if plaintext password is used. + properties: + projectId: + description: ProjectId identifies the project where the secret resource + is. + type: string + secretId: + description: SecretId identifies the secret. + type: string + version: + description: Version is the version of the secret. If "latest" is + specified, underlying the latest SecretId is used. + type: string + type: object + instance: + description: Name of the instance that the database belongs to. + type: string + name: + description: Name of the database. + type: string + users: + description: Users specifies an optional list of users to be created + in this database. + items: + description: UserSpec defines the desired state of the Database Users. + properties: + gsmSecretRef: + description: A reference to a GSM secret. + properties: + projectId: + description: ProjectId identifies the project where the secret + resource is. + type: string + secretId: + description: SecretId identifies the secret. + type: string + version: + description: Version is the version of the secret. If "latest" + is specified, underlying the latest SecretId is used. + type: string + type: object + name: + description: Name of the User. + type: string + password: + description: Plaintext password. + type: string + privileges: + description: Privileges specifies an optional list of privileges + to grant to the user. + items: + description: PrivilegeSpec defines the desired state of roles + and privileges. + type: string + type: array + secretRef: + description: A reference to a k8s secret. + properties: + name: + description: Name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: Namespace defines the space within which the + secret name must be unique. + type: string + type: object + type: object + type: array + type: object + status: + description: DatabaseStatus defines the observed state of Database. + properties: + UserResourceVersions: + additionalProperties: + type: string + description: 'UserResourceVersions is a map of username to user resource + version (plaintext or GSM). For GSM Resource version, use format: + "projects/{ProjectId}/secrets/{SecretId}/versions/{Version}".' + type: object + conditions: + description: Conditions represents the latest available observations + of the Database's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + isChangeApplied: + description: IsChangeApplied indicates whether database changes have + been applied + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by + the controller. + format: int64 + type: integer + phase: + description: Phase is a summary of the current state of the Database. + type: string + usernames: + description: List of user names. 
+ items: + type: string + type: array + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_exports.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_exports.yaml new file mode 100644 index 0000000..708c2b3 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_exports.yaml @@ -0,0 +1,207 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: exports.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance Name + type: string + - JSONPath: .spec.databaseName + name: Database Name + type: string + - JSONPath: .spec.exportObjectType + name: Export Object Type + type: string + - JSONPath: .spec.exportObjects + name: Export Objects + type: string + - JSONPath: .spec.gcsPath + name: GCS Path + type: string + - JSONPath: .spec.gcsLogPath + name: GCS Log Path + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + kind: Export + listKind: ExportList + plural: exports + singular: export + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Export is the Schema for the exports API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ExportSpec defines the desired state of Export + properties: + databaseName: + description: DatabaseName is the database resource name within Instance + to export from. + type: string + exportObjectType: + description: 'ExportObjectType is the type of objects to export. If + omitted, the default of Schemas is assumed. Supported options at this + point are: Schemas or Tables.' + enum: + - Schemas + - Tables + type: string + exportObjects: + description: ExportObjects are objects, schemas or tables, exported + by DataPump. + items: + type: string + type: array + flashbackTime: + description: FlashbackTime is an optional time. If this time is set, + the SCN that most closely matches the time is found, and this SCN + is used to enable the Flashback utility. The export operation is performed + with data that is consistent up to this SCN. + format: date-time + type: string + gcsLogPath: + description: GcsLogPath is an optional full path in GCS. If set up ahead + of time, export logs can be optionally transferred to set GCS bucket. 
+ A user is to ensure proper write access to the bucket from within + the Oracle Operator. + type: string + gcsPath: + description: GcsPath is a full path in GCS bucket to transfer exported + files to. A user is to ensure proper write access to the bucket from + within the Oracle Operator. + type: string + instance: + description: Instance is the resource name within namespace to export + from. + type: string + type: + description: Type of the Export. If omitted, the default of DataPump + is assumed. + enum: + - DataPump + type: string + required: + - databaseName + - instance + type: object + status: + description: ExportStatus defines the observed state of Export. + properties: + conditions: + description: Conditions represents the latest available observations + of the export's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_imports.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_imports.yaml new file mode 100644 index 0000000..7d50b80 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_imports.yaml @@ -0,0 +1,176 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: imports.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance Name + type: string + - JSONPath: .spec.databaseName + name: Database Name + type: string + - JSONPath: .spec.gcsPath + name: GCS Path + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + - JSONPath: .spec.gcsLogPath + name: GCS Log Path + type: string + group: oracle.db.anthosapis.com + names: + kind: Import + listKind: ImportList + plural: imports + singular: import + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Import is the Schema for the imports API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImportSpec defines the desired state of Import. + properties: + databaseName: + description: DatabaseName is the database resource name within Instance + to import into. + type: string + gcsLogPath: + description: GcsLogPath is an optional path in GCS to copy import log + to. A user is to ensure proper write access to the bucket from within + the Oracle Operator. + type: string + gcsPath: + description: GcsPath is a full path to the input file in GCS containing + import data. A user is to ensure proper write access to the bucket + from within the Oracle Operator. + type: string + instance: + description: Instance is the resource name within same namespace to + import into. + type: string + type: + description: Type of the Import. If not specified, the default of DataPump + is assumed, which is the only supported option currently. 
+ enum: + - DataPump + type: string + type: object + status: + description: ImportStatus defines the observed state of Import. + properties: + conditions: + description: Conditions represents the latest available observations + of the import's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_instances.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_instances.yaml new file mode 100644 index 0000000..40df408 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_instances.yaml @@ -0,0 +1,466 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: instances.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.type + name: DB Engine + type: string + - JSONPath: .spec.version + name: Version + type: string + - JSONPath: .spec.edition + name: Edition + type: string + - JSONPath: .status.endpoint + name: Endpoint + type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.databasenames + name: DB Names + type: string + - JSONPath: .status.backupid + name: Backup ID + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].status + name: DBReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].reason + name: DBReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].message + name: DBReadyMessage + priority: 1 + type: string + - JSONPath: .status.isChangeApplied + name: IsChangeApplied + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + categories: + - genericinstances + kind: Instance + listKind: InstanceList + plural: instances + shortNames: + - ginst + singular: instance + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Instance is the Schema for the instances API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance. + properties: + cdbName: + description: CDBName is the intended name of the CDB attribute. 
If the
+ CDBName is different from the original name (with which the CDB was
+ created), the CDB will be renamed.
+ type: string
+ characterSet:
+ description: CharacterSet used to create a database (the default is
+ AL32UTF8).
+ type: string
+ cloudProvider:
+ description: CloudProvider is only relevant if the hosting type is Cloud,
+ MultiCloud, Hybrid or Bare Metal.
+ enum:
+ - GCP
+ - AWS
+ - Azure
+ - OCI
+ type: string
+ databaseGID:
+ description: DatabaseGID represents an OS group ID of a user running
+ a database.
+ format: int64
+ type: integer
+ databaseUID:
+ description: DatabaseUID represents an OS UID of a user running a database.
+ format: int64
+ type: integer
+ dbDomain:
+ description: DBDomain is an optional attribute to set a database domain.
+ type: string
+ dbNetworkServiceOptions:
+ description: DBNetworkServiceOptions allows overriding some details
+ of the Kubernetes Service created to expose a connection to the database.
+ properties:
+ gcp:
+ description: GCP contains Google Cloud specific attributes of Service
+ configuration.
+ properties:
+ loadBalancerType:
+ description: LoadBalancerType lets you define the type of load balancer;
+ see https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ enum:
+ - ""
+ - Internal
+ - External
+ type: string
+ type: object
+ type: object
+ dbUniqueName:
+ description: DBUniqueName represents a unique database name that would
+ be set for a database (if not provided, as a default, the [_generic|_] will be appended to a DatabaseName).
+ type: string
+ deploymentType:
+ description: DeploymentType reflects a fully managed (DBaaS) vs. semi-managed
+ database.
+ enum:
+ - ""
+ - InCluster
+ - CloudSQL
+ - RDS
+ type: string
+ disks:
+ description: 'Disks slice describes at minimum two disks: data and log
+ (archive log), and optionally a backup disk.'
+ items:
+ description: DiskSpec defines the desired state of a disk. (the structure
+ is deliberately designed to be flexible, as a slice, so that if
+ we change a disk layout for different hosting platforms, the model
+ can also be adjusted to reflect that).
+ properties:
+ name:
+ description: 'Name of a disk. Allowed values are: DataDisk,LogDisk,BackupDisk'
+ enum:
+ - DataDisk
+ - LogDisk
+ - BackupDisk
+ type: string
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ description: 'Disk size. If not specified, the defaults are: DataDisk:"100Gi",
+ LogDisk:"150Gi",BackupDisk:"100Gi"'
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClass:
+ description: StorageClass points to a particular CSI driver and
+ is used for disk provisioning.
+ type: string
+ type:
+ description: Disk type. Depending on the deployment platform, DiskType
+ may take different values. On GCP, "HDD" and "SSD" are supported.
+ Defaults to "HDD" if not specified.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ edition:
+ description: Edition of a database.
+ type: string
+ hostingType:
+ description: HostingType conveys whether an Instance is meant to be
+ hosted on a cloud (single or multiple), on-prem, on Bare Metal, etc.
+ It is meant to be used as a filter and aggregation dimension.
+ enum:
+ - ""
+ - Cloud
+ - MultiCloud
+ - Hybrid
+ - BareMetal
+ - OnPrem
+ type: string
+ images:
+ additionalProperties:
+ type: string
+ description: Service agent and other data plane GCR images.
This is
+ an optional map that allows a customer to specify GCR images different
+ from those chosen/provided.
+ type: object
+ maintenanceWindow:
+ description: MaintenanceWindow specifies the time windows during which
+ database downtimes are allowed for maintenance.
+ properties:
+ timeRanges:
+ description: Maintenance time ranges.
+ items:
+ description: TimeRange defines a window of time. Both start time
+ and duration are required.
+ properties:
+ duration:
+ description: Duration of the maintenance window.
+ type: string
+ start:
+ description: Start time.
+ format: date-time
+ type: string
+ type: object
+ type: array
+ type: object
+ memoryPercent:
+ description: MemoryPercent represents the percentage of memory that
+ should be allocated for Oracle SGA (default is 25%).
+ maximum: 100
+ minimum: 0
+ type: integer
+ minMemoryForDBContainer:
+ description: MinMemoryForDBContainer overrides the default safe limit
+ for scheduling the db container without crashes due to memory pressure.
+ type: string
+ mode:
+ description: Mode specifies how this instance will be managed by the
+ operator.
+ enum:
+ - ManuallySetUpStandby
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters contains the database flags in map format.
+ type: object
+ patching:
+ description: Patching contains all the patching-related attributes, like
+ patch version and image.
+ properties:
+ patchVersion:
+ description: Patch version.
+ type: string
+ patchedServiceImage:
+ description: GCR link containing the patched service image.
+ type: string
+ type: object
+ restore:
+ description: Restore and recovery request details. This section should
+ normally be commented out unless an actual restore/recovery is required.
+ properties:
+ backupId:
+ description: Backup name to restore from.
+ type: string
+ backupType:
+ description: 'Backup type to restore from. Oracle only supports:
+ Snapshot or Physical.'
+ enum:
+ - Snapshot
+ - Physical
+ type: string
+ dop:
+ description: Similar to a (physical) backup, optionally indicate
+ a degree of parallelism, also known as DOP.
+ format: int32
+ maximum: 100
+ minimum: 1
+ type: integer
+ force:
+ description: To overwrite an existing, up and running instance,
+ an explicit authorization is required. This is a safeguard to avoid
+ accidentally destroying a perfectly healthy (status=Ready) instance.
+ enum:
+ - true
+ - false
+ type: boolean
+ requestTime:
+ description: Request version as a date-time to avoid accidental
+ triggering of a restore operation when reapplying an older version
+ of a resource file. If at least one restore operation has occurred,
+ any further restore operation that has the same RequestTime as, or
+ an earlier one than, the last restore operation will be ignored.
+ format: date-time
+ type: string
+ timeLimitMinutes:
+ description: Restore time limit. Optional field defaulting to three
+ times the backup time limit. Don't include the unit (minutes),
+ just the integer.
+ format: int32
+ minimum: 0
+ type: integer
+ required:
+ - requestTime
+ type: object
+ services:
+ additionalProperties:
+ type: boolean
+ description: Services list the optional semi-managed services that the
+ customers can choose from.
+ type: object
+ sourceCidrRanges:
+ description: Source IP CIDR ranges allowed for a client.
+ items:
+ type: string
+ type: array
+ type:
+ description: Type of a database engine.
+ enum:
+ - Oracle
+ type: string
+ version:
+ description: Version of a database.
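+ # A minimal Instance manifest conforming to the spec above could look like the
+ # following (all values are illustrative; the disk sizes and memoryPercent
+ # mirror the defaults documented in the descriptions above):
+ #   apiVersion: oracle.db.anthosapis.com/v1alpha1
+ #   kind: Instance
+ #   metadata:
+ #     name: mydb
+ #   spec:
+ #     type: Oracle
+ #     version: "19.3"
+ #     edition: Enterprise
+ #     cdbName: MYDB
+ #     memoryPercent: 25
+ #     disks:
+ #     - name: DataDisk
+ #       size: 100Gi
+ #     - name: LogDisk
+ #       size: 150Gi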
+ type: string + type: object + status: + description: InstanceStatus defines the observed state of Instance. + properties: + backupid: + description: Last backup ID. + type: string + conditions: + description: Conditions represents the latest available observations + of the GenericInstance's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentParameters: + additionalProperties: + type: string + description: CurrentParameters stores the last successfully set instance + parameters. + type: object + currentServiceImage: + description: CurrentServiceImage stores the image name used by the database + instance. + type: string + databasenames: + description: List of database names (e.g. PDBs) hosted in the Instance. 
items:
+ type: string
+ type: array
+ description:
+ description: Description is for human consumption. E.g. when an Instance
+ is restored from a backup, this field is populated with the human-readable
+ restore details.
+ type: string
+ endpoint:
+ description: Endpoint is presently expressed in the format of -svc..
+ type: string
+ isChangeApplied:
+ description: IsChangeApplied indicates whether instance changes have
+ been applied.
+ type: string
+ lastFailedParameterUpdate:
+ additionalProperties:
+ type: string
+ description: LastFailedParameterUpdate is used to avoid getting into
+ the failed parameter update loop.
+ type: object
+ lastRestoreTime:
+ format: date-time
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed by
+ the controller.
+ format: int64
+ type: integer
+ url:
+ description: URL represents the IP and the port number needed in order
+ to establish a database connection from outside the cluster.
+ type: string
+ type: object
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/oracle/config/crd/bases/oracle.db.anthosapis.com_releases.yaml b/oracle/config/crd/bases/oracle.db.anthosapis.com_releases.yaml new file mode 100644 index 0000000..b977b63 --- /dev/null +++ b/oracle/config/crd/bases/oracle.db.anthosapis.com_releases.yaml @@ -0,0 +1,61 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (unknown)
+ creationTimestamp: null
+ name: releases.oracle.db.anthosapis.com
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.version
+ name: Release
+ type: string
+ group: oracle.db.anthosapis.com
+ names:
+ kind: Release
+ listKind: ReleaseList
+ plural: releases
+ singular: release
+ scope: Namespaced
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ description: Release is the Schema for the releases API.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ReleaseSpec defines the desired state of Release.
+ properties:
+ version:
+ type: string
+ required:
+ - version
+ type: object
+ status:
+ description: ReleaseStatus defines the observed state of Release.
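+ # An example Release conforming to this schema could be as simple as
+ # (name and version values are illustrative):
+ #   apiVersion: oracle.db.anthosapis.com/v1alpha1
+ #   kind: Release
+ #   metadata:
+ #     name: release-example
+ #   spec:
+ #     version: v0.1.0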
+ type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/kustomization.yaml b/oracle/config/crd/kustomization.yaml new file mode 100644 index 0000000..c1c4787 --- /dev/null +++ b/oracle/config/crd/kustomization.yaml @@ -0,0 +1,45 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/oracle.db.anthosapis.com_instances.yaml +- bases/oracle.db.anthosapis.com_databases.yaml +- bases/oracle.db.anthosapis.com_backups.yaml +- bases/oracle.db.anthosapis.com_exports.yaml +- bases/oracle.db.anthosapis.com_configs.yaml +- bases/oracle.db.anthosapis.com_releases.yaml +- bases/oracle.db.anthosapis.com_imports.yaml +- bases/oracle.db.anthosapis.com_cronanythings.yaml +- bases/oracle.db.anthosapis.com_backupschedules.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_instances.yaml +#- patches/webhook_in_databases.yaml +#- patches/webhook_in_backups.yaml +#- patches/webhook_in_exports.yaml +#- patches/webhook_in_configs.yaml +#- patches/webhook_in_releases.yaml +#- patches/webhook_in_imports.yaml +#- patches/webhook_in_cronanythings.yaml +#- patches/webhook_in_backupschedules.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_instances.yaml +#- patches/cainjection_in_databases.yaml +#- patches/cainjection_in_backups.yaml +#- patches/cainjection_in_exports.yaml +#- patches/cainjection_in_configs.yaml +#- patches/cainjection_in_releases.yaml +#- patches/cainjection_in_imports.yaml +#- patches/cainjection_in_cronanythings.yaml +#- patches/cainjection_in_backupschedules.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/oracle/config/crd/kustomizeconfig.yaml b/oracle/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..6f83d9a --- /dev/null +++ b/oracle/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/oracle/config/crd/patches/cainjection_in_backups.yaml b/oracle/config/crd/patches/cainjection_in_backups.yaml new file mode 100644 index 0000000..122a110 --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_backups.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
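+# Once the [CERTMANAGER] vars in config/default/kustomization.yaml are uncommented,
+# kustomize substitutes the annotation value below, rendering something like
+# (values illustrative, derived from the operator- namePrefix and the serving-cert name):
+#   cert-manager.io/inject-ca-from: operator-system/operator-serving-cert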
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: backups.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_backupschedules.yaml b/oracle/config/crd/patches/cainjection_in_backupschedules.yaml new file mode 100644 index 0000000..c008288 --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_backupschedules.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: backupschedules.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_configs.yaml b/oracle/config/crd/patches/cainjection_in_configs.yaml new file mode 100644 index 0000000..a6dc892 --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_configs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: configs.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_cronanythings.yaml b/oracle/config/crd/patches/cainjection_in_cronanythings.yaml new file mode 100644 index 0000000..fdc206d --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_cronanythings.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: cronanythings.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_databases.yaml b/oracle/config/crd/patches/cainjection_in_databases.yaml new file mode 100644 index 0000000..f26f4ae --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_databases.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: databases.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_instances.yaml b/oracle/config/crd/patches/cainjection_in_instances.yaml new file mode 100644 index 0000000..5c8aed4 --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_instances.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: instances.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/cainjection_in_releases.yaml b/oracle/config/crd/patches/cainjection_in_releases.yaml new file mode 100644 index 0000000..e8eb498 --- /dev/null +++ b/oracle/config/crd/patches/cainjection_in_releases.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: releases.oracle.db.anthosapis.com diff --git a/oracle/config/crd/patches/webhook_in_backups.yaml b/oracle/config/crd/patches/webhook_in_backups.yaml new file mode 100644 index 0000000..0a94a5b --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_backups.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: backups.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_backupschedules.yaml b/oracle/config/crd/patches/webhook_in_backupschedules.yaml new file mode 100644 index 0000000..aa1ca0c --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_backupschedules.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: backupschedules.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_configs.yaml b/oracle/config/crd/patches/webhook_in_configs.yaml new file mode 100644 index 0000000..dcf429b --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_configs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
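+# The Cg== placeholder used below is simply base64 for a single newline; it can be
+# reproduced with e.g. `printf '\n' | base64`. cert-manager (or a manual patch)
+# is expected to overwrite it with the real CA bundle later.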
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: configs.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_cronanythings.yaml b/oracle/config/crd/patches/webhook_in_cronanythings.yaml new file mode 100644 index 0000000..88e6116 --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_cronanythings.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cronanythings.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_databases.yaml b/oracle/config/crd/patches/webhook_in_databases.yaml new file mode 100644 index 0000000..3e4198d --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_databases.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: databases.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_instances.yaml b/oracle/config/crd/patches/webhook_in_instances.yaml new file mode 100644 index 0000000..ff65d3c --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_instances.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: instances.oracle.db.anthosapis.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/oracle/config/crd/patches/webhook_in_releases.yaml b/oracle/config/crd/patches/webhook_in_releases.yaml new file mode 100644 index 0000000..9d907a0 --- /dev/null +++ b/oracle/config/crd/patches/webhook_in_releases.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: releases.oracle.db.anthosapis.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotclasses.yaml new file mode 100644 index 0000000..66cdb9f --- /dev/null +++ b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotclasses.yaml @@ -0,0 +1,84 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshotclasses.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotClass
+ listKind: VolumeSnapshotClassList
+ plural: volumesnapshotclasses
+ singular: volumesnapshotclass
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeSnapshotContent created through the
+ VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshotClass specifies parameters that an underlying storage
+ system uses when creating a volume snapshot. A specific VolumeSnapshotClass
+ is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
+ are non-namespaced.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent
+ created through the VolumeSnapshotClass should be deleted when its bound
+ VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
+ "Retain" means that the VolumeSnapshotContent and its physical snapshot
+ on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are deleted.
+ Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this
+ VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific
+ parameters for creating snapshots.
These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotcontents.yaml new file mode 100644 index 0000000..e71a450 --- /dev/null +++ b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshotcontents.yaml @@ -0,0 +1,233 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if a snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the complete size of the snapshot in bytes + jsonPath: .status.restoreSize + name: RestoreSize + type: integer + - description: Determines whether this VolumeSnapshotContent and its physical + snapshot on the underlying storage system should be deleted when its bound + VolumeSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical snapshot on + the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeSnapshotClass to which this snapshot belongs. + jsonPath: .spec.volumeSnapshotClassName + name: VolumeSnapshotClass + type: string + - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + jsonPath: .spec.volumeSnapshotRef.name + name: VolumeSnapshot + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". 
"Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. + For pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system. This + field is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. + This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in with the "creation_time" + value returned from CSI "CreateSnapshotRequest" gRPC call. For a + pre-existing snapshot, this field will be filled with the "creation_time" + value returned from the CSI "ListSnapshots" gRPC call if the driver + supports it. If not specified, it indicates the creation time is + unknown. The format of this field is a Unix nanoseconds time encoded + as an int64. On Unix, the command `date +%s%N` returns the current + time in nanoseconds since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, + this field will be filled with the "ready_to_use" value returned + from the CSI "ListSnapshots" gRPC call if the driver supports it, + otherwise, this field will be set to "True". If not specified, it + means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshots.yaml b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshots.yaml new file mode 100644 index 0000000..96788be --- /dev/null +++ b/oracle/config/crd/testing/snapshot.storage.k8s.io_volumesnapshots.yaml @@ -0,0 +1,184 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if a snapshot is ready to be used to restore a volume. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Name of the source PVC from where a dynamically taken snapshot + will be created. + jsonPath: .spec.source.persistentVolumeClaimName + name: SourcePVC + type: string + - description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + jsonPath: .spec.source.volumeSnapshotContentName + name: SourceSnapshotContent + type: string + - description: Represents the complete size of the snapshot. + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + jsonPath: .spec.volumeSnapshotClassName + name: SnapshotClass + type: string + - description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + jsonPath: .status.boundVolumeSnapshotContentName + name: SnapshotContent + type: string + - description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. + This field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is + no default snapshot class, dynamic snapshot creation will fail. + Empty string is not allowed for this field. TODO(xiangqian): a webhook + validation on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. + NOTE: status can be modified by sources other than system controllers, + and must not be depended upon for accuracy. Controllers should only + use information from the VolumeSnapshotContent object after verifying + that the binding is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot + object has not been successfully bound to a VolumeSnapshotContent + object yet. NOTE: Specified boundVolumeSnapshotContentName alone + does not mean binding is valid. Controllers MUST always verify + bidirectional binding between VolumeSnapshot and VolumeSnapshotContent + to avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in with the "creation_time" + value returned from CSI "CreateSnapshotRequest" gRPC call. For a + pre-existing snapshot, this field will be filled with the "creation_time" + value returned from the CSI "ListSnapshots" gRPC call if the driver + supports it. If not specified, it indicates that the creation time + of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. 
For a pre-existing snapshot, + this field will be filled with the "ready_to_use" value returned + from the CSI "ListSnapshots" gRPC call if the driver supports it, + otherwise, this field will be set to "True". If not specified, it + means the readiness of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/oracle/config/default/kustomization.yaml b/oracle/config/default/kustomization.yaml new file mode 100644 index 0000000..c7997ef --- /dev/null +++ b/oracle/config/default/kustomization.yaml @@ -0,0 +1,74 @@ +# Adds namespace to all resources. +namespace: operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patchesStrategicMerge: + # Protect the /metrics endpoint by putting it behind auth. + # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. +- manager_auth_proxy_patch.yaml + # If you want your controller-manager to expose the /metrics + # endpoint w/o any authn/z, uncomment the following line and + # comment manager_auth_proxy_patch.yaml. + # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. +#- manager_prometheus_metrics_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
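+# A var declared below, e.g. CERTIFICATE_NAME, becomes referenceable as
+# $(CERTIFICATE_NAME) wherever a varReference allows it (crd/kustomizeconfig.yaml
+# allows metadata/annotations, which is what the CRD cainjection patches rely on).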
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+# objref:
+# kind: Certificate
+# group: cert-manager.io
+# version: v1alpha2
+# name: serving-cert # this name should match the one in certificate.yaml
+# fieldref:
+# fieldpath: metadata.namespace
+#- name: CERTIFICATE_NAME
+# objref:
+# kind: Certificate
+# group: cert-manager.io
+# version: v1alpha2
+# name: serving-cert # this name should match the one in certificate.yaml
+#- name: SERVICE_NAMESPACE # namespace of the service
+# objref:
+# kind: Service
+# version: v1
+# name: webhook-service
+# fieldref:
+# fieldpath: metadata.namespace
+#- name: SERVICE_NAME
+# objref:
+# kind: Service
+# version: v1
+# name: webhook-service
diff --git a/oracle/config/default/manager_auth_proxy_patch.yaml b/oracle/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000..61cb5e7 --- /dev/null +++ b/oracle/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@
+# This patch injects a sidecar container which is an HTTP proxy for the controller manager;
+# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: kube-rbac-proxy
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
+ args:
+ - "--secure-listen-address=0.0.0.0:8443"
+ - "--upstream=http://127.0.0.1:8080/"
+ - "--logtostderr=true"
+ - "--v=10"
+ ports:
+ - containerPort: 8443
+ name: https
+ - name: manager
+ args:
+ - "--metrics-addr=127.0.0.1:8080"
+ - "--enable-leader-election"
diff --git a/oracle/config/default/manager_webhook_patch.yaml b/oracle/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000..738de35 --- /dev/null +++ b/oracle/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: webhook-server-cert
diff --git a/oracle/config/default/webhookcainjection_patch.yaml b/oracle/config/default/webhookcainjection_patch.yaml new file mode 100644 index 0000000..7e79bf9 --- /dev/null +++ b/oracle/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@
+# This patch adds annotations to the admission webhook configs;
+# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
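+# One way to verify the injected CA after deployment (the resource name is
+# illustrative; the installed name carries the kustomize namePrefix):
+#   kubectl get validatingwebhookconfiguration operator-validating-webhook-configuration \
+#     -o jsonpath='{.webhooks[0].clientConfig.caBundle}'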
+apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/oracle/config/manager/kustomization.yaml b/oracle/config/manager/kustomization.yaml new file mode 100644 index 0000000..ad89940 --- /dev/null +++ b/oracle/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: gcr.io/elcarro/oracle.db.anthosapis.com/operator + newTag: latest diff --git a/oracle/config/manager/manager.yaml b/oracle/config/manager/manager.yaml new file mode 100644 index 0000000..d5938b1 --- /dev/null +++ b/oracle/config/manager/manager.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - --enable-leader-election + image: controller:latest + name: manager + resources: + limits: + cpu: 100m + memory: 40Mi + requests: + cpu: 100m + memory: 30Mi + terminationGracePeriodSeconds: 10 diff --git a/oracle/config/prometheus/db_monitor.yaml b/oracle/config/prometheus/db_monitor.yaml new file mode 100644 index 0000000..996dfc4 --- /dev/null +++ b/oracle/config/prometheus/db_monitor.yaml @@ -0,0 +1,16 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: db-monitor + namespace: monitoring +spec: + selector: + matchLabels: + app: agent-svc + namespaceSelector: + matchNames: + - db + endpoints: + - port: oracle-monitoring + interval: 10s + path: /metrics diff --git a/oracle/config/prometheus/kustomization.yaml b/oracle/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/oracle/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/oracle/config/prometheus/monitor.yaml b/oracle/config/prometheus/monitor.yaml new file mode 100644 index 0000000..e2d9b08 --- /dev/null +++ b/oracle/config/prometheus/monitor.yaml @@ -0,0 +1,15 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + selector: + control-plane: controller-manager diff --git a/oracle/config/rbac/auth_proxy_role.yaml b/oracle/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..618f5e4 --- /dev/null +++ b/oracle/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git 
a/oracle/config/rbac/auth_proxy_role_binding.yaml b/oracle/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..48ed1e4 --- /dev/null +++ b/oracle/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/oracle/config/rbac/auth_proxy_service.yaml b/oracle/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..6cf656b --- /dev/null +++ b/oracle/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/oracle/config/rbac/backup_editor_role.yaml b/oracle/config/rbac/backup_editor_role.yaml new file mode 100644 index 0000000..8f5d6b0 --- /dev/null +++ b/oracle/config/rbac/backup_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit backups. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: backup-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/backup_viewer_role.yaml b/oracle/config/rbac/backup_viewer_role.yaml new file mode 100644 index 0000000..07691de --- /dev/null +++ b/oracle/config/rbac/backup_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view backups. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: backup-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups/status + verbs: + - get diff --git a/oracle/config/rbac/backupschedule_editor_role.yaml b/oracle/config/rbac/backupschedule_editor_role.yaml new file mode 100644 index 0000000..f958306 --- /dev/null +++ b/oracle/config/rbac/backupschedule_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit backupschedules. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: backupschedule-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules/status + verbs: + - get diff --git a/oracle/config/rbac/backupschedule_viewer_role.yaml b/oracle/config/rbac/backupschedule_viewer_role.yaml new file mode 100644 index 0000000..fa07222 --- /dev/null +++ b/oracle/config/rbac/backupschedule_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view backupschedules.
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: backupschedule-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules/status + verbs: + - get diff --git a/oracle/config/rbac/config_editor_role.yaml b/oracle/config/rbac/config_editor_role.yaml new file mode 100644 index 0000000..9cfa199 --- /dev/null +++ b/oracle/config/rbac/config_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit configs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: config-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/config_viewer_role.yaml b/oracle/config/rbac/config_viewer_role.yaml new file mode 100644 index 0000000..a00f313 --- /dev/null +++ b/oracle/config/rbac/config_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view configs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: config-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs/status + verbs: + - get diff --git a/oracle/config/rbac/cronanything_editor_role.yaml b/oracle/config/rbac/cronanything_editor_role.yaml new file mode 100644 index 0000000..a9695b9 --- /dev/null +++ b/oracle/config/rbac/cronanything_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit cronanythings. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cronanything-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings/status + verbs: + - get diff --git a/oracle/config/rbac/cronanything_viewer_role.yaml b/oracle/config/rbac/cronanything_viewer_role.yaml new file mode 100644 index 0000000..4aab53c --- /dev/null +++ b/oracle/config/rbac/cronanything_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view cronanythings. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cronanything-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings/status + verbs: + - get diff --git a/oracle/config/rbac/database_editor_role.yaml b/oracle/config/rbac/database_editor_role.yaml new file mode 100644 index 0000000..f17ef44 --- /dev/null +++ b/oracle/config/rbac/database_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit databases.
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: database-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/database_viewer_role.yaml b/oracle/config/rbac/database_viewer_role.yaml new file mode 100644 index 0000000..2acdbb2 --- /dev/null +++ b/oracle/config/rbac/database_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view databases. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: database-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases/status + verbs: + - get diff --git a/oracle/config/rbac/export_editor_role.yaml b/oracle/config/rbac/export_editor_role.yaml new file mode 100644 index 0000000..169c6eb --- /dev/null +++ b/oracle/config/rbac/export_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit exports. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: export-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/export_viewer_role.yaml b/oracle/config/rbac/export_viewer_role.yaml new file mode 100644 index 0000000..042651e --- /dev/null +++ b/oracle/config/rbac/export_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view exports. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: export-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports/status + verbs: + - get diff --git a/oracle/config/rbac/import_editor_role.yaml b/oracle/config/rbac/import_editor_role.yaml new file mode 100644 index 0000000..49f39da --- /dev/null +++ b/oracle/config/rbac/import_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit imports. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: import-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/import_viewer_role.yaml b/oracle/config/rbac/import_viewer_role.yaml new file mode 100644 index 0000000..d4392f3 --- /dev/null +++ b/oracle/config/rbac/import_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view imports.
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: import-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports/status + verbs: + - get diff --git a/oracle/config/rbac/instance_editor_role.yaml b/oracle/config/rbac/instance_editor_role.yaml new file mode 100644 index 0000000..471a72e --- /dev/null +++ b/oracle/config/rbac/instance_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions to edit instances. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: instance-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances/status + verbs: + - get + - patch + - update diff --git a/oracle/config/rbac/instance_viewer_role.yaml b/oracle/config/rbac/instance_viewer_role.yaml new file mode 100644 index 0000000..e849898 --- /dev/null +++ b/oracle/config/rbac/instance_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions to view instances. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: instance-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances/status + verbs: + - get diff --git a/oracle/config/rbac/kustomization.yaml b/oracle/config/rbac/kustomization.yaml new file mode 100644 index 0000000..817f1fe --- /dev/null +++ b/oracle/config/rbac/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 3 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml diff --git a/oracle/config/rbac/leader_election_role.yaml b/oracle/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..eaa7915 --- /dev/null +++ b/oracle/config/rbac/leader_election_role.yaml @@ -0,0 +1,32 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/oracle/config/rbac/leader_election_role_binding.yaml b/oracle/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..eed1690 --- /dev/null +++ b/oracle/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/oracle/config/rbac/release_editor_role.yaml b/oracle/config/rbac/release_editor_role.yaml new file mode 100644 index 0000000..3098cd9 --- /dev/null +++ b/oracle/config/rbac/release_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit releases.
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-editor-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases/status + verbs: + - get diff --git a/oracle/config/rbac/release_viewer_role.yaml b/oracle/config/rbac/release_viewer_role.yaml new file mode 100644 index 0000000..57978b3 --- /dev/null +++ b/oracle/config/rbac/release_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view releases. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-viewer-role +rules: +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases + verbs: + - get + - list + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases/status + verbs: + - get diff --git a/oracle/config/rbac/role.yaml b/oracle/config/rbac/role.yaml new file mode 100644 index 0000000..07f393a --- /dev/null +++ b/oracle/config/rbac/role.yaml @@ -0,0 +1,343 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - services + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - database.oracle.db.anthosapis.com + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.db.anthosapis.com + resources: + - databases/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- 
apiGroups: + - oracle.db.anthosapis.com + resources: + - configs/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings + verbs: + - '*' + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases/status + verbs: + - get + - patch + - update +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/oracle/config/rbac/role_binding.yaml b/oracle/config/rbac/role_binding.yaml new file mode 100644 index 0000000..8f26587 --- /dev/null +++ b/oracle/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/oracle/config/samples/v1alpha1_backup_rman1.yaml b/oracle/config/samples/v1alpha1_backup_rman1.yaml new file mode 100644 index 0000000..ee83507 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_rman1.yaml @@ -0,0 +1,9 @@ +# Bare bones physical backup config. +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: rman1-inst +spec: + instance: mydb + type: Physical + subType: Instance diff --git a/oracle/config/samples/v1alpha1_backup_rman2.yaml b/oracle/config/samples/v1alpha1_backup_rman2.yaml new file mode 100644 index 0000000..0f6941a --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_rman2.yaml @@ -0,0 +1,16 @@ +# Similar to the bare bones physical backup config, but requesting Image Copy, not a Backupset. +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: rman2-inst-ic +spec: + instance: mydb + type: Physical + subType: Instance + # Requesting specifically an image copy, not a backupset (currently it is not + # supported to restore declaratively from backups created with backupset = false) + backupset: false + # For RMAN backup to gcs bucket, localPath will be ignored. 
+ # Replace example-bucket with the bucket that contains a full RMAN backup (currently restore + # from gcs bucket is only supported for full backups). + gcsPath: "gs://example-bucket/rman" diff --git a/oracle/config/samples/v1alpha1_backup_rman3.yaml b/oracle/config/samples/v1alpha1_backup_rman3.yaml new file mode 100644 index 0000000..96e564e --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_rman3.yaml @@ -0,0 +1,22 @@ +# Physical backup config for the whole Instance with all the options. +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: rman3-inst-opts +spec: + instance: mydb + type: Physical + subType: Instance + backupset: true + checkLogical: true + compressed: true + # DOP = Degree of Parallelism. + dop: 4 + # Level: 0=Full Backup, 1=Incremental, 2=Cumulative + # level: 0 + filesperset: 10 + # Backup Section Size in MB (don't specify the unit, just the integer). + sectionSize: 100 + # Backup threshold is expressed in minutes (don't specify the unit, just the integer). + timeLimitMinutes: 30 + localPath: "/u03/app/oracle/rman" \ No newline at end of file diff --git a/oracle/config/samples/v1alpha1_backup_rman4.yaml b/oracle/config/samples/v1alpha1_backup_rman4.yaml new file mode 100644 index 0000000..6b1b9b5 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_rman4.yaml @@ -0,0 +1,28 @@ +# Physical backup config for a (subset of) Pluggable Databases with all the options. +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: rman4-db +spec: + instance: mydb + type: Physical + # Note the subType here is Database, not Instance, which requires backupItems settings. + subType: Database + # The items listed here have to exist, e.g. pdb1 and pdb2 listed below have to be valid PDBs. + backupItems: ["pdb1", "pdb2"] + backupset: true + checkLogical: true + compressed: true + # DOP = Degree of Parallelism. + dop: 4 + # Level: 0=Full Backup, 1=Incremental, 2=Cumulative + # level: 0 + # filesperset: 10 + # Backup Section Size in MB (don't specify the unit, just the integer). + sectionSize: 500 + # Backup threshold is expressed in minutes (don't specify the unit, just the integer). + timeLimitMinutes: 30 + # For RMAN backup to gcs bucket, localPath will be ignored. + # Replace example-bucket with the bucket that contains a full RMAN backup (currently restore + # from gcs bucket is only supported for full backups).
+ gcsPath: "gs://example-bucket/rman" \ No newline at end of file diff --git a/oracle/config/samples/v1alpha1_backup_snap1.yaml b/oracle/config/samples/v1alpha1_backup_snap1.yaml new file mode 100644 index 0000000..c2f4d06 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_snap1.yaml @@ -0,0 +1,7 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: snap1 +spec: + instance: mydb + type: Snapshot diff --git a/oracle/config/samples/v1alpha1_backup_snap2.yaml b/oracle/config/samples/v1alpha1_backup_snap2.yaml new file mode 100644 index 0000000..79e2dc6 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_snap2.yaml @@ -0,0 +1,9 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: snap2 +spec: + instance: mydb + type: Snapshot + subType: Instance + volumeSnapshotClass: "csi-gce-pd-snapshot-class" diff --git a/oracle/config/samples/v1alpha1_backup_snap_minikube.yaml b/oracle/config/samples/v1alpha1_backup_snap_minikube.yaml new file mode 100644 index 0000000..9c624f6 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backup_snap_minikube.yaml @@ -0,0 +1,9 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Backup +metadata: + name: snap +spec: + instance: mydb + type: Snapshot + subType: Instance + volumeSnapshotClass: "csi-hostpath-snapclass" diff --git a/oracle/config/samples/v1alpha1_backupschedule.yaml b/oracle/config/samples/v1alpha1_backupschedule.yaml new file mode 100644 index 0000000..a7bda32 --- /dev/null +++ b/oracle/config/samples/v1alpha1_backupschedule.yaml @@ -0,0 +1,16 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: BackupSchedule +metadata: + name: backupschedule-sample +spec: + # Add fields here + backupSpec: + instance: mydb + type: Physical + subType: Instance + # Optionally transfer to GCS. + gcsPath: "gs://bucket/rman" + schedule: "*/5 * * * *" + startingDeadlineSeconds: 60 + backupRetentionPolicy: + backupRetention: 3 diff --git a/oracle/config/samples/v1alpha1_config_bm1.yaml b/oracle/config/samples/v1alpha1_config_bm1.yaml new file mode 100644 index 0000000..78b6305 --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_bm1.yaml @@ -0,0 +1,6 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + platform: "BareMetal" diff --git a/oracle/config/samples/v1alpha1_config_bm2.yaml b/oracle/config/samples/v1alpha1_config_bm2.yaml new file mode 100644 index 0000000..2ddd143 --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_bm2.yaml @@ -0,0 +1,9 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + platform: "BareMetal" + storageClass: "trident-nfs-csi" + volumeSnapshotClass: "trident-nfs-csi-volume-snapshot" + diff --git a/oracle/config/samples/v1alpha1_config_gcp1.yaml b/oracle/config/samples/v1alpha1_config_gcp1.yaml new file mode 100644 index 0000000..416475c --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_gcp1.yaml @@ -0,0 +1,12 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + platform: "GCP" + # Log levels for different components. The log level will print all log + # messages equal to or lower than the given level (0 is the default level). + # See "Verbose Logging" at http://rpg.ifi.uzh.ch/docs/glog.html for details. 
+ logLevel: + config-agent: "3" + operator: "1" diff --git a/oracle/config/samples/v1alpha1_config_gcp2.yaml b/oracle/config/samples/v1alpha1_config_gcp2.yaml new file mode 100644 index 0000000..33d78ab --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_gcp2.yaml @@ -0,0 +1,23 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-unseeded" + config: "gcr.io/${PROJECT_ID}/oracle.db.anthosapis.com/configagent:latest" + platform: "GCP" + disks: [ + { + name: "DataDisk", + type: "pd-standard", + size: "100Gi", + }, + { + name: "LogDisk", + type: "pd-standard", + size: "150Gi", + } + ] + volumeSnapshotClass: "csi-gce-pd-snapshot-class" diff --git a/oracle/config/samples/v1alpha1_config_gcp3.yaml b/oracle/config/samples/v1alpha1_config_gcp3.yaml new file mode 100644 index 0000000..b23e6d3 --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_gcp3.yaml @@ -0,0 +1,7 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + platform: "GCP" + hostAntiAffinityNamespaces: ["db", "db2"] diff --git a/oracle/config/samples/v1alpha1_config_minikube.yaml b/oracle/config/samples/v1alpha1_config_minikube.yaml new file mode 100644 index 0000000..3d469e5 --- /dev/null +++ b/oracle/config/samples/v1alpha1_config_minikube.yaml @@ -0,0 +1,15 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Config +metadata: + name: config +spec: + platform: "Minikube" + storageClass: "csi-hostpath-sc" + volumeSnapshotClass: "csi-hostpath-snapclass" + images: + service: "localhost:5000/oracle-12.2-ee-seeded-mydb:latest" + dbinit: "localhost:5000/oracle.db.anthosapis.com/dbinit:latest" + dbdaemon_client: "localhost:5000/oracle.db.anthosapis.com/dbdaemon_client:latest" + logging_sidecar: "localhost:5000/oracle.db.anthosapis.com/loggingsidecar:latest" + config: "localhost:5000/oracle.db.anthosapis.com/configagent:latest" + monitoring: "localhost:5000/oracle.db.anthosapis.com/monitoring:latest" diff --git a/oracle/config/samples/v1alpha1_cronanything.yaml b/oracle/config/samples/v1alpha1_cronanything.yaml new file mode 100644 index 0000000..41a46a9 --- /dev/null +++ b/oracle/config/samples/v1alpha1_cronanything.yaml @@ -0,0 +1,21 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: CronAnything +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: cronanything-sample +spec: + schedule: "*/5 * * * *" + totalResourceLimit: 10 + historyCountLimit: 2 + concurrencyPolicy: Allow + template: + apiVersion: oracle.db.anthosapis.com/v1alpha1 + kind: Backup + spec: + instance: mydb + type: Physical + subType: Instance + # Optionally transfer to GCS. 
+ gcsPath: "gs://bucket/rman" + diff --git a/oracle/config/samples/v1alpha1_database_pdb1.yaml b/oracle/config/samples/v1alpha1_database_pdb1.yaml new file mode 100644 index 0000000..ffa3fee --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb1.yaml @@ -0,0 +1,23 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - create session diff --git a/oracle/config/samples/v1alpha1_database_pdb1_express.yaml b/oracle/config/samples/v1alpha1_database_pdb1_express.yaml new file mode 100644 index 0000000..3b8a637 --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb1_express.yaml @@ -0,0 +1,15 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace diff --git a/oracle/config/samples/v1alpha1_database_pdb1_gsm.yaml b/oracle/config/samples/v1alpha1_database_pdb1_gsm.yaml new file mode 100644 index 0000000..c7fc833 --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb1_gsm.yaml @@ -0,0 +1,35 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + adminPasswordGsmSecretRef: + projectId: ${PROJECT_ID} + secretId: GPDB_ADMIN + version: "1" + users: + - name: superuser + passwordGsmSecretRef: + projectId: ${PROJECT_ID} + secretId: superuser + version: "1" + privileges: + - dba + - name: scott + passwordGsmSecretRef: + projectId: ${PROJECT_ID} + secretId: scott + version: "1" + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + passwordGsmSecretRef: + projectId: ${PROJECT_ID} + secretId: proberuser + version: "1" + privileges: + - create session diff --git a/oracle/config/samples/v1alpha1_database_pdb1_unseeded.yaml b/oracle/config/samples/v1alpha1_database_pdb1_unseeded.yaml new file mode 100644 index 0000000..ffa3fee --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb1_unseeded.yaml @@ -0,0 +1,23 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - create session diff --git a/oracle/config/samples/v1alpha1_database_pdb2.yaml b/oracle/config/samples/v1alpha1_database_pdb2.yaml new file mode 100644 index 0000000..72937dc --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb2.yaml @@ -0,0 +1,19 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb2 +spec: + name: pdb2 + instance: mydb + admin_password: google + users: + - name: scott2 + password: tiger2 + privileges: + - connect + - resource + - name: scott3 + password: tiger3 + privileges: + - execute_catalog_role + - create session diff --git a/oracle/config/samples/v1alpha1_database_pdb3.yaml b/oracle/config/samples/v1alpha1_database_pdb3.yaml new file mode 100644 index 0000000..62816a5 --- /dev/null 
+++ b/oracle/config/samples/v1alpha1_database_pdb3.yaml @@ -0,0 +1,19 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb3 +spec: + name: pdb3 + instance: mydb + admin_password: google + users: + - name: scott + password: tiger + privileges: + - connect + - resource + - name: scott3 + password: tiger3 + privileges: + - execute_catalog_role + - create session diff --git a/oracle/config/samples/v1alpha1_database_pdb4.yaml b/oracle/config/samples/v1alpha1_database_pdb4.yaml new file mode 100644 index 0000000..f08f745 --- /dev/null +++ b/oracle/config/samples/v1alpha1_database_pdb4.yaml @@ -0,0 +1,29 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb4 +spec: + name: pdb4 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: scott2 + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - create session diff --git a/oracle/config/samples/v1alpha1_export_dmp1.yaml b/oracle/config/samples/v1alpha1_export_dmp1.yaml new file mode 100644 index 0000000..5eff6b5 --- /dev/null +++ b/oracle/config/samples/v1alpha1_export_dmp1.yaml @@ -0,0 +1,23 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Export +metadata: + name: export-dmp1 +spec: + instance: mydb + databaseName: pdb1 + type: DataPump + exportObjectType: Schemas # 'Schemas' or 'Tables' + exportObjects: + - SCOTT + # Uncomment to enable the flashback time feature. + # Time is in RFC3339 datetime format, + # for example 1985-04-12T23:20:50.52Z represents 20 minutes and 50.52 seconds after the 23rd hour of April 12th, 1985 in UTC. + # Before enabling, make sure undo_retention settings are consistent with the set time. +# flashbackTime: "2021-01-05T15:00:00Z" # optional + + # Service account should have write access to the destination bucket, + # sample command to grant access (replace with actual SA email): + # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectCreator gs://ex-bucket + # Add .gz as GCS object file extension to enable compression.
+ gcsPath: "gs://ex-bucket/export/pdb1/exportSchema.dmp" + gcsLogPath: "gs://ex-bucket/export/pdb1/exportSchema.log" # optional diff --git a/oracle/config/samples/v1alpha1_export_dmp2.yaml b/oracle/config/samples/v1alpha1_export_dmp2.yaml new file mode 100644 index 0000000..d820cc0 --- /dev/null +++ b/oracle/config/samples/v1alpha1_export_dmp2.yaml @@ -0,0 +1,14 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Export +metadata: + name: export-dmp2 +spec: + instance: mydb + databaseName: pdb1 + type: DataPump + exportObjectType: Tables # 'Schemas' or 'Tables' + exportObjects: + - SCOTT.t1 + - SCOTT.t2 + gcsPath: "gs://bucket/export/pdb1/exportTable.dmp" + gcsLogPath: "gs://bucket/export/pdb1/exportTable.log" # optional diff --git a/oracle/config/samples/v1alpha1_import_pdb1.yaml b/oracle/config/samples/v1alpha1_import_pdb1.yaml new file mode 100644 index 0000000..f638203 --- /dev/null +++ b/oracle/config/samples/v1alpha1_import_pdb1.yaml @@ -0,0 +1,18 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Import +metadata: + name: import-pdb1 +spec: + instance: mydb + databaseName: pdb1 + type: DataPump + # Service account should have read access to the destination bucket, + # sample command to grant read access (replace with actual SA email): + # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectViewer gs://ex-bucket + gcsPath: "gs://ex-bucket/import/pdb1/import.dmp" + # Uncomment to enable import log upload to GCS. + # Service account should have write access to the destination bucket, + # sample command to grant access (replace with actual SA email): + # > gsutil iam ch serviceaccount:SA@PROJECT.iam.gserviceaccount.com:objectCreator gs://ex-bucket + # Add .gz as GCS object file extension to enable compression. +# gcsLogPath: "gs://ex-bucket/import/pdb1.log" diff --git a/oracle/config/samples/v1alpha1_instance.yaml b/oracle/config/samples/v1alpha1_instance.yaml new file mode 100644 index 0000000..a814d9d --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance.yaml @@ -0,0 +1,41 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + storageClass: "csi-gce-pd" + - name: LogDisk + size: 55Gi + type: pd-standard + storageClass: "csi-gce-pd" + services: + Backup: true + Monitoring: true + Logging: true + sourceCidrRanges: [ 0.0.0.0/0 ] + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}" + cdbName: ${DB} + minMemoryForDBContainer: 4.0Gi + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" #(or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). 
+# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_18c_XE.yaml b/oracle/config/samples/v1alpha1_instance_18c_XE.yaml new file mode 100644 index 0000000..c59f23e --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_18c_XE.yaml @@ -0,0 +1,43 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "18c" + edition: Express + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + storageClass: "csi-gce-pd" + - name: LogDisk + size: 55Gi + type: pd-standard + storageClass: "csi-gce-pd" + services: + Backup: true + Monitoring: true + Logging: true + HA/DR: false + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-18c-xe-seeded-${DB}" + sourceCidrRanges: [0.0.0.0/0] + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: ${DB} + minMemoryForDBContainer: 4.0Gi + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" (or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 \ No newline at end of file diff --git a/oracle/config/samples/v1alpha1_instance_18c_XE_express.yaml b/oracle/config/samples/v1alpha1_instance_18c_XE_express.yaml new file mode 100644 index 0000000..8070543 --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_18c_XE_express.yaml @@ -0,0 +1,36 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "18c" + edition: Express + dbDomain: "gke" + disks: + - name: DataDisk + - name: LogDisk + services: + Backup: true + Monitoring: true + Logging: true + HA/DR: false + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-18c-xe-seeded-${DB}" + sourceCidrRanges: [0.0.0.0/0] + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: ${DB} + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" (or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer).
+# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_custom_seeded.yaml b/oracle/config/samples/v1alpha1_instance_custom_seeded.yaml new file mode 100644 index 0000000..fb88c0d --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_custom_seeded.yaml @@ -0,0 +1,43 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + storageClass: "csi-gce-pd" + - name: LogDisk + size: 55Gi + type: pd-standard + storageClass: "csi-gce-pd" + services: + Backup: true + Monitoring: true + Logging: true + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}" + sourceCidrRanges: [0.0.0.0/0] + databaseUID: 54321 + databaseGID: 54322 + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: ${DB} + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" (or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_express.yaml b/oracle/config/samples/v1alpha1_instance_express.yaml new file mode 100644 index 0000000..e3288ed --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_express.yaml @@ -0,0 +1,36 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + - name: LogDisk + services: + Backup: true + Monitoring: true + Logging: true + HA/DR: false + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}" + sourceCidrRanges: [0.0.0.0/0] + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: ${DB} + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" (or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). 
+# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_gcp_ilb.yaml b/oracle/config/samples/v1alpha1_instance_gcp_ilb.yaml new file mode 100644 index 0000000..dc630c8 --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_gcp_ilb.yaml @@ -0,0 +1,43 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + storageClass: "csi-gce-pd" + - name: LogDisk + size: 55Gi + type: pd-standard + storageClass: "csi-gce-pd" + services: + Backup: true + Monitoring: true + Logging: true + sourceCidrRanges: [0.0.0.0/0] + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}" + minMemoryForDBContainer: 4.0Gi + + dbNetworkServiceOptions: + gcp: + loadBalancerType: Internal +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" #(or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_minikube.yaml b/oracle/config/samples/v1alpha1_instance_minikube.yaml new file mode 100644 index 0000000..0e67e65 --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_minikube.yaml @@ -0,0 +1,39 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + storageClass: "csi-hostpath-sc" + - name: LogDisk + size: 55Gi + type: pd-standard + storageClass: "csi-hostpath-sc" + services: + Backup: true + Monitoring: true + Logging: true + sourceCidrRanges: [0.0.0.0/0] + databaseUID: 54321 + databaseGID: 54322 + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: ${DB} +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" #(or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 \ No newline at end of file diff --git a/oracle/config/samples/v1alpha1_instance_standby.yaml b/oracle/config/samples/v1alpha1_instance_standby.yaml new file mode 100644 index 0000000..acaf726 --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_standby.yaml @@ -0,0 +1,22 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + services: + Backup: true + Monitoring: true + Logging: true + images: + # Replace below with the actual URIs hosting the service agent images. + # It is preferable to use unseeded images to set up standby instance. + # Database is expected to be created in the process of manual standby setup. 
+ service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-unseeded" + + sourceCidrRanges: [0.0.0.0/0] + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: "GOOG" + mode: "ManuallySetUpStandby" diff --git a/oracle/config/samples/v1alpha1_instance_unseeded.yaml b/oracle/config/samples/v1alpha1_instance_unseeded.yaml new file mode 100644 index 0000000..2a1ce5b --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_unseeded.yaml @@ -0,0 +1,47 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + - name: LogDisk + size: 55Gi + type: pd-standard + + services: + Backup: true + Monitoring: true + Logging: true + + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-unseeded" + + sourceCidrRanges: [0.0.0.0/0] + # Oracle SID character limit is 8, anything > gets truncated by Oracle + cdbName: "GOOG" + # dbUniqueName character constraints are [a-zA-Z0-9_#$] + dbUniqueName: "GOOG_gke" + memoryPercent: 25 + characterSet: "US7ASCII" + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" +# backupId: "mydb-20201030-snap-538716798" +# force: True +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). +# timeLimitMinutes: 180 diff --git a/oracle/config/samples/v1alpha1_instance_with_backup_disk.yaml b/oracle/config/samples/v1alpha1_instance_with_backup_disk.yaml new file mode 100644 index 0000000..682a759 --- /dev/null +++ b/oracle/config/samples/v1alpha1_instance_with_backup_disk.yaml @@ -0,0 +1,39 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + - name: LogDisk + size: 55Gi + type: pd-standard + - name: BackupDisk + size: 55Gi + type: pd-standard + services: + Backup: true + Monitoring: true + sourceCidrRanges: [0.0.0.0/0] + images: + # Replace below with the actual URIs hosting the service agent images. + service: "gcr.io/${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-${DB}" + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" (or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer). 
+# timeLimitMinutes: 180 diff --git a/oracle/config/webhook/kustomization.yaml b/oracle/config/webhook/kustomization.yaml new file mode 100644 index 0000000..9cf2613 --- /dev/null +++ b/oracle/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/oracle/config/webhook/kustomizeconfig.yaml b/oracle/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000..25e21e3 --- /dev/null +++ b/oracle/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config teaches kustomize where to look when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/oracle/config/webhook/manifests.yaml b/oracle/config/webhook/manifests.yaml new file mode 100644 index 0000000..e69de29 diff --git a/oracle/config/webhook/service.yaml b/oracle/config/webhook/service.yaml new file mode 100644 index 0000000..31e0f82 --- /dev/null +++ b/oracle/config/webhook/service.yaml @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/oracle/config/workflows/Kptfile b/oracle/config/workflows/Kptfile new file mode 100644 index 0000000..8f9d4dc --- /dev/null +++ b/oracle/config/workflows/Kptfile @@ -0,0 +1,28 @@ +apiVersion: kpt.dev/v1alpha1 +kind: Kptfile +metadata: + name: workflows +packageMetadata: + tags: + - kpt.dev/app=YourApp + shortDescription: YourApp kpt package +openAPI: + definitions: + io.k8s.cli.setters.services: + type: array + x-k8s-cli: + setter: + name: services + value: "" + listValues: + - + io.k8s.cli.setters.namespace: + x-k8s-cli: + setter: + name: namespace + value: + io.k8s.cli.setters.dbimage: + x-k8s-cli: + setter: + name: dbimage + value: diff --git a/oracle/config/workflows/README.md b/oracle/config/workflows/README.md new file mode 100644 index 0000000..00a8d66 --- /dev/null +++ b/oracle/config/workflows/README.md @@ -0,0 +1,16 @@ +# workflows + +## Description +Operator kpt package + +## Synopsis + + kpt cfg set config/workflows namespace db + kpt cfg set config/workflows services Backup Logging Monitoring + kpt cfg set config/workflows dbimage "gcr.io//oracle_12ee_database" + +## Details + +The Operator kpt package can be used to build declarative workflows for databases +with services that are based on common org-wide DRY templates. +The templates can then be hydrated for specific databases, per environment, or fleet-wide.
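(Editor's note: a quick usage sketch for the setters declared in the Kptfile above. This is a sketch only, assuming a pre-v1 kpt binary, since the Kptfile is kpt.dev/v1alpha1 and the `kpt cfg` subcommands were later removed in kpt v1; paths are relative to the repo root and the `db` namespace is a placeholder.)

    # Inspect the setters declared in the Kptfile and their current values.
    kpt cfg list-setters oracle/config/workflows
    # Hydrate the templates; this rewrites the {"$kpt-set": ...} markers in place.
    kpt cfg set oracle/config/workflows namespace db
    kpt cfg set oracle/config/workflows services Backup Logging Monitoring
    # Apply the hydrated manifests.
    kubectl apply -f oracle/config/workflows/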
diff --git a/oracle/config/workflows/v1alpha1_database_pdb1.yaml b/oracle/config/workflows/v1alpha1_database_pdb1.yaml new file mode 100644 index 0000000..4f9d0ac --- /dev/null +++ b/oracle/config/workflows/v1alpha1_database_pdb1.yaml @@ -0,0 +1,24 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Database +metadata: + name: pdb1 + namespace: "" # {"$kpt-set":"namespace"} +spec: + name: pdb1 + instance: mydb + admin_password: google + users: + - name: superuser + password: superpassword + privileges: + - dba + - name: scott + password: tiger + privileges: + - connect + - resource + - unlimited tablespace + - name: proberuser + password: proberpassword + privileges: + - create session diff --git a/oracle/config/workflows/v1alpha1_instance.yaml b/oracle/config/workflows/v1alpha1_instance.yaml new file mode 100644 index 0000000..e927056 --- /dev/null +++ b/oracle/config/workflows/v1alpha1_instance.yaml @@ -0,0 +1,44 @@ +apiVersion: oracle.db.anthosapis.com/v1alpha1 +kind: Instance +metadata: + name: mydb + namespace: "" # {"$kpt-set":"namespace"} +spec: + type: Oracle + version: "12.2" + edition: Enterprise + dbDomain: "gke" + disks: + - name: DataDisk + size: 45Gi + type: pd-standard + - name: LogDisk + size: 55Gi + type: pd-standard + services: # {"$kpt-set":"services"} + - "" # {"$kpt-set":"services"} + images: + service: "" # {"$kpt-set":"dbimage"} + sourceCidrRanges: [0.0.0.0/0] + minMemoryForDBContainer: 4.0Gi + maintenanceWindow: + timeRanges: + - start: "2121-04-20T15:45:30Z" + duration: "168h" + + # parameters: + # parallel_servers_target: "15" + # disk_asynch_io: "true" + +# Uncomment this section to trigger a restore. +# restore: +# backupType: "Snapshot" #(or "Physical") +# backupId: "mydb-20200705-snap-996678001" +# force: True +# # once applied, new requests with same or older time will be ignored, +# # current time can be generated using the command: date -u '+%Y-%m-%dT%H:%M:%SZ' +# requestTime: "2000-01-19T01:23:45Z" +# # Physical backup specific attributes: +# dop: 2 +# # The unit for time limit is minutes (but specify just an integer).
+# timeLimitMinutes: 180 diff --git a/oracle/controllers/BUILD.bazel b/oracle/controllers/BUILD.bazel new file mode 100644 index 0000000..0781962 --- /dev/null +++ b/oracle/controllers/BUILD.bazel @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "controllers", + srcs = [ + "common.go", + "config_agent_helpers.go", + "exec.go", + "grpc_error.go", + "resources.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/agents/consts", + "//oracle/pkg/database/common", + "@com_github_go_logr_logr//:logr", + "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_apimachinery//pkg/util/intstr", + "@io_k8s_apimachinery//pkg/util/wait", + "@io_k8s_client_go//kubernetes", + "@io_k8s_client_go//kubernetes/scheme", + "@io_k8s_client_go//rest", + "@io_k8s_client_go//tools/remotecommand", + "@io_k8s_client_go//util/retry", + "@io_k8s_klog_v2//:klog", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) + +go_test( + name = "controllers_test", + srcs = [ + "common_test.go", + "resources_test.go", + ], + embed = [":controllers"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + ], +) diff --git a/oracle/controllers/backupcontroller/BUILD.bazel b/oracle/controllers/backupcontroller/BUILD.bazel new file mode 100644 index 0000000..9a544a5 --- /dev/null +++ b/oracle/controllers/backupcontroller/BUILD.bazel @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "backupcontroller", + srcs = ["backup_controller.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/backupcontroller", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/labels", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/selection", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//tools/record", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) + +go_test( + name = "backupcontroller_test", + srcs = ["backup_controller_test.go"], + embed = [":backupcontroller"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1", + "@com_github_onsi_ginkgo//:ginkgo", + 
"@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) diff --git a/oracle/controllers/backupcontroller/backup_controller.go b/oracle/controllers/backupcontroller/backup_controller.go new file mode 100644 index 0000000..94e693a --- /dev/null +++ b/oracle/controllers/backupcontroller/backup_controller.go @@ -0,0 +1,465 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backupcontroller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + backupName = "%s-%s-%s-%d" + pvcNameFull = "%s-u0%d-%s-0" +) + +// BackupReconciler reconciles a Backup object. 
+type BackupReconciler struct {
+	client.Client
+	Log           logr.Logger
+	Scheme        *runtime.Scheme
+	ClientFactory controllers.ConfigAgentClientFactory
+	Recorder      record.EventRecorder
+}
+
+// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=backups,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=backups/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources=volumesnapshotclasses,verbs=get;list;watch
+// +kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources=volumesnapshots,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
+// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=persistentvolumes,verbs=get;list;watch;create;update;patch;delete
+
+func backupSubType(st string) capb.PhysicalBackupRequest_Type {
+	switch st {
+	case "Instance":
+		return capb.PhysicalBackupRequest_INSTANCE
+	case "Database":
+		return capb.PhysicalBackupRequest_DATABASE
+	case "Tablespace":
+		return capb.PhysicalBackupRequest_TABLESPACE
+	case "Datafile":
+		return capb.PhysicalBackupRequest_DATAFILE
+	}
+
+	// If the backup sub type is unknown, default to Instance.
+	// Defaulting to Instance seems more user friendly
+	// (at the expense of silently swallowing a potential user error).
+	return capb.PhysicalBackupRequest_INSTANCE
+}
+
+func (r *BackupReconciler) backupInProgress(ctx context.Context, backup v1alpha1.Backup, ns string, inst *v1alpha1.Instance) error {
+	r.Log.Info("found a backup request in-progress")
+
+	sel := labels.NewSelector()
+	vsLabels := []string{backup.Status.BackupID + "-u02", backup.Status.BackupID + "-u03"}
+	req1, err := labels.NewRequirement("name", selection.In, vsLabels)
+	if err != nil {
+		return err
+	}
+	// labels.Selector.Add returns a new selector, so the result must be
+	// reassigned or the requirement is silently dropped.
+	sel = sel.Add(*req1)
+
+	req2, err := labels.NewRequirement("namespace", selection.Equals, []string{ns})
+	if err != nil {
+		return err
+	}
+	sel = sel.Add(*req2)
+
+	listOpts := []client.ListOption{
+		client.InNamespace(ns),
+		client.MatchingLabelsSelector{Selector: sel},
+	}
+
+	var volSnaps snapv1.VolumeSnapshotList
+	if err := r.List(ctx, &volSnaps, listOpts...); err != nil {
+		r.Log.Error(err, "failed to get a volume snapshot")
+		return err
+	}
+	r.Log.Info("list of found volume snapshots", "volSnaps", volSnaps)
+
+	if len(volSnaps.Items) < 1 {
+		r.Log.Info("no volume snapshots found for a backup request marked as in-progress.", "backup.Status", backup.Status)
+		return nil
+	}
+	r.Log.Info("found volume snapshot(s) for a backup request in-progress")
+
+	vsStatus := make(map[string]bool)
+	for i, vs := range volSnaps.Items {
+		r.Log.Info("iterating over volume snapshots", "VolumeSnapshot#", i, "name", vs.Name)
+		vsStatus[vs.Name] = false
+
+		if vs.Status == nil {
+			return fmt.Errorf("not yet ready: Status missing for Volume Snapshot %s/%s: %v", vs.Namespace, vs.Name, vs)
+		}
+
+		if !*vs.Status.ReadyToUse {
+			return fmt.Errorf("not yet ready: Status found, but it's not flipped to DONE yet for VolumeSnapshot %s/%s: %v", vs.Namespace, vs.Name, vs.Status)
+		}
+		r.Log.Info("ready to use status", "VolumeSnapshot#", i, "name", vs, "status", *vs.Status.ReadyToUse)
+		vsStatus[vs.Name] = true
+	}
+	r.Log.Info("summary
of VolumeSnapshot statuses", "vsStatus", vsStatus) + + r.Recorder.Eventf(&backup, corev1.EventTypeNormal, "BackupCompleted", "BackupId:%v, Elapsed time: %v", backup.Status.BackupID, k8s.ElapsedTimeFromLastTransitionTime(k8s.FindCondition(backup.Status.Conditions, k8s.Ready), time.Second)) + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionTrue, k8s.BackupReady, "") + + r.Log.Info("snapshot is ready") + if err := r.updateBackupStatus(ctx, &backup, inst); err != nil { + r.Log.Error(err, "failed to flip the snapshot status from in-progress to ready") + return err + } + + return nil +} + +// loadConfig attempts to find a customer specific Operator config +// if it's been provided. There should be at most one config. +// If no config is provided by a customer, no errors are raised and +// all defaults are assumed. +func (r *BackupReconciler) loadConfig(ctx context.Context, ns string) (*v1alpha1.Config, error) { + var configs v1alpha1.ConfigList + if err := r.List(ctx, &configs, client.InNamespace(ns)); err != nil { + return nil, err + } + + if len(configs.Items) == 0 { + return nil, nil + } + + if len(configs.Items) > 1 { + return nil, fmt.Errorf("this release only supports a single customer provided config (received %d)", len(configs.Items)) + } + + return &configs.Items[0], nil +} + +// updateBackupStatus updates the phase of Backup and Instance objects to the required state. +func (r *BackupReconciler) updateBackupStatus(ctx context.Context, backup *v1alpha1.Backup, inst *v1alpha1.Instance) error { + readyCond := k8s.FindCondition(backup.Status.Conditions, k8s.Ready) + if k8s.ConditionReasonEquals(readyCond, k8s.BackupInProgress) { + backup.Status.Phase = commonv1alpha1.BackupInProgress + } else if k8s.ConditionReasonEquals(readyCond, k8s.BackupFailed) { + backup.Status.Phase = commonv1alpha1.BackupFailed + } else if k8s.ConditionReasonEquals(readyCond, k8s.BackupReady) { + backup.Status.Phase = commonv1alpha1.BackupSucceeded + if err := r.Status().Update(ctx, backup); err != nil { + return err + } + inst.Status.BackupID = backup.Status.BackupID + return r.Status().Update(ctx, inst) + } else { + // No handlers found for current set of conditions + backup.Status.Phase = "" + } + + return r.Status().Update(ctx, backup) +} + +func (r *BackupReconciler) Reconcile(req ctrl.Request) (result ctrl.Result, recErr error) { + ctx := context.Background() + log := r.Log.WithValues("Backup", req.NamespacedName) + + log.Info("reconciling backup requests") + + var backup v1alpha1.Backup + if err := r.Get(ctx, req.NamespacedName, &backup); err != nil { + log.Error(err, "get backup request error") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Check if the Backup object is already reconciled + readyCond := k8s.FindCondition(backup.Status.Conditions, k8s.Ready) + namespace := req.NamespacedName.Namespace + if k8s.ConditionReasonEquals(readyCond, k8s.BackupReady) || k8s.ConditionReasonEquals(readyCond, k8s.BackupFailed) { + log.Info("Backup reconciler: nothing to do, backup status", "readyCond", readyCond, "Status", backup.Status) + return ctrl.Result{}, nil + } + + // Verify preflight conditions + var inst v1alpha1.Instance + // skip backupPreflightCheck if backup is ready + if !k8s.ConditionStatusEquals(readyCond, v1.ConditionTrue) { + if err := r.backupPreflightCheck(ctx, req, &backup, &inst); err != nil { + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.BackupFailed, err.Error()) + if updateErr := 
r.updateBackupStatus(ctx, &backup, &inst); updateErr != nil {
+				log.Error(updateErr, "unable to update backup status")
+			}
+			r.Recorder.Event(&backup, corev1.EventTypeWarning, "BackupFailed", err.Error())
+			return ctrl.Result{}, err
+		}
+	}
+
+	if k8s.ConditionReasonEquals(readyCond, k8s.BackupInProgress) {
+		if backup.Spec.Type == "Snapshot" {
+			if err := r.backupInProgress(ctx, backup, namespace, &inst); err != nil {
+				return ctrl.Result{}, err
+			}
+		} else {
+			id := lroOperationID(&backup)
+			operation, err := controllers.GetLROOperation(r.ClientFactory, ctx, r, req.Namespace, id, backup.Spec.Instance)
+			if err != nil {
+				log.Error(err, "GetLROOperation error")
+				return ctrl.Result{}, err
+			}
+			if operation.Done {
+				log.Info("LRO is DONE", "id", id)
+				if operation.GetError() != nil {
+					log.Error(fmt.Errorf("%s", operation.GetError().GetMessage()), "backup failed")
+					r.Recorder.Event(&backup, corev1.EventTypeWarning, "BackupFailed", operation.GetError().GetMessage())
+					backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.BackupFailed, operation.GetError().GetMessage())
+				} else {
+					r.Recorder.Eventf(&backup, corev1.EventTypeNormal, "BackupCompleted", "BackupId:%v, Elapsed time: %v", backup.Status.BackupID, k8s.ElapsedTimeFromLastTransitionTime(readyCond, time.Second))
+					backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionTrue, k8s.BackupReady, "")
+				}
+				if err := r.updateBackupStatus(ctx, &backup, &inst); err != nil {
+					log.Error(err, "failed to update the backup resource", "backup", backup)
+					return ctrl.Result{}, err
+				}
+				_ = controllers.DeleteLROOperation(r.ClientFactory, ctx, r, req.Namespace, id, backup.Spec.Instance)
+			} else {
+				log.Info("LRO is in progress", "id", id)
+				return ctrl.Result{RequeueAfter: time.Minute}, nil
+			}
+		}
+
+		// Don't proceed to checking new backup requests until all in-progress are resolved.
+		log.Info("in-progress backup has been marked as resolved: DONE",
+			"new phase", backup.Status.Phase)
+		return ctrl.Result{}, nil
+	}
+
+	log.Info("new backup request detected", "backup", backup)
+
+	// If a snapshot type backup doesn't have a sub-type set or
+	// if it's set to anything other than Instance, force Instance.
+	// (this is because it's the only supported sub-type for a snapshot
+	// and it's a reasonable default for RMAN backup too).
+
+	log.Info("BEFORE", "backup.Spec.Subtype", backup.Spec.Subtype)
+	if backup.Spec.Subtype == "" || (backup.Spec.Type == "Snapshot" && backup.Spec.Subtype != "Instance") {
+		backup.Spec.Subtype = "Instance"
+	}
+	log.Info("AFTER", "backup.Spec.Subtype", backup.Spec.Subtype)
+
+	if err := r.Update(ctx, &backup); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	bktype := "snap"
+	timeLimitMinutes := controllers.PhysBackupTimeLimitDefault
+
+	switch backup.Spec.Type {
+	case "Snapshot":
+
+	case "Physical":
+		bktype = "phys"
+
+		// If omitted, the default DOP is 1.
+		if backup.Spec.Dop == 0 {
+			backup.Spec.Dop = 1
+		}
+
+		// If omitted, the default is backupset, not image copy, so flip it to true.
+		// If set, just pass it along "as is".
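+		// (Backupset is a *bool so an explicit false, i.e. an image copy,
+		// stays distinguishable from an unset field.)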
+ if backup.Spec.Backupset == nil { + backup.Spec.Backupset = func() *bool { b := true; return &b }() + } + + if backup.Spec.TimeLimitMinutes != 0 { + timeLimitMinutes = time.Duration(backup.Spec.TimeLimitMinutes) * time.Minute + } + + default: + return ctrl.Result{}, fmt.Errorf("unsupported backup request type: %q", backup.Spec.Type) + } + + if backup.Spec.Instance == "" { + return ctrl.Result{}, fmt.Errorf("spec.Instance is not set in the backup request: %v", backup) + } + + // Load default preferences (aka "config") if provided by a customer. + config, err := r.loadConfig(ctx, namespace) + if err != nil { + return ctrl.Result{}, err + } + + if config != nil { + log.Info("customer config loaded", "config", config) + } else { + log.Info("no customer specific config found, assuming all defaults") + } + + vsc, err := controllers.ConfigAttribute("VolumeSnapshotClass", backup.Spec.VolumeSnapshotClass, config) + if err != nil || vsc == "" { + return ctrl.Result{}, fmt.Errorf("failed to identify a volumeSnapshotClassName for instance: %q", inst.Name) + } + log.Info("VolumeSnapshotClass", "volumeSnapshotClass", vsc) + + backupID := fmt.Sprintf(backupName, inst.Name, time.Now().Format("20060102"), bktype, time.Now().Nanosecond()) + + if backup.Spec.Type == "Snapshot" { + applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("backup-controller")} + + for _, diskSpec := range inst.Spec.Disks { + shortPVCName, mount := controllers.GetPVCNameAndMount(inst.Name, diskSpec.Name) + fullPVCName := fmt.Sprintf("%s-%s-0", shortPVCName, fmt.Sprintf(controllers.StsName, inst.Name)) + snapshotName := fmt.Sprintf("%s-%s", backupID, mount) + bk, err := controllers.NewSnapshot(&backup, r.Scheme, fullPVCName, snapshotName, vsc) + if err != nil { + return ctrl.Result{}, err + } + log.Info("new Backup/Snapshot resource", "backup", bk) + + if err := r.Patch(ctx, bk, client.Apply, applyOpts...); err != nil { + return ctrl.Result{}, err + } + } + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.BackupInProgress, "") + } else { + if err := preflightCheck(ctx, r, namespace, backup.Spec.Instance); err != nil { + log.Error(err, "external LB is not ready") + return ctrl.Result{}, err + } + + ctxBackup, cancel := context.WithTimeout(context.Background(), timeLimitMinutes) + defer cancel() + + caClient, closeConn, err := r.ClientFactory.New(ctxBackup, r, namespace, backup.Spec.Instance) + if err != nil { + log.Error(err, "failed to create config agent client") + return ctrl.Result{}, err + } + defer closeConn() + + resp, err := caClient.PhysicalBackup(ctxBackup, &capb.PhysicalBackupRequest{ + BackupSubType: backupSubType(backup.Spec.Subtype), + BackupItems: backup.Spec.BackupItems, + Backupset: *backup.Spec.Backupset, + CheckLogical: backup.Spec.CheckLogical, + Compressed: backup.Spec.Compressed, + Dop: backup.Spec.Dop, + Level: backup.Spec.Level, + Filesperset: backup.Spec.Filesperset, + SectionSize: backup.Spec.SectionSize, + LocalPath: backup.Spec.LocalPath, + GcsPath: backup.Spec.GcsPath, + LroInput: &capb.LROInput{OperationId: lroOperationID(&backup)}, + }) + if err != nil { + if !controllers.IsAlreadyExistsError(err) { + return ctrl.Result{}, fmt.Errorf("failed on PhysicalBackup gRPC call: %v", err) + } + log.Info("operation already exists") + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.BackupInProgress, "") + } else { + log.Info("caClient.PhysicalBackup", "response", resp) + if resp.Done { + 
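+			// The agent completed the backup synchronously (no LRO to poll),
+			// so the backup can be marked Ready in this same reconcile pass.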
log.Info("PhysicalBackup succeeded") + r.Recorder.Eventf(&backup, corev1.EventTypeNormal, "BackupCompleted", "BackupId:%v, Elapsed time: %v", backup.Status.BackupID, k8s.ElapsedTimeFromLastTransitionTime(readyCond, time.Second)) + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionTrue, k8s.BackupReady, "") + } else { + log.Info("PhysicalBackup started") + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.BackupInProgress, "") + } + } + } + + if err := r.Update(ctx, &backup); err != nil { + log.Info("failed to update the backup resource", "backup", backup) + return ctrl.Result{}, err + } + + backup.Status.BackupID = backupID + backup.Status.BackupTime = time.Now().Format("20060102150405") + if err := r.updateBackupStatus(ctx, &backup, &inst); err != nil { + return ctrl.Result{}, err + } + + log.Info("reconciling backup: DONE") + + return ctrl.Result{}, nil +} + +func (r *BackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + mgr.GetFieldIndexer().IndexField( + context.TODO(), + &snapv1.VolumeSnapshot{}, ".spec.name", + func(obj runtime.Object) []string { + snapName := obj.(*snapv1.VolumeSnapshot).Name + if snapName == "" { + return nil + } + return []string{snapName} + }) + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.Backup{}). + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&corev1.PersistentVolume{}). + Owns(&snapv1.VolumeSnapshotClass{}). + Owns(&snapv1.VolumeSnapshot{}). + Complete(r) +} + +// backupPreflightCheck checks if the instance is ready for taking backups. +func (r *BackupReconciler) backupPreflightCheck(ctx context.Context, req ctrl.Request, backup *v1alpha1.Backup, inst *v1alpha1.Instance) error { + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: backup.Spec.Instance}, inst); err != nil { + r.Log.Error(err, "Error finding instance for backup validation", "backup", backup) + return fmt.Errorf("Error finding instance - %v", err) + } + if !k8s.ConditionStatusEquals(k8s.FindCondition(inst.Status.Conditions, k8s.Ready), v1.ConditionTrue) { + r.Log.Error(fmt.Errorf("Instance not in ready state for backup"), "Instance not in ready state for backup", "inst.Status.Conditions", inst.Status.Conditions) + return fmt.Errorf("Instance is not in a ready state") + } + return nil +} + +func lroOperationID(backup *v1alpha1.Backup) string { + return fmt.Sprintf("Backup_%s", backup.GetUID()) +} + +var preflightCheck = func(ctx context.Context, r *BackupReconciler, namespace, instName string) error { + // Confirm that an external LB is ready. + svc := &corev1.Service{} + if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.SvcName, instName), Namespace: namespace}, svc); err != nil { + return err + } + + if len(svc.Status.LoadBalancer.Ingress) == 0 { + return fmt.Errorf("preflight check: physical backup: external LB is NOT ready") + } + r.Log.Info("preflight check: physical backup, external LB service is ready", "succeededExecCmd#:", 1, "svc", svc.Name) + return nil +} diff --git a/oracle/controllers/backupcontroller/backup_controller_test.go b/oracle/controllers/backupcontroller/backup_controller_test.go new file mode 100644 index 0000000..80a12fa --- /dev/null +++ b/oracle/controllers/backupcontroller/backup_controller_test.go @@ -0,0 +1,359 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backupcontroller + +import ( + "context" + "fmt" + "testing" + "time" + + snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager + reconciler *BackupReconciler + fakeClientFactory *testhelpers.FakeClientFactory +) + +func TestBackupController(t *testing.T) { + fakeClientFactory = &testhelpers.FakeClientFactory{} + + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Backup controller", func() []testhelpers.Reconciler { + reconciler = &BackupReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Backup"), + Scheme: k8sManager.GetScheme(), + ClientFactory: fakeClientFactory, + Recorder: k8sManager.GetEventRecorderFor("backup-controller"), + } + + return []testhelpers.Reconciler{reconciler} + }) +} + +var _ = Describe("Backup controller", func() { + // Define utility constants for object names and testing timeouts and intervals. 
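+	// timeout and interval bound the Eventually polling in the specs below.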
+	const (
+		Namespace    = "default"
+		BackupName   = "test-backup"
+		InstanceName = "test-instance"
+
+		timeout  = time.Second * 15
+		interval = time.Millisecond * 15
+	)
+
+	var instance v1alpha1.Instance
+
+	ctx := context.Background()
+
+	BeforeEach(func() {
+		instance = v1alpha1.Instance{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      testhelpers.RandName(InstanceName),
+				Namespace: Namespace,
+			},
+			Spec: v1alpha1.InstanceSpec{
+				GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+					Disks: []commonv1alpha1.DiskSpec{
+						{
+							Name: "DataDisk",
+							Size: resource.MustParse("100Gi"),
+						},
+						{
+							Name: "LogDisk",
+							Size: resource.MustParse("150Gi"),
+						},
+					},
+				},
+			},
+		}
+		createdInstance := v1alpha1.Instance{}
+		objKey := client.ObjectKey{Namespace: Namespace, Name: instance.Name}
+		testhelpers.K8sCreateAndGet(k8sClient, ctx, objKey, &instance, &createdInstance)
+
+		instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, "")
+		Expect(k8sClient.Status().Update(ctx, &instance)).Should(Succeed())
+
+		fakeClientFactory.Reset()
+	})
+
+	AfterEach(func() {
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, &instance)
+		createdBackups := &v1alpha1.BackupList{}
+		Expect(k8sClient.List(ctx, createdBackups)).Should(Succeed())
+		for _, backup := range createdBackups.Items {
+			testhelpers.K8sDeleteWithRetry(k8sClient, ctx, &backup)
+		}
+	})
+
+	Context("New backup through snapshot", func() {
+		It("Should create volume snapshots correctly", func() {
+			By("By creating a Snapshot type backup of the instance")
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: Namespace,
+					Name:      BackupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: commonv1alpha1.BackupSpec{
+						Instance: instance.Name,
+						Type:     commonv1alpha1.BackupTypeSnapshot,
+					},
+				},
+			}
+
+			objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName}
+			testhelpers.K8sCreateWithRetry(k8sClient, ctx, backup)
+
+			Eventually(func() (string, error) {
+				return getConditionReason(ctx, objKey, k8s.Ready)
+			}, timeout, interval).Should(Equal(k8s.BackupInProgress))
+
+			var snapshots snapv1.VolumeSnapshotList
+			Expect(k8sClient.List(ctx, &snapshots, client.InNamespace(Namespace))).Should(Succeed())
+			Expect(len(snapshots.Items)).Should(Equal(2))
+		})
+
+		It("Should mark backup as failed because of invalid instance name", func() {
+			By("By creating a Snapshot type backup of the instance")
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: Namespace,
+					Name:      BackupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: commonv1alpha1.BackupSpec{
+						Instance: "bad-instance-name",
+						Type:     commonv1alpha1.BackupTypeSnapshot,
+					},
+				},
+			}
+
+			objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName}
+			testhelpers.K8sCreateWithRetry(k8sClient, ctx, backup)
+
+			Eventually(func() (string, error) {
+				return getConditionReason(ctx, objKey, k8s.Ready)
+			}, timeout, interval).Should(Equal(k8s.BackupFailed))
+		})
+
+		It("Should mark backup as failed because the instance is not ready", func() {
+			By("By marking the instance as not ready")
+			instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.Ready, metav1.ConditionFalse, k8s.CreateInProgress, "")
+			Expect(k8sClient.Status().Update(ctx, &instance)).Should(Succeed())
+			By("By creating a Snapshot type backup of the instance")
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: Namespace,
+					Name:      BackupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: 
commonv1alpha1.BackupSpec{
+						Instance: instance.Name,
+						Type:     commonv1alpha1.BackupTypeSnapshot,
+					},
+				},
+			}
+			objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName}
+			testhelpers.K8sCreateWithRetry(k8sClient, ctx, backup)
+
+			Eventually(func() (string, error) {
+				return getConditionReason(ctx, objKey, k8s.Ready)
+			}, timeout, interval).Should(Equal(k8s.BackupFailed))
+		})
+	})
+
+	Context("New backup through RMAN", func() {
+		It("Should create RMAN backup correctly", func() {
+			oldFunc := preflightCheck
+			preflightCheck = func(ctx context.Context, r *BackupReconciler, namespace, instName string) error {
+				return nil
+			}
+			defer func() { preflightCheck = oldFunc }()
+
+			By("By creating a RMAN type backup of the instance")
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: Namespace,
+					Name:      BackupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: commonv1alpha1.BackupSpec{
+						Instance: instance.Name,
+						Type:     commonv1alpha1.BackupTypePhysical,
+					},
+				},
+			}
+
+			objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName}
+			testhelpers.K8sCreateWithRetry(k8sClient, ctx, backup)
+
+			By("By checking that a physical backup is created")
+			Eventually(func() (string, error) {
+				return getConditionReason(ctx, objKey, k8s.Ready)
+			}, timeout, interval).Should(Equal(k8s.BackupReady))
+			Expect(fakeClientFactory.Caclient.PhysicalBackupCalledCnt).Should(Equal(1))
+		})
+	})
+
+	Context("New backup through RMAN in LRO async environment", func() {
+		It("Should create RMAN backup correctly", func() {
+			oldFunc := preflightCheck
+			preflightCheck = func(ctx context.Context, r *BackupReconciler, namespace, instName string) error {
+				return nil
+			}
+			defer func() { preflightCheck = oldFunc }()
+
+			// Configure the fake ConfigAgent to be in LRO mode.
+			fakeConfigAgentClient := fakeClientFactory.Caclient
+			fakeConfigAgentClient.AsyncPhysicalBackup = true
+			fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusRunning
+
+			By("By creating a RMAN type backup of the instance")
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: Namespace,
+					Name:      BackupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: commonv1alpha1.BackupSpec{
+						Instance: instance.Name,
+						Type:     commonv1alpha1.BackupTypePhysical,
+					},
+				},
+			}
+
+			objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName}
+			var createdBackup v1alpha1.Backup
+			testhelpers.K8sCreateAndGet(k8sClient, ctx, objKey, backup, &createdBackup)
+
+			By("By checking that the physical backup was started")
+			// The test env should trigger reconciliation in the background,
+			// and the reconciler is expected to start the backup.
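+			// With NextGetOperationStatus left at StatusRunning, the condition
+			// should settle on BackupInProgress rather than BackupReady.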
+ Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.BackupInProgress)) + Expect(fakeConfigAgentClient.PhysicalBackupCalledCnt).Should(Equal(1)) + + By("By checking that reconciler watches backup LRO status") + getOperationCallsCntBefore := fakeConfigAgentClient.GetOperationCalledCnt + + Expect(triggerReconcile(ctx, objKey)).Should(Succeed()) + Eventually(func() int { + return fakeConfigAgentClient.GetOperationCalledCnt + }, timeout, interval).ShouldNot(Equal(getOperationCallsCntBefore)) + + var updatedBackup v1alpha1.Backup + Expect(k8sClient.Get(ctx, objKey, &updatedBackup)).Should(Succeed()) + Expect(k8s.FindCondition(updatedBackup.Status.Conditions, k8s.Ready).Reason).Should(Equal(k8s.BackupInProgress)) + + By("By checking that physical backup is Ready on backup LRO completion") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDone + Expect(triggerReconcile(ctx, objKey)).Should(Succeed()) + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.BackupReady)) + + Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt, timeout, interval).Should(Equal(1)) + }) + + It("Should mark unsuccessful RMAN backup as Failed", func() { + oldFunc := preflightCheck + preflightCheck = func(ctx context.Context, r *BackupReconciler, namespace, instName string) error { + return nil + } + defer func() { preflightCheck = oldFunc }() + + // configure fake ConfigAgent to be in LRO mode with a + // failed operation result. + fakeConfigAgentClient := fakeClientFactory.Caclient + fakeConfigAgentClient.AsyncPhysicalBackup = true + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDoneWithError + + By("By creating a RMAN type backup of the instance") + backup := &v1alpha1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: Namespace, + Name: BackupName, + }, + Spec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: instance.Name, + Type: commonv1alpha1.BackupTypePhysical, + }, + }, + } + + objKey := client.ObjectKey{Namespace: Namespace, Name: BackupName} + var createdBackup v1alpha1.Backup + testhelpers.K8sCreateAndGet(k8sClient, ctx, objKey, backup, &createdBackup) + + By("By checking that physical backup was resolved to the Failed state") + // test env should trigger reconciliation in background. + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.BackupFailed)) + + var inst v1alpha1.Instance + Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: Namespace, Name: instance.Name}, &inst)).Should(Succeed()) + Expect(inst.Status.BackupID).Should(Equal("")) + }) + }) +}) + +func getConditionReason(ctx context.Context, objKey client.ObjectKey, condType string) (string, error) { + var backup v1alpha1.Backup + if err := k8sClient.Get(ctx, objKey, &backup); err != nil { + return "", err + } + + cond := k8s.FindCondition(backup.Status.Conditions, condType) + if cond == nil { + return "", fmt.Errorf("%v condition type not found", condType) + } + return cond.Reason, nil +} + +// triggerReconcile invokes k8s reconcile action by updating +// an irrelevant field. 
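+// An update conflict is tolerated: it means another reconcile was already
+// triggered, which serves the same purpose.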
+func triggerReconcile(ctx context.Context, objKey client.ObjectKey) error { + var backup v1alpha1.Backup + if err := k8sClient.Get(ctx, objKey, &backup); err != nil { + return err + } + + backup.Spec.SectionSize++ + + err := k8sClient.Update(ctx, &backup) + if errors.IsConflict(err) { + return nil + } + return err +} diff --git a/oracle/controllers/backupschedulecontroller/BUILD.bazel b/oracle/controllers/backupschedulecontroller/BUILD.bazel new file mode 100644 index 0000000..c17d56b --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/BUILD.bazel @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "backupschedulecontroller", + srcs = [ + "backupschedule_controller.go", + "operations.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/backupschedulecontroller", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/labels", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/schema", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//util/retry", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/controller/controllerutil", + "@io_k8s_sigs_controller_runtime//pkg/handler", + "@io_k8s_sigs_controller_runtime//pkg/manager", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + "@io_k8s_sigs_controller_runtime//pkg/source", + ], +) + +go_test( + name = "backupschedulecontroller_test", + srcs = ["backupschedule_controller_test.go"], + embed = [":backupschedulecontroller"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "@com_github_ghodss_yaml//:yaml", + "@com_github_google_go_cmp//cmp", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/schema", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + "@io_k8s_utils//pointer", + ], +) diff --git a/oracle/controllers/backupschedulecontroller/backupschedule_controller.go b/oracle/controllers/backupschedulecontroller/backupschedule_controller.go new file mode 100644 index 0000000..13920e3 --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/backupschedule_controller.go @@ -0,0 +1,344 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
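+
+// Package backupschedulecontroller materializes a BackupSchedule as a
+// CronAnything resource that periodically stamps out Backup objects and
+// prunes them per the retention policy.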
+ +package backupschedulecontroller + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +const ( + defaultTriggerDeadlineSeconds int64 = 30 + defaultRetention int32 = 7 + defaultMaxHistoryRecords int32 = 7 +) + +var ( + defaultTimeFormat = "20060102-150405" + backupKind = schema.GroupVersion{Group: "oracle.db.anthosapis.com", Version: "v1alpha1"}.WithKind("Backup") +) + +type backupScheduleControl interface { + Get(name, namespace string) (*v1alpha1.BackupSchedule, error) + UpdateStatus(backupSchedule *v1alpha1.BackupSchedule) error +} + +type cronAnythingControl interface { + Create(cron *v1alpha1.CronAnything) error + Get(name, namespace string) (*v1alpha1.CronAnything, error) + Update(cron *v1alpha1.CronAnything) error +} + +type backupControl interface { + List(cronAnythingName string) ([]*v1alpha1.Backup, error) + Delete(backup *v1alpha1.Backup) error +} + +var _ reconcile.Reconciler = &BackupScheduleReconciler{} + +// BackupScheduleReconciler reconciles a BackupSchedule object +type BackupScheduleReconciler struct { + client.Client + Log logr.Logger + scheme *runtime.Scheme + backupScheduleCtrl backupScheduleControl + cronAnythingCtrl cronAnythingControl + backupCtrl backupControl +} + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=backupschedules,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=backupschedules/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=cronanythings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=backups,verbs=list;delete + +// Reconcile is a generic reconcile function for BackupSchedule resources. +func (r *BackupScheduleReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("backupschedule", req.NamespacedName) + backupSchedule, err := r.backupScheduleCtrl.Get(req.Name, req.Namespace) + if err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + cron, err := r.lookupCron(backupSchedule) + if err != nil && !errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if errors.IsNotFound(err) { + log.Info("No cron found for backup schedule. 
Creating new one", "backupSchedule", backupSchedule.Namespace+"/"+backupSchedule.Name) + err := r.createCron(backupSchedule) + return reconcile.Result{}, err + } + + err = r.updateCron(backupSchedule, cron) + if err != nil { + return reconcile.Result{}, err + } + + var backups []*v1alpha1.Backup + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + backups, err = r.getSortedBackupsForCron(cron) + if err != nil { + return err + } + backupSchedule, err := r.backupScheduleCtrl.Get(req.Name, req.Namespace) + if err != nil { + return err + } + return r.updateHistory(backupSchedule, backups) + }) + + if err != nil { + return reconcile.Result{}, err + } + + return ctrl.Result{}, r.pruneBackups(backupSchedule.Spec.BackupRetentionPolicy, backups) +} + +func (r *BackupScheduleReconciler) lookupCron(backupSchedule *v1alpha1.BackupSchedule) (*v1alpha1.CronAnything, error) { + cron, err := r.cronAnythingCtrl.Get(r.getCronName(backupSchedule), backupSchedule.Namespace) + if err != nil { + return nil, err + } + return cron, nil +} + +func (r *BackupScheduleReconciler) createCron(backupSchedule *v1alpha1.BackupSchedule) error { + name := r.getCronName(backupSchedule) + triggerDeadlineSeconds := defaultTriggerDeadlineSeconds + if backupSchedule.Spec.StartingDeadlineSeconds != nil { + triggerDeadlineSeconds = *backupSchedule.Spec.StartingDeadlineSeconds + } + + backupBytes, err := getBackupBytes(backupSchedule) + if err != nil { + return err + } + + cron := &v1alpha1.CronAnything{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: backupSchedule.Namespace, + }, + Spec: v1alpha1.CronAnythingSpec{ + Schedule: backupSchedule.Spec.Schedule, + TriggerDeadlineSeconds: &triggerDeadlineSeconds, + ConcurrencyPolicy: v1alpha1.ForbidConcurrent, + FinishableStrategy: &v1alpha1.FinishableStrategy{ + Type: v1alpha1.FinishableStrategyStringField, + StringField: &v1alpha1.StringFieldStrategy{ + FieldPath: fmt.Sprintf("{.status.conditions[?(@.type==\"%s\")].reason}", k8s.Ready), + FinishedValues: []string{ + k8s.BackupReady, + k8s.BackupFailed, + }, + }, + }, + ResourceBaseName: &name, + ResourceTimestampFormat: &defaultTimeFormat, + Template: runtime.RawExtension{ + Raw: backupBytes, + }, + }, + } + + err = controllerutil.SetControllerReference(backupSchedule, cron, r.scheme) + if err != nil { + return err + } + + err = r.cronAnythingCtrl.Create(cron) + if err != nil { + return err + } + return nil +} + +func (r *BackupScheduleReconciler) updateCron(backupSchedule *v1alpha1.BackupSchedule, cron *v1alpha1.CronAnything) error { + backupBytes, err := getBackupBytes(backupSchedule) + if err != nil { + return err + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + freshCron, err := r.cronAnythingCtrl.Get(cron.Name, cron.Namespace) + if err != nil { + return err + } + + templatesEqual, err := r.compareTemplate(freshCron.Spec.Template.Raw, backupBytes) + if err != nil { + return err + } + + scheduleEqual := backupSchedule.Spec.Schedule == freshCron.Spec.Schedule + startingDeadlineSecondsEqual := compareInt64Pointers(backupSchedule.Spec.StartingDeadlineSeconds, freshCron.Spec.TriggerDeadlineSeconds) + + r.Log.Info("backup schedule diff", "templateUnchanged", templatesEqual, "scheduleUnchanged", scheduleEqual, "StartingDeadlineSecondsUnchanged", startingDeadlineSecondsEqual) + + if templatesEqual && scheduleEqual && startingDeadlineSecondsEqual { + return nil + } + freshCron.Spec.Schedule = backupSchedule.Spec.Schedule + freshCron.Spec.Template.Raw = backupBytes + 
freshCron.Spec.TriggerDeadlineSeconds = backupSchedule.Spec.StartingDeadlineSeconds + return r.cronAnythingCtrl.Update(freshCron) + }) +} + +func (r *BackupScheduleReconciler) updateHistory(backupSchedule *v1alpha1.BackupSchedule, sortedBackups []*v1alpha1.Backup) error { + newBackupHistory := []v1alpha1.BackupHistoryRecord{} + for _, backup := range sortedBackups { + newBackupHistory = append(newBackupHistory, v1alpha1.BackupHistoryRecord{ + BackupName: backup.GetName(), + CreationTime: backup.GetCreationTimestamp(), + Phase: backup.Status.Phase, + }) + } + backupTotal := int32(len(newBackupHistory)) + if backupTotal > defaultMaxHistoryRecords { + newBackupHistory = newBackupHistory[:defaultMaxHistoryRecords] + } + backupSchedule.Status.BackupTotal = &backupTotal + backupSchedule.Status.BackupHistory = newBackupHistory + return r.backupScheduleCtrl.UpdateStatus(backupSchedule) +} + +func (r *BackupScheduleReconciler) pruneBackups(retention *v1alpha1.BackupRetentionPolicy, sortedBackups []*v1alpha1.Backup) error { + max := defaultRetention + if retention != nil && retention.BackupRetention != nil { + max = *retention.BackupRetention + } + if max == 0 { + return nil + } + + count := max + for _, backup := range sortedBackups { + if count <= 0 { + r.Log.Info("deleting backup", "backup", backup) + if err := r.backupCtrl.Delete(backup); err != nil { + return err + } + } + if backup.Status.Phase == commonv1alpha1.BackupSucceeded && count > 0 { + count -= 1 + } + } + return nil +} + +func (r *BackupScheduleReconciler) compareTemplate(left, right []byte) (bool, error) { + var leftMap map[string]interface{} + err := json.Unmarshal(left, &leftMap) + if err != nil { + return false, err + } + + var rightMap map[string]interface{} + err = json.Unmarshal(right, &rightMap) + if err != nil { + return false, err + } + return reflect.DeepEqual(leftMap, rightMap), nil +} + +func compareInt64Pointers(i1, i2 *int64) bool { + if i1 == nil && i2 == nil { + return true + } + if i1 == nil || i2 == nil { + return false + } + return *i1 == *i2 +} +func (r *BackupScheduleReconciler) getCronName(backupSchedule *v1alpha1.BackupSchedule) string { + return fmt.Sprintf("%s-cron", backupSchedule.Name) +} + +func (r *BackupScheduleReconciler) getSortedBackupsForCron(cron *v1alpha1.CronAnything) ([]*v1alpha1.Backup, error) { + backupList, err := r.backupCtrl.List(cron.Name) + if err != nil { + return nil, err + } + + sort.Slice(backupList, func(i, j int) bool { + iTime := backupList[i].GetCreationTimestamp() + jTime := backupList[j].GetCreationTimestamp() + return jTime.Before(&iTime) + }) + return backupList, nil +} + +func getBackupBytes(backupSchedule *v1alpha1.BackupSchedule) ([]byte, error) { + specBytes, err := json.Marshal(backupSchedule.Spec.BackupSpec) + if err != nil { + return nil, err + } + + var specMap map[string]interface{} + err = json.Unmarshal(specBytes, &specMap) + if err != nil { + return nil, err + } + + backupMap := make(map[string]interface{}) + backupMap["apiVersion"] = backupKind.GroupVersion().String() + backupMap["kind"] = backupKind.Kind + backupMap["spec"] = specMap + return json.Marshal(backupMap) +} + +// NewBackupScheduleReconciler returns a BackupScheduleReconciler object. 
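+// The control fields are wired to real client-backed implementations here;
+// unit tests swap in the fake controls defined in the test file.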
+func NewBackupScheduleReconciler(mgr manager.Manager) *BackupScheduleReconciler { + return &BackupScheduleReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("BackupSchedule"), + scheme: mgr.GetScheme(), + backupScheduleCtrl: &realBackupScheduleControl{client: mgr.GetClient()}, + cronAnythingCtrl: &realCronAnythingControl{client: mgr.GetClient()}, + backupCtrl: &realBackupControl{client: mgr.GetClient()}, + } +} + +// SetupWithManager configures the reconciler. +func (r *BackupScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.BackupSchedule{}). + Watches(&source.Kind{Type: &v1alpha1.CronAnything{}}, + &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.BackupSchedule{}, IsController: true}). + Complete(r) +} diff --git a/oracle/controllers/backupschedulecontroller/backupschedule_controller_test.go b/oracle/controllers/backupschedulecontroller/backupschedule_controller_test.go new file mode 100644 index 0000000..f130240 --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/backupschedule_controller_test.go @@ -0,0 +1,574 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backupschedulecontroller + +import ( + "strings" + "testing" + "time" + + "github.com/ghodss/yaml" + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +const ( + testBackupScheduleName = "test-backup-schedule" + testNamespace = "db" + testSchedule = "* * * * *" // At every minute +) + +func TestReconcileWithNoBackupSchedule(t *testing.T) { + reconciler, backupScheduleCtrl, _, _ := newTestBackupScheduleReconciler() + backupScheduleCtrl.get = func(name, _ string) (*v1alpha1.BackupSchedule, error) { + return nil, errors.NewNotFound(schema.GroupResource{Group: "oracle.db.anthosapis.com", Resource: "BackupSchedule"}, name) + } + _, err := reconciler.Reconcile(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + }) + if err != nil { + t.Errorf("reconciler.Reconcile got %v, want nil", err) + } +} + +func TestReconcileWithCronCreation(t *testing.T) { + reconciler, backupScheduleCtrl, cronAnythingCtrl, _ := newTestBackupScheduleReconciler() + testCases := []struct { + name string + backupScheduleSpec *v1alpha1.BackupScheduleSpec + wantCronStr string + }{ + { + name: "minimum spec", + backupScheduleSpec: &v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: 
"mydb", + Type: commonv1alpha1.BackupTypeSnapshot, + }, + }, + Schedule: testSchedule, + }, + wantCronStr: strings.TrimSpace(` +metadata: + creationTimestamp: null + name: test-backup-schedule-cron + namespace: db + ownerReferences: + - apiVersion: oracle.db.anthosapis.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: BackupSchedule + name: test-backup-schedule + uid: "" +spec: + concurrencyPolicy: Forbid + finishableStrategy: + stringField: + fieldPath: '{.status.conditions[?(@.type=="Ready")].reason}' + finishedValues: + - BackupReady + - BackupFailed + type: StringField + resourceBaseName: test-backup-schedule-cron + resourceTimestampFormat: 20060102-150405 + schedule: '* * * * *' + template: + apiVersion: oracle.db.anthosapis.com/v1alpha1 + kind: Backup + spec: + instance: mydb + type: Snapshot + triggerDeadlineSeconds: 30 +status: {}`), + }, + { + name: "spec trigger deadlines", + backupScheduleSpec: &v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb1", + Type: commonv1alpha1.BackupTypePhysical, + }, + Subtype: "Instance", + GcsPath: "gs://bucket/rman", + }, + Schedule: "*/5 * * * *", + StartingDeadlineSeconds: pointer.Int64Ptr(60), + }, + wantCronStr: strings.TrimSpace(` +metadata: + creationTimestamp: null + name: test-backup-schedule-cron + namespace: db + ownerReferences: + - apiVersion: oracle.db.anthosapis.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: BackupSchedule + name: test-backup-schedule + uid: "" +spec: + concurrencyPolicy: Forbid + finishableStrategy: + stringField: + fieldPath: '{.status.conditions[?(@.type=="Ready")].reason}' + finishedValues: + - BackupReady + - BackupFailed + type: StringField + resourceBaseName: test-backup-schedule-cron + resourceTimestampFormat: 20060102-150405 + schedule: '*/5 * * * *' + template: + apiVersion: oracle.db.anthosapis.com/v1alpha1 + kind: Backup + spec: + gcsPath: gs://bucket/rman + instance: mydb1 + subType: Instance + type: Physical + triggerDeadlineSeconds: 60 +status: {}`), + }, + } + backupSchedule := &v1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + } + backupScheduleCtrl.get = func(_, _ string) (*v1alpha1.BackupSchedule, error) { + return backupSchedule, nil + } + + backupScheduleCtrl.updateStatus = func(backupSchedule *v1alpha1.BackupSchedule) error { + return nil + } + + cronAnythingCtrl.get = func(name, namespace string) (*v1alpha1.CronAnything, error) { + return nil, errors.NewNotFound(schema.GroupResource{Group: "oracle.db.anthosapis.com", Resource: "CronAnything"}, name) + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var gotCronStr string + cronAnythingCtrl.create = func(cron *v1alpha1.CronAnything) error { + b, err := yaml.Marshal(cron) + gotCronStr = strings.TrimSpace(string(b)) + return err + } + backupSchedule.Spec = *tc.backupScheduleSpec + + _, err := reconciler.Reconcile(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + }) + if err != nil { + t.Fatalf("reconciler.Reconcile want nil, got %v", err) + } + if gotCronStr != tc.wantCronStr { + t.Errorf("reconciler.Reconcile create CronAnything got spec \n%s\n want \n%s\n", gotCronStr, tc.wantCronStr) + } + }) + } +} + +func TestReconcileWithCronUpdate(t *testing.T) { + reconciler, backupScheduleCtrl, cronAnythingCtrl, backupCtrl := newTestBackupScheduleReconciler() + schedule := 
v1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + Spec: v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb1", + Type: commonv1alpha1.BackupTypePhysical, + }, + Subtype: "Instance", + GcsPath: "gs://bucket/rman", + }, + Schedule: testSchedule, + }, + } + backup, err := getBackupBytes(&schedule) + if err != nil { + t.Fatalf("failed to parse backup bytes: %v", err) + } + + schedule1 := schedule + schedule1.Spec.BackupSpec = v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb", + Type: commonv1alpha1.BackupTypeSnapshot, + }, + } + changedBackup, err := getBackupBytes(&schedule1) + if err != nil { + t.Fatalf("failed to parse changedBackup bytes: %v", err) + } + wantCronStr := strings.TrimSpace(` +resourceBaseName: test-backup-schedule-cron +resourceTimestampFormat: 20060102-150405 +schedule: '* * * * *' +template: + apiVersion: oracle.db.anthosapis.com/v1alpha1 + kind: Backup + spec: + gcsPath: gs://bucket/rman + instance: mydb1 + subType: Instance + type: Physical`) + + backupScheduleCtrl.get = func(_, _ string) (*v1alpha1.BackupSchedule, error) { + return &schedule, nil + } + + backupScheduleCtrl.updateStatus = func(backupSchedule *v1alpha1.BackupSchedule) error { + return nil + } + + backupCtrl.list = func(cronAnythingName string) ([]*v1alpha1.Backup, error) { + return []*v1alpha1.Backup{}, nil + } + + testCases := []struct { + name string + backupSchedule *v1alpha1.BackupSchedule + oldCronSpec *v1alpha1.CronAnythingSpec + wantCronSpecStr string + }{ + { + name: "schedule changed", + backupSchedule: &schedule, + oldCronSpec: &v1alpha1.CronAnythingSpec{ + Schedule: "*/10 * * * *", + Template: runtime.RawExtension{Raw: backup}, + ResourceBaseName: pointer.StringPtr("test-backup-schedule-cron"), + ResourceTimestampFormat: pointer.StringPtr("20060102-150405"), + }, + wantCronSpecStr: wantCronStr, + }, + { + name: "backup spec changed", + backupSchedule: &schedule, + oldCronSpec: &v1alpha1.CronAnythingSpec{ + Schedule: testSchedule, + Template: runtime.RawExtension{Raw: changedBackup}, + ResourceBaseName: pointer.StringPtr("test-backup-schedule-cron"), + ResourceTimestampFormat: pointer.StringPtr("20060102-150405"), + }, + wantCronSpecStr: wantCronStr, + }, + { + name: "StartingDeadlineSeconds changed", + backupSchedule: &schedule, + oldCronSpec: &v1alpha1.CronAnythingSpec{ + Schedule: testSchedule, + Template: runtime.RawExtension{Raw: backup}, + ResourceBaseName: pointer.StringPtr("test-backup-schedule-cron"), + ResourceTimestampFormat: pointer.StringPtr("20060102-150405"), + TriggerDeadlineSeconds: pointer.Int64Ptr(60), + }, + wantCronSpecStr: wantCronStr, + }, + { + name: "unchanged", + backupSchedule: &schedule, + oldCronSpec: &v1alpha1.CronAnythingSpec{ + Schedule: testSchedule, + Template: runtime.RawExtension{Raw: backup}, + ResourceBaseName: pointer.StringPtr("test-backup-schedule-cron"), + ResourceTimestampFormat: pointer.StringPtr("20060102-150405"), + }, + wantCronSpecStr: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cronAnythingCtrl.get = func(name, namespace string) (*v1alpha1.CronAnything, error) { + return &v1alpha1.CronAnything{ + Spec: *tc.oldCronSpec, + }, nil + } + var gotCronSpecStr string + cronAnythingCtrl.update = func(cron *v1alpha1.CronAnything) error { + b, err := yaml.Marshal(cron.Spec) + gotCronSpecStr = strings.TrimSpace(string(b)) + return err + } + + 
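+			// Reconcile should detect any drift between the schedule and the
+			// existing cron and rewrite the cron spec in place; an empty
+			// wantCronSpecStr means no update is expected at all.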
_, err := reconciler.Reconcile(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + }) + if err != nil { + t.Fatalf("reconciler.Reconcile want nil, got %v", err) + } + if gotCronSpecStr != tc.wantCronSpecStr { + t.Errorf("reconciler.Reconcile create CronAnything got spec \n%s\n want \n%s\n", gotCronSpecStr, tc.wantCronSpecStr) + } + }) + } +} + +func TestReconcileWithBackupPrune(t *testing.T) { + reconciler, backupScheduleCtrl, cronAnythingCtrl, backupCtrl := newTestBackupScheduleReconciler() + schedule := v1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + Spec: v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb1", + Type: commonv1alpha1.BackupTypePhysical, + }, + Subtype: "Instance", + GcsPath: "gs://bucket/rman", + }, + Schedule: testSchedule, + BackupRetentionPolicy: &v1alpha1.BackupRetentionPolicy{BackupRetention: pointer.Int32Ptr(2)}, + }, + } + backupBytes, err := getBackupBytes(&schedule) + if err != nil { + t.Fatalf("failed to parse backup bytes: %v", err) + } + + backupScheduleCtrl.get = func(_, _ string) (*v1alpha1.BackupSchedule, error) { + return &schedule, nil + } + backupScheduleCtrl.updateStatus = func(backupSchedule *v1alpha1.BackupSchedule) error { + return nil + } + + cronAnythingCtrl.get = func(name, namespace string) (*v1alpha1.CronAnything, error) { + return &v1alpha1.CronAnything{ + Spec: v1alpha1.CronAnythingSpec{ + Template: runtime.RawExtension{Raw: backupBytes}, + }, + }, nil + } + cronAnythingCtrl.update = func(cron *v1alpha1.CronAnything) error { + return nil + } + + backups := makeSortedBackups(t, 4) + + testCases := []struct { + name string + backups []*v1alpha1.Backup + wantDeleted []*v1alpha1.Backup + }{ + { + name: "delete 1 backup", + backups: backups[0:3], + wantDeleted: backups[2:3], + }, + { + name: "delete 2 backups", + backups: backups, + wantDeleted: backups[2:4], + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + backupCtrl.list = func(cronAnythingName string) ([]*v1alpha1.Backup, error) { + return tc.backups, nil + } + var gotDeleted []*v1alpha1.Backup + backupCtrl.delete = func(backup *v1alpha1.Backup) error { + gotDeleted = append(gotDeleted, backup) + return nil + } + + _, err := reconciler.Reconcile(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: testBackupScheduleName, + Namespace: testNamespace, + }, + }) + if err != nil { + t.Fatalf("reconciler.Reconcile want nil, got %v", err) + } + if diff := cmp.Diff(tc.wantDeleted, gotDeleted); diff != "" { + t.Errorf("reconciler.Reconcile got unexpected backups deleted: -want +got %v", diff) + } + }) + } +} + +func TestUpdateBackupHistory(t *testing.T) { + reconciler, backupScheduleCtrl, _, _ := newTestBackupScheduleReconciler() + testCases := []struct { + name string + backupTotal int + wantTotal int32 + wantRecordTotal int + }{ + { + name: "less than max backup records limitation", + backupTotal: 5, + wantTotal: 5, + wantRecordTotal: 5, + }, + { + name: "more than backup records limitation", + backupTotal: 15, + wantTotal: 15, + wantRecordTotal: 7, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + backups := makeSortedBackups(t, tc.backupTotal) + schedule := &v1alpha1.BackupSchedule{ + Spec: v1alpha1.BackupScheduleSpec{ + BackupRetentionPolicy: &v1alpha1.BackupRetentionPolicy{BackupRetention: 
pointer.Int32Ptr(100)}, + }, + } + var gotSchedule *v1alpha1.BackupSchedule + backupScheduleCtrl.updateStatus = func(backupSchedule *v1alpha1.BackupSchedule) error { + gotSchedule = backupSchedule + return nil + } + if err := reconciler.updateHistory(schedule, backups); err != nil { + t.Fatalf("reconciler.updateHistory want nil, got %v", err) + } + if *gotSchedule.Status.BackupTotal != tc.wantTotal { + t.Errorf("reconciler.updateHistory got BackupTotal %d, want %d", *gotSchedule.Status.BackupTotal, tc.wantTotal) + } + if len(gotSchedule.Status.BackupHistory) != tc.wantRecordTotal { + t.Fatalf("reconciler.updateHistory len(BackupHistory) got %d, want %d", len(gotSchedule.Status.BackupHistory), tc.wantRecordTotal) + } + for i := 0; i < tc.wantRecordTotal; i++ { + if gotSchedule.Status.BackupHistory[i].CreationTime != backups[i].CreationTimestamp { + t.Errorf("reconciler.updateHistory BackupHistory[%d] got CreationTime %v, want %v", i, gotSchedule.Status.BackupHistory[i].CreationTime, backups[i].CreationTimestamp) + } + } + }) + } +} + +func newTestBackupScheduleReconciler() (reconciler *BackupScheduleReconciler, + backupScheduleCtrl *fakeBackupScheduleControl, + cronAnythingCtrl *fakeCronAnythingControl, + backupCtrl *fakeBackupControl) { + + backupScheduleCtrl = &fakeBackupScheduleControl{} + cronAnythingCtrl = &fakeCronAnythingControl{} + backupCtrl = &fakeBackupControl{} + scheme := runtime.NewScheme() + v1alpha1.AddToScheme(scheme) + + return &BackupScheduleReconciler{ + Log: ctrl.Log.WithName("controllers").WithName("BackupSchedule"), + scheme: scheme, + backupScheduleCtrl: backupScheduleCtrl, + cronAnythingCtrl: cronAnythingCtrl, + backupCtrl: backupCtrl, + }, backupScheduleCtrl, cronAnythingCtrl, backupCtrl +} + +func timeFromStr(t *testing.T, dateStr string) time.Time { + date, err := time.Parse("2006-01-02T15:04:05Z", dateStr) + if err != nil { + t.Fatalf("failed to parse %s: %v", dateStr, err) + } + return date +} + +func makeSortedBackups(t *testing.T, total int) (backups []*v1alpha1.Backup) { + timestamp := timeFromStr(t, "2020-12-21T01:00:00Z") + for i := 0; i < total; i++ { + backups = append(backups, &v1alpha1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.NewTime(timestamp), + }, + Status: v1alpha1.BackupStatus{ + BackupStatus: commonv1alpha1.BackupStatus{ + Phase: commonv1alpha1.BackupSucceeded, + }, + }, + }) + timestamp = timestamp.Add(time.Hour) + } + return backups +} + +type fakeBackupScheduleControl struct { + get func(name, namespace string) (*v1alpha1.BackupSchedule, error) + updateStatus func(backupSchedule *v1alpha1.BackupSchedule) error +} + +func (f *fakeBackupScheduleControl) Get(name, namespace string) (*v1alpha1.BackupSchedule, error) { + return f.get(name, namespace) +} +func (f *fakeBackupScheduleControl) UpdateStatus(backupSchedule *v1alpha1.BackupSchedule) error { + return f.updateStatus(backupSchedule) +} + +type fakeCronAnythingControl struct { + create func(cron *v1alpha1.CronAnything) error + get func(name, namespace string) (*v1alpha1.CronAnything, error) + update func(cron *v1alpha1.CronAnything) error + delete func(cron *v1alpha1.CronAnything) error +} + +func (f *fakeCronAnythingControl) Create(cron *v1alpha1.CronAnything) error { + return f.create(cron) +} +func (f *fakeCronAnythingControl) Get(name, namespace string) (*v1alpha1.CronAnything, error) { + return f.get(name, namespace) +} +func (f *fakeCronAnythingControl) Update(cron *v1alpha1.CronAnything) error { + return f.update(cron) +} + +type fakeBackupControl struct { + list func(cronAnythingName string) 
([]*v1alpha1.Backup, error) + delete func(backup *v1alpha1.Backup) error +} + +func (f *fakeBackupControl) List(cronAnythingName string) ([]*v1alpha1.Backup, error) { + return f.list(cronAnythingName) +} +func (f *fakeBackupControl) Delete(backup *v1alpha1.Backup) error { + return f.delete(backup) +} diff --git a/oracle/controllers/backupschedulecontroller/functest/BUILD.bazel b/oracle/controllers/backupschedulecontroller/functest/BUILD.bazel new file mode 100644 index 0000000..553031d --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/functest/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "functest_test", + srcs = ["backupschedule_controller_functional_test.go"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/backupschedulecontroller", + "//oracle/controllers/cronanythingcontroller", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + "@io_k8s_utils//pointer", + ], +) diff --git a/oracle/controllers/backupschedulecontroller/functest/backupschedule_controller_functional_test.go b/oracle/controllers/backupschedulecontroller/functest/backupschedule_controller_functional_test.go new file mode 100644 index 0000000..1e109ad --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/functest/backupschedule_controller_functional_test.go @@ -0,0 +1,247 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backupschedulecontroller_func_test + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/backupschedulecontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/cronanythingcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager +) + +type fakeBackupReconiler struct { + client.Client +} + +func (f *fakeBackupReconiler) Reconcile(req reconcile.Request) (reconcile.Result, error) { + ctx := context.TODO() + var backup v1alpha1.Backup + if err := f.Get(ctx, req.NamespacedName, &backup); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + readyCond := k8s.FindCondition(backup.Status.Conditions, k8s.Ready) + if k8s.ConditionReasonEquals(readyCond, k8s.BackupReady) { + return ctrl.Result{}, nil + } + backup.Status.Conditions = k8s.Upsert(backup.Status.Conditions, k8s.Ready, v1.ConditionTrue, k8s.BackupReady, "") + if err := f.Status().Update(ctx, &backup); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil +} + +func (f *fakeBackupReconiler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.Backup{}).Complete(f) +} + +func TestBackupsScheduleController(t *testing.T) { + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "BackupSchedule controller", func() []testhelpers.Reconciler { + backupReconciler := &fakeBackupReconiler{k8sClient} + backupScheduleReconciler := backupschedulecontroller.NewBackupScheduleReconciler(k8sManager) + cronanythingReconciler, err := cronanythingcontroller.NewCronAnythingReconciler(k8sManager) + if err != nil { + t.Fatalf("failed to create cronanythingcontroller for backup schedule test") + } + return []testhelpers.Reconciler{backupReconciler, backupScheduleReconciler, cronanythingReconciler} + }) +} + +var _ = Describe("BackupSchedule controller", func() { + // Define utility constants for object names and testing timeouts and intervals. 
+ const ( + namespace = "default" + backupScheduleName = "test-backup-schedule" + instanceName = "test-instance" + + timeout = time.Second * 15 + interval = time.Millisecond * 15 + ) + + var instance v1alpha1.Instance + + ctx := context.Background() + + BeforeEach(func() { + instance = v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: testhelpers.RandName(instanceName), + Namespace: namespace, + }, + Spec: v1alpha1.InstanceSpec{ + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Disks: []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + }, + { + Name: "LogDisk", + }, + }, + }, + }} + Expect(k8sClient.Create(ctx, &instance)).Should(Succeed()) + instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, "") + Expect(k8sClient.Status().Update(ctx, &instance)).Should(Succeed()) + + createdInstance := &v1alpha1.Instance{} + // We'll need to retry getting this newly created Instance, given that creation may not immediately happen. + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instance.Name}, createdInstance) + }, timeout, interval).Should(Succeed()) + }) + + AfterEach(func() { + Expect(k8sClient.Delete(ctx, &instance)).Should(Succeed()) + createdBackupSchedules := &v1alpha1.BackupScheduleList{} + Expect(k8sClient.List(ctx, createdBackupSchedules)).Should(Succeed()) + for _, backupSchedule := range createdBackupSchedules.Items { + Expect(k8sClient.Delete(ctx, &backupSchedule)).To(Succeed()) + } + createdBackups := &v1alpha1.BackupList{} + Expect(k8sClient.List(ctx, createdBackups)).Should(Succeed()) + + for _, backup := range createdBackups.Items { + Expect(k8sClient.Delete(ctx, &backup)).To(Succeed()) + } + }) + Context("New backup schedule", func() { + It("Should create backups based on schedule", func() { + testBackupCreation(namespace, backupScheduleName, instanceName) + }) + }) + + Context("New backup schedule with retention policy", func() { + It("Should prune backups based on retention policy", func() { + testBackupRetention(namespace, backupScheduleName, instanceName) + }) + }) +}) + +func testBackupCreation(namespace, backupScheduleName, instanceName string) { + By("By creating a BackupSchedule of the instance") + backupSchedule := &v1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: backupScheduleName, + }, + Spec: v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: instanceName, + Type: commonv1alpha1.BackupTypeSnapshot, + }, + }, + Schedule: "* * * * *", + StartingDeadlineSeconds: pointer.Int64Ptr(5), + }, + } + Expect(k8sClient.Create(context.TODO(), backupSchedule)).Should(Succeed()) + By("Checking for the first Backup to be created") + Eventually(func() (int, error) { + return getBackupsTotal() + }, time.Minute*2, time.Second).Should(Equal(1)) + By("Checking for the second Backup to be created") + Eventually(func() (int, error) { + return getBackupsTotal() + }, time.Second*65, time.Second).Should(Equal(2)) + By("Checking for the third Backup to be created") + Eventually(func() (int, error) { + return getBackupsTotal() + }, time.Second*65, time.Second).Should(Equal(3)) +} + +func testBackupRetention(namespace, backupScheduleName, instanceName string) { + By("By creating a BackupSchedule of the instance") + backupSchedule := &v1alpha1.BackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: backupScheduleName, + }, + 
Spec: v1alpha1.BackupScheduleSpec{ + BackupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: instanceName, + Type: commonv1alpha1.BackupTypeSnapshot, + }, + }, + Schedule: "* * * * *", + StartingDeadlineSeconds: pointer.Int64Ptr(5), + BackupRetentionPolicy: &v1alpha1.BackupRetentionPolicy{ + BackupRetention: pointer.Int32Ptr(2), + }, + }, + } + Expect(k8sClient.Create(context.TODO(), backupSchedule)).Should(Succeed()) + By("Checking for the first Backup to be created") + var toBeDeleted v1alpha1.Backup + Eventually(func() (int, error) { + backups, err := getBackups() + if err != nil { + return -1, err + } + if len(backups) == 1 { + toBeDeleted = backups[0] + } + return len(backups), nil + }, time.Minute*2, time.Second).Should(Equal(1)) + // Compute the key only after the first backup has been captured above; computing it earlier would use the zero-value backup and yield an empty key. + toBeDeletedKey := client.ObjectKey{Namespace: toBeDeleted.Namespace, Name: toBeDeleted.Name} + + By("Checking for the first Backup to be deleted") + Eventually(func() bool { + backup := &v1alpha1.Backup{} + return apierrors.IsNotFound(k8sClient.Get(context.TODO(), toBeDeletedKey, backup)) + }, time.Second*200, time.Second).Should(BeTrue()) +} + +func getBackupsTotal() (int, error) { + backups, err := getBackups() + if err != nil { + return -1, err + } + return len(backups), nil +} + +func getBackups() ([]v1alpha1.Backup, error) { + backupList := &v1alpha1.BackupList{} + err := k8sClient.List(context.TODO(), backupList) + if err != nil { + return nil, fmt.Errorf("unable to list backup: %v", err) + } + return backupList.Items, nil +} diff --git a/oracle/controllers/backupschedulecontroller/operations.go b/oracle/controllers/backupschedulecontroller/operations.go new file mode 100644 index 0000000..64a0c16 --- /dev/null +++ b/oracle/controllers/backupschedulecontroller/operations.go @@ -0,0 +1,92 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
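+// Note: operations.go provides the client-backed implementations of the small control interfaces (BackupSchedule, CronAnything, and Backup controls) that the fake controls in the unit tests above stand in for.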
+ +package backupschedulecontroller + +import ( + "context" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +type realBackupScheduleControl struct { + client client.Client +} + +func (r *realBackupScheduleControl) Get(name, namespace string) (*v1alpha1.BackupSchedule, error) { + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + var backupSchedule v1alpha1.BackupSchedule + err := r.client.Get(context.TODO(), key, &backupSchedule) + return &backupSchedule, err +} + +func (r *realBackupScheduleControl) UpdateStatus(schedule *v1alpha1.BackupSchedule) error { + return r.client.Status().Update(context.TODO(), schedule) +} + +type realCronAnythingControl struct { + client client.Client +} + +func (r *realCronAnythingControl) Create(cron *v1alpha1.CronAnything) error { + return r.client.Create(context.TODO(), cron) +} + +func (r *realCronAnythingControl) Get(name, namespace string) (*v1alpha1.CronAnything, error) { + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + var cron v1alpha1.CronAnything + err := r.client.Get(context.TODO(), key, &cron) + return &cron, err +} + +func (r *realCronAnythingControl) Update(cron *v1alpha1.CronAnything) error { + return r.client.Update(context.TODO(), cron) +} + +type realBackupControl struct { + client client.Client +} + +func (r *realBackupControl) List(cronAnythingName string) ([]*v1alpha1.Backup, error) { + listOptions := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{v1alpha1.CronAnythingCreatedByLabel: cronAnythingName}), + } + var backupList v1alpha1.BackupList + err := r.client.List(context.TODO(), &backupList, listOptions) + if err != nil { + return nil, err + } + var backups []*v1alpha1.Backup + for _, b := range backupList.Items { + if b.DeletionTimestamp != nil { + continue + } + backups = append(backups, b.DeepCopy()) + } + return backups, nil +} + +func (r *realBackupControl) Delete(backup *v1alpha1.Backup) error { + return r.client.Delete(context.TODO(), backup) +} diff --git a/oracle/controllers/common.go b/oracle/controllers/common.go new file mode 100644 index 0000000..57a6da7 --- /dev/null +++ b/oracle/controllers/common.go @@ -0,0 +1,160 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" +) + +const ( + PhysBackupTimeLimitDefault = 60 * time.Minute + StatusReady = "Ready" + StatusInProgress = "InProgress" + + RestoreInProgress = "Restore" + StatusInProgress + CreateInProgress = "Create" + StatusInProgress +) + +var ( + // SvcName is a string template for service names. + SvcName = "%s-svc" + // AgentSvcName is a string template for agent service names. + AgentSvcName = "%s-agent-svc" + // DbdaemonSvcName is a string template for dbdaemon service names. + DbdaemonSvcName = "%s-dbdaemon-svc" + // SvcEndpoint is a string template for service endpoints. + SvcEndpoint = "%s.%s" // SvcName.namespaceName + sourceCidrRange = []string{"0.0.0.0/0"} + // StsName is a string template for Database stateful set names. + StsName = "%s-sts" + // AgentDeploymentName is a string template for agent deployment names. + AgentDeploymentName = "%s-agent-deployment" + // PvcMountName is a string template for pvc names. + PvcMountName = "%s-pvc-%s" // inst.name-pvc-mount, e.g. mydb-pvc-u02 + // CmName is a string template for config map names. + CmName = "%s-cm" + // DatabasePodAppLabel is the 'app' label assigned to db pod. + DatabasePodAppLabel = "db-op" + defaultDiskSpecs = map[string]commonv1alpha1.DiskSpec{ + "DataDisk": { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + "LogDisk": { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, + "BackupDisk": { + Name: "BackupDisk", + Size: resource.MustParse("100Gi"), + }, + } + + defaultDiskMountLocations = map[string]string{ + "DataDisk": "u02", + "LogDisk": "u03", + "BackupDisk": "u04", + } +) + +// StsParams stores parameters for creating a database stateful set. +type StsParams struct { + Inst *v1alpha1.Instance + Scheme *runtime.Scheme + Namespace string + Images map[string]string + SvcName string + StsName string + PrivEscalation bool + ConfigMap *corev1.ConfigMap + Restore *v1alpha1.RestoreSpec + Disks []commonv1alpha1.DiskSpec + Config *v1alpha1.Config + Log logr.Logger + Services []commonv1alpha1.Service +} + +// AgentDeploymentParams stores parameters for creating an agent deployment. +type AgentDeploymentParams struct { + Inst *v1alpha1.Instance + Scheme *runtime.Scheme + Images map[string]string + PrivEscalation bool + Name string + Log logr.Logger + Args map[string][]string + Services []commonv1alpha1.Service +} + +// GrpcConfigAgentClientFactory is a gRPC implementation of ConfigAgentClientFactory. Exists for test mock. +type GrpcConfigAgentClientFactory struct { + caclient *capb.ConfigAgentClient +} + +// ConnCloseFunc closes the connection associated with a config agent client. +type ConnCloseFunc func() + +// ConfigAgentClientFactory creates config agent clients. It is an interface so that tests can supply a mock implementation. +type ConfigAgentClientFactory interface { + // New returns a new client. The returned connection close function + // should be invoked by the caller if the error is nil. 
+ New(ctx context.Context, r client.Reader, namespace, instName string) (capb.ConfigAgentClient, ConnCloseFunc, error) +} + +// GetPVCNameAndMount returns the PVC name and its corresponding mount location for the given instance and disk name. +func GetPVCNameAndMount(instName, diskName string) (string, string) { + spec := defaultDiskSpecs[diskName] + mountLocation := defaultDiskMountLocations[spec.Name] + pvcName := fmt.Sprintf(PvcMountName, instName, mountLocation) + return pvcName, mountLocation +} + +// New dials the instance's agent service and returns a new config agent client together with a function that closes the underlying connection. +func (g *GrpcConfigAgentClientFactory) New(ctx context.Context, r client.Reader, namespace, instName string) (capb.ConfigAgentClient, ConnCloseFunc, error) { + agentSvc := &corev1.Service{} + if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(AgentSvcName, instName), Namespace: namespace}, agentSvc); err != nil { + return nil, nil, err + } + conn, err := grpc.Dial(fmt.Sprintf("%s:%d", agentSvc.Spec.ClusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure()) + if err != nil { + return nil, nil, fmt.Errorf("failed to create a conn via gRPC.Dial: %w", err) + } + return capb.NewConfigAgentClient(conn), func() { _ = conn.Close() }, nil +} + +// Contains checks whether the given "elem" is present in "array". +func Contains(array []string, elem string) bool { + for _, v := range array { + if v == elem { + return true + } + } + return false +} diff --git a/oracle/controllers/common_test.go b/oracle/controllers/common_test.go new file mode 100644 index 0000000..343ee44 --- /dev/null +++ b/oracle/controllers/common_test.go @@ -0,0 +1,61 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "testing" +) + +func TestGetPVCNameAndMount(t *testing.T) { + + testCases := []struct { + Name string + DiskName string + wantPVCName string + wantMount string + }{ + { + Name: "DataDisk", + DiskName: "DataDisk", + wantPVCName: "inst-pvc-u02", + wantMount: "u02", + }, + { + Name: "LogDisk", + DiskName: "LogDisk", + wantPVCName: "inst-pvc-u03", + wantMount: "u03", + }, + { + Name: "BackupDisk", + DiskName: "BackupDisk", + wantPVCName: "inst-pvc-u04", + wantMount: "u04", + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + gotPVCName, gotMount := GetPVCNameAndMount("inst", tc.DiskName) + + if gotPVCName != tc.wantPVCName { + t.Errorf("got pvcName %v, want %v", gotPVCName, tc.wantPVCName) + } + + if gotMount != tc.wantMount { + t.Errorf("got mount %v, want %v", gotMount, tc.wantMount) + } + }) + } +} diff --git a/oracle/controllers/config_agent_helpers.go b/oracle/controllers/config_agent_helpers.go new file mode 100644 index 0000000..a48ee7a --- /dev/null +++ b/oracle/controllers/config_agent_helpers.go @@ -0,0 +1,45 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + + lropb "google.golang.org/genproto/googleapis/longrunning" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetLROOperation returns the LRO operation for the specified namespace, instance, and operation id. +func GetLROOperation(caClientFactory ConfigAgentClientFactory, ctx context.Context, r client.Reader, namespace, id, instName string) (*lropb.Operation, error) { + caClient, closeConn, err := caClientFactory.New(ctx, r, namespace, instName) + if err != nil { + return nil, err + } + defer closeConn() + + return caClient.GetOperation(ctx, &lropb.GetOperationRequest{Name: id}) +} + +// DeleteLROOperation deletes the LRO operation for the specified namespace, instance, and operation id. +func DeleteLROOperation(caClientFactory ConfigAgentClientFactory, ctx context.Context, r client.Reader, namespace, id, instName string) error { + caClient, closeConn, err := caClientFactory.New(ctx, r, namespace, instName) + if err != nil { + return err + } + defer closeConn() + + _, err = caClient.DeleteOperation(ctx, &lropb.DeleteOperationRequest{Name: id}) + return err +} diff --git a/oracle/controllers/configcontroller/BUILD.bazel b/oracle/controllers/configcontroller/BUILD.bazel new file mode 100644 index 0000000..1fd713b --- /dev/null +++ b/oracle/controllers/configcontroller/BUILD.bazel @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "configcontroller", + srcs = ["config_controller.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/configcontroller", + visibility = ["//visibility:public"], + deps = [ + "//oracle/api/v1alpha1", + "//oracle/controllers", + "@com_github_go_logr_logr//:logr", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_client_go//tools/record", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) + +go_test( + name = "configcontroller_test", + srcs = ["config_controller_test.go"], + embed = [":configcontroller"], + deps = [ + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) diff --git a/oracle/controllers/configcontroller/config_controller.go b/oracle/controllers/configcontroller/config_controller.go new file mode 100644 index 0000000..9811e68 --- /dev/null +++ b/oracle/controllers/configcontroller/config_controller.go @@ -0,0 +1,132 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configcontroller + +import ( + "context" + "flag" + "fmt" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" +) + +// ConfigReconciler reconciles a Config object +type ConfigReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Images map[string]string + Recorder record.EventRecorder +} + +var ( + findInstance = (*ConfigReconciler).findInstance + patch = (*ConfigReconciler).Patch +) + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=configs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=configs/status,verbs=get;update;patch + +// Reconcile looks for the config upload requests and populates +// Operator config with the customer requested values. +func (r *ConfigReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("Config", req.NamespacedName) + + log.Info("reconciling config requests") + + var config v1alpha1.Config + if err := r.Get(ctx, req.NamespacedName, &config); err != nil { + log.V(1).Error(err, "get config request error") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if config.Spec.LogLevel != nil && config.Spec.LogLevel[controllers.OperatorName] != "" { + flag.Set("v", config.Spec.LogLevel[controllers.OperatorName]) + } else { + flag.Set("v", "0") + } + + inst, err := findInstance(r, ctx) + if err != nil { + return ctrl.Result{}, err + } + + // This is to allow creating a Config even when no Instances exist yet. + if inst == nil { + log.Info("no Instances found") + return ctrl.Result{}, nil + } + log.V(1).Info("Instance found", "inst", inst) + + applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("instance-controller")} + agentParam := controllers.AgentDeploymentParams{ + Inst: inst, + Scheme: r.Scheme, + Name: fmt.Sprintf(controllers.AgentDeploymentName, inst.Name), + Images: r.Images, + PrivEscalation: false, + Log: log, + Args: controllers.GetLogLevelArgs(&config), + } + + agentDeployment, err := controllers.NewAgentDeployment(agentParam) + if err != nil { + log.Error(err, "failed to create a Deployment", "agent deployment", agentDeployment) + return ctrl.Result{}, err + } + + if err := patch(r, ctx, agentDeployment, client.Apply, applyOpts...); err != nil { + log.Error(err, "failed to patch the Deployment", "agent deployment.Status", agentDeployment.Status) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// findInstance attempts to find a customer specific instance +// if it's been provided. There should be at most one instance. 
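+// Note: if more than one Instance exists, findInstance returns an error (see below), so Config reconciliation effectively assumes a single Instance.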
+func (r *ConfigReconciler) findInstance(ctx context.Context) (*v1alpha1.Instance, error) { + var insts v1alpha1.InstanceList + listOptions := []client.ListOption{} + if err := r.List(ctx, &insts, listOptions...); err != nil { + r.Log.Error(err, "failed to list instances") + return nil, err + } + + if len(insts.Items) == 0 { + return nil, nil + } + + if len(insts.Items) != 1 { + return nil, fmt.Errorf("number of instances != 1, numInstances:%d", len(insts.Items)) + } + + return &insts.Items[0], nil +} + +// SetupWithManager starts the reconciler loop. +func (r *ConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.Config{}). + Complete(r) +} diff --git a/oracle/controllers/configcontroller/config_controller_test.go b/oracle/controllers/configcontroller/config_controller_test.go new file mode 100644 index 0000000..565dc17 --- /dev/null +++ b/oracle/controllers/configcontroller/config_controller_test.go @@ -0,0 +1,95 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configcontroller + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" +) + +var k8sClient client.Client +var k8sManager ctrl.Manager + +func TestConfigController(t *testing.T) { + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Config controller", func() []testhelpers.Reconciler { + return []testhelpers.Reconciler{ + &ConfigReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Config"), + Scheme: k8sManager.GetScheme(), + Images: map[string]string{"config": "config_image"}, + }, + } + }) +} + +var _ = Describe("Config controller", func() { + // Define utility constants for object names and testing timeouts and intervals. 
+ const ( + Namespace = "default" + ConfigName = "test-config" + + timeout = time.Second * 15 + interval = time.Millisecond * 250 + ) + + config := &v1alpha1.Config{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: Namespace, + Name: ConfigName, + }, + } + + var reconciler ConfigReconciler + BeforeEach(func() { + reconciler = ConfigReconciler{ + Client: k8sClient, + Log: ctrl.Log, + Scheme: k8sManager.GetScheme(), + Images: map[string]string{"config": "config_image"}, + } + }) + + objKey := client.ObjectKey{Namespace: Namespace, Name: ConfigName} + It("Should succeed when config exists", func() { + Expect(k8sClient.Create(context.Background(), config)).Should(Succeed()) + + createdConfig := &v1alpha1.Config{} + Eventually(func() bool { + err := k8sClient.Get(context.Background(), client.ObjectKey{Namespace: Namespace, Name: ConfigName}, createdConfig) + return err == nil + }, timeout, interval).Should(BeTrue()) + + _, err := reconciler.Reconcile(ctrl.Request{NamespacedName: objKey}) + Expect(err).ToNot(HaveOccurred()) + + Expect(k8sClient.Delete(context.Background(), config)).Should(Succeed()) + }) + + It("Should succeed when config doesn't exist", func() { + _, err := reconciler.Reconcile(ctrl.Request{NamespacedName: objKey}) + Expect(err).ToNot(HaveOccurred()) + }) +}) diff --git a/oracle/controllers/cronanythingcontroller/BUILD.bazel b/oracle/controllers/cronanythingcontroller/BUILD.bazel new file mode 100644 index 0000000..79b2842 --- /dev/null +++ b/oracle/controllers/cronanythingcontroller/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cronanythingcontroller", + srcs = [ + "cronanything_controller.go", + "operations.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/cronanythingcontroller", + visibility = ["//visibility:public"], + deps = [ + "//oracle/api/v1alpha1", + "@com_github_robfig_cron//:cron", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/api/meta", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured", + "@io_k8s_apimachinery//pkg/labels", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/schema", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//discovery", + "@io_k8s_client_go//dynamic", + "@io_k8s_client_go//rest", + "@io_k8s_client_go//tools/record", + "@io_k8s_client_go//util/jsonpath", + "@io_k8s_client_go//util/retry", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/manager", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + "@io_k8s_sigs_controller_runtime//pkg/runtime/log", + ], +) + +go_test( + name = "cronanythingcontroller_test", + srcs = ["cronanything_controller_test.go"], + embed = [":cronanythingcontroller"], + deps = [ + "//oracle/api/v1alpha1", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/schema", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//tools/record", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + ], +) diff --git a/oracle/controllers/cronanythingcontroller/cronanything_controller.go b/oracle/controllers/cronanythingcontroller/cronanything_controller.go new file mode 100644 index 
0000000..db6a809 --- /dev/null +++ b/oracle/controllers/cronanythingcontroller/cronanything_controller.go @@ -0,0 +1,695 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronanythingcontroller + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strconv" + "sync" + "time" + + "github.com/robfig/cron" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/jsonpath" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + cronanything "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=cronanythings,verbs=* + +var controllerKind = schema.GroupVersion{Group: "oracle.db.anthosapis.com", Version: "v1alpha1"}.WithKind("CronAnything") + +var log = logf.Log.WithName("cronanything-controller") + +type resourceResolver interface { + Start(refreshInterval time.Duration, stopCh <-chan struct{}) + Resolve(gvk schema.GroupVersionKind) (schema.GroupVersionResource, bool) +} + +// cronAnythingControl provides methods for getting and updating CronAnything resources. +type cronAnythingControl interface { + Get(key client.ObjectKey) (*cronanything.CronAnything, error) + Update(ca *cronanything.CronAnything) error +} + +// resourceControl provides methods for creating, deleting and listing any resource. +type resourceControl interface { + Delete(resource schema.GroupVersionResource, namespace, name string) error + Create(resource schema.GroupVersionResource, namespace string, template *unstructured.Unstructured) error + List(resource schema.GroupVersionResource, cronAnythingName string) ([]*unstructured.Unstructured, error) +} + +// NewCronAnythingReconciler returns a new CronAnything Reconciler. +func NewCronAnythingReconciler(mgr manager.Manager) (*ReconcileCronAnything, error) { + recorder := mgr.GetEventRecorderFor("cronanything-controller") + + dynClient, err := dynamic.NewForConfig(mgr.GetConfig()) + if err != nil { + return nil, err + } + + resolver := NewResourceResolver(mgr.GetConfig()) + stopCh := make(chan struct{}) + resolver.Start(30*time.Second, stopCh) + + return &ReconcileCronAnything{ + cronanythingControl: &realCronAnythingControl{ + kubeClient: mgr.GetClient(), + }, + scheme: mgr.GetScheme(), + resourceResolver: resolver, + resourceControl: &realResourceControl{ + dynClient: dynClient, + }, + eventRecorder: recorder, + nextTrigger: make(map[string]time.Time), + currentTime: time.Now, + }, nil +} + +// SetupWithManager configures the reconciler. 
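+// Only the CronAnything type itself is registered for watches; child resources are deliberately not watched, so cron-triggered reconciles are driven by RequeueAfter (see Reconcile below).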
+func (r *ReconcileCronAnything) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&cronanything.CronAnything{}). + Complete(r) +} + +var _ reconcile.Reconciler = &ReconcileCronAnything{} + +// ReconcileCronAnything reconciles a CronAnything object. +type ReconcileCronAnything struct { + cronanythingControl cronAnythingControl + scheme *runtime.Scheme + resourceControl resourceControl + resourceResolver resourceResolver + eventRecorder record.EventRecorder + + nextTrigger map[string]time.Time + nextTriggerMutex sync.Mutex + + currentTime func() time.Time +} + +// Reconcile loop for CronAnything. The CronAnything controller does not watch child resources. +// To make sure the reconcile loop is triggered when a cron expression fires, the controller +// uses RequeueAfter. +func (r *ReconcileCronAnything) Reconcile(request reconcile.Request) (reconcile.Result, error) { + instance, err := r.cronanythingControl.Get(request.NamespacedName) + if err != nil { + if errors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + canonicalName := fmt.Sprintf("%s/%s", instance.Namespace, instance.Name) + now := r.currentTime() + + if instance.DeletionTimestamp != nil { + log.Info("Not creating resource because it is being deleted", "caName", canonicalName) + return reconcile.Result{}, nil + } + + crt, err := templateToUnstructured(instance) + if err != nil { + return reconcile.Result{}, err + } + + cgvr, found := r.resourceResolver.Resolve(crt.GroupVersionKind()) + if !found { + return reconcile.Result{}, fmt.Errorf("unable to resolve child resource for %s", canonicalName) + } + + // Look up all child resources of the cronanything resource. These are needed to make sure the + // controller adheres to the concurrency policy, cleans up completed resources as specified + // by historyLimit, and caps the total number of child resources as specified by totalResourceLimit. + childResources, err := r.getChildResources(instance, crt, cgvr) + if err != nil { + return reconcile.Result{}, err + } + + // Clean up finished resources. Do this before checking the total resource usage to make sure + // finished resources that should be deleted do not count against the total limit. + childResources = r.cleanupHistory(instance, childResources, cgvr, now) + + // Just return without doing any work if it is suspended. 
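+ // While suspended, nothing is requeued either, so reconciliation resumes only once the CronAnything object is updated again.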
+ if instance.Spec.Suspend != nil && *instance.Spec.Suspend { + return reconcile.Result{}, nil + } + + unmetScheduleTimes, nextScheduleTime, err := getScheduleTimes(instance, now.Add(1*time.Second)) + if err != nil { + return reconcile.Result{}, err + } + + if len(unmetScheduleTimes) == 0 { + log.Info("No unmet trigger times", "caName", canonicalName) + return r.updateTriggerTimes(canonicalName, nextScheduleTime), nil + } + + scheduleTime := unmetScheduleTimes[len(unmetScheduleTimes)-1] + droppedSchedules := unmetScheduleTimes[:len(unmetScheduleTimes)-1] + + if len(droppedSchedules) > 0 { + log.Info("Dropping unmet triggers", "caName", canonicalName, "count", len(droppedSchedules)) + latestDropped := droppedSchedules[len(droppedSchedules)-1] + + historyRecord := cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(latestDropped), + CreationTimestamp: metav1.NewTime(now), + } + + if len(droppedSchedules) == 1 && instance.Status.PendingTrigger != nil && instance.Status.PendingTrigger.ScheduleTime.Equal(getMetaTimePointer(latestDropped)) { + // If we get here it means we have one dropped trigger and also we have a trigger that + // we haven't been able to complete. Use the info from the pending trigger to report this + // in the trigger history. + historyRecord.Result = instance.Status.PendingTrigger.Result + } else { + historyRecord.Result = cronanything.TriggerResultMissed + } + + err := r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + updateLastScheduleTime(freshStatus, latestDropped) + freshStatus.PendingTrigger = nil + addToTriggerHistory(freshStatus, historyRecord) + }) + if err != nil { + // Since we haven't done anything yet, we can safely just return the error here and let the controller + // retry. 
+ return reconcile.Result{}, err + } + + } + + log.Info("Unmet trigger time", "caName", canonicalName, "scheduledTime", scheduleTime.Format(time.RFC3339)) + + if instance.Spec.TriggerDeadlineSeconds != nil { + triggerDeadline := time.Duration(*instance.Spec.TriggerDeadlineSeconds) * time.Second + if scheduleTime.Add(triggerDeadline).Before(now) { + log.Info("Trigger deadline exceeded", "caName", canonicalName, "scheduleTime", scheduleTime.Format(time.RFC3339)) + err = r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + updateLastScheduleTime(freshStatus, scheduleTime) + + historyRecord := cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(scheduleTime), + CreationTimestamp: metav1.NewTime(now), + } + + if freshStatus.PendingTrigger != nil && freshStatus.PendingTrigger.ScheduleTime.Equal(getMetaTimePointer(scheduleTime)) { + historyRecord.Result = freshStatus.PendingTrigger.Result + } else { + historyRecord.Result = cronanything.TriggerResultDeadlineExceeded + } + addToTriggerHistory(freshStatus, historyRecord) + + freshStatus.PendingTrigger = nil + }) + if err != nil { + return reconcile.Result{}, err + } + return r.updateTriggerTimes(canonicalName, nextScheduleTime), nil + } + } + + activeChildResources := findActiveChildResources(instance, childResources) + log.Info("Found active child resources", "caName", canonicalName, "numActiveChildResources", len(activeChildResources)) + if len(activeChildResources) > 0 { + switch instance.Spec.ConcurrencyPolicy { + case cronanything.ForbidConcurrent: + log.Info("Found existing active resource, so no new resource is scheduled due to the ForbidConcurrent policy", "caName", canonicalName) + err = r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + updateLastScheduleTime(freshStatus, scheduleTime) + + freshStatus.PendingTrigger = nil + + addToTriggerHistory(freshStatus, cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(scheduleTime), + CreationTimestamp: metav1.NewTime(now), + Result: cronanything.TriggerResultForbidConcurrent, + }) + }) + if err != nil { + return reconcile.Result{}, err + } + return r.updateTriggerTimes(canonicalName, nextScheduleTime), nil + case cronanything.ReplaceConcurrent: + // All currently active resources should be replaced. We do this by deleting them. + for _, activeResource := range activeChildResources { + // No need to delete the resource if it is already in the process of being deleted. + if activeResource.GetDeletionTimestamp() != nil { + continue + } + log.Info("Deleting resource due to ReplaceConcurrent policy", "caName", canonicalName, "activeResource", activeResource.GetName()) + err = r.resourceControl.Delete(cgvr, activeResource.GetNamespace(), activeResource.GetName()) + if err != nil { + r.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedDeleteForReplace", "Error deleting resource %s: %v", activeResource.GetName(), err) + return reconcile.Result{}, err + } + r.eventRecorder.Eventf(instance, v1.EventTypeNormal, "DeletedForReplace", "Resource %s deleted due to Replace policy", activeResource.GetName()) + } + // Returning here. Next iteration the resources will (hopefully) have been deleted and a new object can + // be created. If the deletion is not completed, it will have to wait longer. 
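+ // The fixed one-second RequeueAfter below effectively polls for the replaced resources to finish deleting.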
+ return reconcile.Result{ + RequeueAfter: 1 * time.Second, + }, nil + } + } + + if instance.Spec.TotalResourceLimit != nil && int32(len(childResources)) >= *instance.Spec.TotalResourceLimit { + log.Info("Resource limit info", "caName", canonicalName, "limit", *instance.Spec.TotalResourceLimit, "numChildResources", len(childResources)) + r.eventRecorder.Eventf(instance, v1.EventTypeWarning, "ResourceLimitReached", "Limit of %d resources has been reached", *instance.Spec.TotalResourceLimit) + log.Info("Resource limit has been reached. No new resource can be created", "caName", canonicalName) + err = r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + updateLastScheduleTime(freshStatus, scheduleTime) + + freshStatus.PendingTrigger = nil + + addToTriggerHistory(freshStatus, cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(scheduleTime), + CreationTimestamp: metav1.NewTime(now), + Result: cronanything.TriggerResultResourceLimitReached, + }) + }) + + if err != nil { + return reconcile.Result{}, err + } + return r.updateTriggerTimes(canonicalName, nextScheduleTime), nil + } + + name := getResourceName(instance, scheduleTime) + + crt.SetName(name) + labels := crt.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels[cronanything.CronAnythingCreatedByLabel] = instance.Name + labels[cronanything.CronAnythingScheduleTimeLabel] = strconv.FormatInt(scheduleTime.Unix(), 10) + crt.SetLabels(labels) + if instance.Spec.CascadeDelete != nil && *instance.Spec.CascadeDelete { + crt.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(instance, controllerKind)}) + } + + err = r.resourceControl.Create(cgvr, instance.Namespace, crt) + if err != nil { + statusErr := r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + freshStatus.PendingTrigger = &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(scheduleTime), + Result: cronanything.TriggerResultCreateFailed, + } + }) + if statusErr != nil { + log.Error(statusErr, "Failed to update status for CronAnything after failed create attempt", "caName", canonicalName) + } + r.eventRecorder.Eventf(instance, v1.EventTypeWarning, "FailedCreate", "Error creating resource: %v", err) + return reconcile.Result{}, err + } + log.Info("Created new resource", "caName", canonicalName, "childName", name) + r.eventRecorder.Eventf(instance, v1.EventTypeNormal, "SuccessfulCreate", "Created resource %s", name) + + err = r.updateCronAnythingStatus(instance.Name, instance.Namespace, func(freshStatus *cronanything.CronAnythingStatus) { + updateLastScheduleTime(freshStatus, scheduleTime) + + freshStatus.PendingTrigger = nil + + addToTriggerHistory(freshStatus, cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(scheduleTime), + CreationTimestamp: metav1.NewTime(now), + Result: cronanything.TriggerResultCreateSucceeded, + }) + }) + if err != nil { + return reconcile.Result{}, err + } + + return r.updateTriggerTimes(canonicalName, nextScheduleTime), nil +} + +func updateLastScheduleTime(status *cronanything.CronAnythingStatus, scheduleTime time.Time) { + if status.LastScheduleTime == nil || status.LastScheduleTime.Time.Before(scheduleTime) { + status.LastScheduleTime = &metav1.Time{Time: scheduleTime} + } +} + +func addToTriggerHistory(status *cronanything.CronAnythingStatus, record cronanything.TriggerHistoryRecord) { + status.TriggerHistory = append([]cronanything.TriggerHistoryRecord{record}, 
status.TriggerHistory...) + + if len(status.TriggerHistory) > cronanything.TriggerHistoryMaxLength { + status.TriggerHistory = status.TriggerHistory[:cronanything.TriggerHistoryMaxLength] + } +} + +func (r *ReconcileCronAnything) updateCronAnythingStatus(name, namespace string, updateFunc func(*cronanything.CronAnythingStatus)) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + instance, err := r.cronanythingControl.Get(key) + if err != nil { + return err + } + + updateFunc(&instance.Status) + return r.cronanythingControl.Update(instance) + }) +} + +// updateTriggerTimes updates the local map of trigger times. +// This map requires synchronized access, so a lock is acquired on the nextTriggerMutex. +func (r *ReconcileCronAnything) updateTriggerTimes(canonicalName string, nextScheduleTime time.Time) reconcile.Result { + r.nextTriggerMutex.Lock() + defer r.nextTriggerMutex.Unlock() + triggerTime, found := r.nextTrigger[canonicalName] + if found && triggerTime.Equal(nextScheduleTime) { + log.Info("Next trigger already queued", "caName", canonicalName, "nextScheduleTime", nextScheduleTime.Format(time.RFC3339)) + return reconcile.Result{} + } + r.nextTrigger[canonicalName] = nextScheduleTime + log.Info("Next trigger time queued", "caName", canonicalName, "nextScheduleTime", nextScheduleTime.Format(time.RFC3339)) + return reconcile.Result{ + RequeueAfter: nextScheduleTime.Sub(r.currentTime()), + } +} + +// byTimestamp allows for sorting a slice of unstructured resources by timestamp, newest first. Resources +// with a resource timestamp sort before those without one; resources without a timestamp are ordered by +// creation time, newest first. +type byTimestamp struct { + items []*unstructured.Unstructured + timestampCache map[types.UID]time.Time +} + +func newByTimestamp(ca *cronanything.CronAnything, items []*unstructured.Unstructured) byTimestamp { + cache := make(map[types.UID]time.Time) + for _, item := range items { + finishedTime, err := getResourceTimestamp(ca, item) + if err == nil { + cache[item.GetUID()] = finishedTime + } + } + + return byTimestamp{ + items: items, + timestampCache: cache, + } +} + +func (b byTimestamp) Len() int { return len(b.items) } +func (b byTimestamp) Swap(i, j int) { b.items[i], b.items[j] = b.items[j], b.items[i] } +func (b byTimestamp) Less(i, j int) bool { + iResource := b.items[i] + jResource := b.items[j] + iTimestamp, iHasTimestamp := b.timestampCache[iResource.GetUID()] + jTimestamp, jHasTimestamp := b.timestampCache[jResource.GetUID()] + + if !iHasTimestamp && !jHasTimestamp { + iCreationTime := iResource.GetCreationTimestamp() + jCreationTime := jResource.GetCreationTimestamp() + return jCreationTime.Before(&iCreationTime) + } + if !iHasTimestamp && jHasTimestamp { + return false + } + if iHasTimestamp && !jHasTimestamp { + return true + } + return jTimestamp.Before(iTimestamp) +} + +// cleanupHistory deletes finished child resources that have exceeded the retention time limit or that +// exceed the retention count limit. 
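+// Children are evaluated newest first (see byTimestamp), so the count limit keeps the most recently finished resources.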
+
+// cleanupHistory deletes finished child resources once their number exceeds
+// spec.retention.historyCountLimit or their age exceeds
+// spec.retention.historyTimeLimitSeconds.
+func (r *ReconcileCronAnything) cleanupHistory(ca *cronanything.CronAnything, childResources []*unstructured.Unstructured, resource schema.GroupVersionResource, now time.Time) []*unstructured.Unstructured {
+	if ca.Spec.Retention == nil {
+		return childResources
+	}
+	historyTimeLimit := ca.Spec.Retention.HistoryTimeLimitSeconds
+	historyCountLimit := ca.Spec.Retention.HistoryCountLimit
+	if historyTimeLimit == nil && historyCountLimit == nil {
+		return childResources
+	}
+
+	sort.Sort(newByTimestamp(ca, childResources))
+
+	keeperCount := int32(0)
+	var toBeDeleted []*unstructured.Unstructured
+	for _, child := range childResources {
+		finished, err := isFinished(ca, child)
+		if err != nil {
+			log.Error(err, "Error checking if resource is finished", "childName", child.GetName())
+			continue
+		}
+		if !finished {
+			continue
+		}
+
+		timestamp, err := getResourceTimestamp(ca, child)
+		if err != nil {
+			log.Error(err, "Error looking up finish time on resource", "childName", child.GetName())
+			continue
+		}
+
+		if historyTimeLimit != nil && timestamp.Before(now) && now.Sub(timestamp).Seconds() > float64(*historyTimeLimit) {
+			toBeDeleted = append(toBeDeleted, child)
+			continue
+		}
+
+		if historyCountLimit != nil && keeperCount >= *historyCountLimit {
+			toBeDeleted = append(toBeDeleted, child)
+			continue
+		}
+		keeperCount += 1
+	}
+
+	return r.deleteInactiveResources(ca, toBeDeleted, childResources, resource)
+}
+
+// deleteInactiveResources deletes all child resources in the toBeDeleted slice and returns an updated slice of child resources
+// that no longer includes any resources that have been successfully deleted.
+func (r *ReconcileCronAnything) deleteInactiveResources(ca *cronanything.CronAnything, toBeDeleted []*unstructured.Unstructured, childResources []*unstructured.Unstructured, resource schema.GroupVersionResource) []*unstructured.Unstructured {
+	deleted := make(map[string]bool)
+	for _, child := range toBeDeleted {
+		if child.GetDeletionTimestamp() != nil {
+			continue
+		}
+		log.Info("Deleting inactive resource", "childName", child.GetName())
+		err := r.resourceControl.Delete(resource, child.GetNamespace(), child.GetName())
+		if err != nil {
+			r.eventRecorder.Eventf(ca, v1.EventTypeWarning, "FailedDeleteFinished", "Error deleting finished resource: %v", err)
+			log.Error(err, "Error deleting finished resource", "childName", child.GetName())
+		} else {
+			deleted[child.GetName()] = true
+		}
+	}
+
+	var remaining []*unstructured.Unstructured
+	for _, resource := range childResources {
+		if _, isDeleted := deleted[resource.GetName()]; !isDeleted {
+			remaining = append(remaining, resource)
+		}
+	}
+
+	return remaining
+}
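+
+// As a hedged illustration of the retention knobs consumed by cleanupHistory
+// (the field and type names are the ones used elsewhere in this package; the
+// literal values are made up): keep at most five finished children and none
+// older than one hour.
+//
+//	count := int32(5)
+//	age := uint64(3600)
+//	ca.Spec.Retention = &cronanything.ResourceRetention{
+//		HistoryCountLimit:       &count,
+//		HistoryTimeLimitSeconds: &age,
+//		ResourceTimestampStrategy: cronanything.ResourceTimestampStrategy{
+//			Type: cronanything.ResourceTimestampStrategyField,
+//			FieldResourceTimestampStrategy: &cronanything.FieldResourceTimestampStrategy{
+//				FieldPath: `{.status.finishedAt}`,
+//			},
+//		},
+//	}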
+
+// findActiveChildResources filters the list of child resources down to only those that have not finished.
+func findActiveChildResources(ca *cronanything.CronAnything, childResources []*unstructured.Unstructured) []*unstructured.Unstructured {
+	return filterChildResources(ca, childResources, func(ca *cronanything.CronAnything, resource *unstructured.Unstructured) (bool, error) {
+		finished, err := isFinished(ca, resource)
+		return !finished, err
+	})
+}
+
+type filterFunc func(ca *cronanything.CronAnything, resource *unstructured.Unstructured) (bool, error)
+
+func filterChildResources(ca *cronanything.CronAnything, childResources []*unstructured.Unstructured, include filterFunc) []*unstructured.Unstructured {
+	var filtered []*unstructured.Unstructured
+	for _, resource := range childResources {
+		shouldInclude, err := include(ca, resource)
+		if err != nil {
+			log.Error(err, "Unable to invoke include function")
+		}
+		if shouldInclude {
+			filtered = append(filtered, resource)
+		}
+	}
+	return filtered
+}
+
+// isFinished checks whether the provided resource has finished. How this is decided depends on the
+// finishable strategy in the CronAnything spec: either a timestamp field contains a valid timestamp,
+// or a string field contains one of the configured finished values.
+func isFinished(ca *cronanything.CronAnything, resource *unstructured.Unstructured) (bool, error) {
+	if ca.Spec.FinishableStrategy == nil {
+		return false, nil
+	}
+	switch ca.Spec.FinishableStrategy.Type {
+	case cronanything.FinishableStrategyTimestampField:
+		strategy := ca.Spec.FinishableStrategy.TimestampField
+		if strategy == nil {
+			return false, fmt.Errorf("FinishableStrategy type is timestampField, but TimestampField property is nil")
+		}
+		_, isFinished, err := getTimestamp(strategy.FieldPath, resource)
+		return isFinished, err
+	case cronanything.FinishableStrategyStringField:
+		strategy := ca.Spec.FinishableStrategy.StringField
+		if strategy == nil {
+			return false, fmt.Errorf("FinishableStrategy type is stringField, but StringField property is nil")
+		}
+		value, err := getFieldValue(strategy.FieldPath, resource)
+		if err != nil {
+			return false, err
+		}
+
+		for _, v := range strategy.FinishedValues {
+			if v == value {
+				return true, nil
+			}
+		}
+		return false, nil
+	default:
+		// If no strategy matches, we assume the resource is not finishable.
+		return false, nil
+	}
+}
+
+func getResourceTimestamp(ca *cronanything.CronAnything, resource *unstructured.Unstructured) (time.Time, error) {
+	switch ca.Spec.Retention.ResourceTimestampStrategy.Type {
+	case cronanything.ResourceTimestampStrategyField:
+		strategy := ca.Spec.Retention.ResourceTimestampStrategy.FieldResourceTimestampStrategy
+		timestamp, _, err := getTimestamp(strategy.FieldPath, resource)
+		if err != nil {
+			return time.Time{}, err
+		}
+		return timestamp, nil
+	}
+	return time.Time{}, fmt.Errorf("can't find timestamp for resource %s", resource.GetName())
+}
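+
+// A hedged sketch of the two strategies isFinished understands, using the
+// types and fields referenced above (the field path and finished values are
+// examples, not defaults): a Pod-style string phase, or a timestamp that is
+// only set once the resource completes.
+//
+//	ca.Spec.FinishableStrategy = &cronanything.FinishableStrategy{
+//		Type: cronanything.FinishableStrategyStringField,
+//		StringField: &cronanything.StringFieldStrategy{
+//			FieldPath:      `{.status.phase}`,
+//			FinishedValues: []string{"Succeeded", "Failed"},
+//		},
+//	}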
+
+// getTimestamp returns the finish time for the given resource, a bool value that tells whether the resource
+// has finished, and an error in the event something goes wrong. The location of the finished field is given
+// by the timestampFieldPath jsonpath expression, which callers take from the CronAnything resource.
+func getTimestamp(timestampFieldPath string, resource *unstructured.Unstructured) (time.Time, bool, error) {
+	value, err := getFieldValue(timestampFieldPath, resource)
+	if err != nil {
+		return time.Time{}, false, err
+	}
+	if len(value) == 0 {
+		return time.Time{}, false, nil
+	}
+	var timestamp metav1.Time
+	err = timestamp.UnmarshalQueryParameter(value)
+	if err != nil {
+		return time.Time{}, false, err
+	}
+	return timestamp.Time, true, nil
+}
+
+func getFieldValue(fieldPath string, resource *unstructured.Unstructured) (string, error) {
+	jp := jsonpath.New("parser").AllowMissingKeys(true)
+	err := jp.Parse(fieldPath)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	err = jp.Execute(buf, resource.Object)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// getChildResources lists all the child resources of the given cronanything resource, i.e. all resources
+// of the type specified by the template that are labeled as created by the cronanything resource.
+func (r *ReconcileCronAnything) getChildResources(ca *cronanything.CronAnything, template *unstructured.Unstructured, resource schema.GroupVersionResource) ([]*unstructured.Unstructured, error) {
+	return r.resourceControl.List(resource, ca.Name)
+}
+
+// getResourceName returns the name for the resource created by the given cronanything resource at the given time.
+func getResourceName(ca *cronanything.CronAnything, scheduleTime time.Time) string {
+	baseName := ca.Name
+	if ca.Spec.ResourceBaseName != nil && len(*ca.Spec.ResourceBaseName) > 0 {
+		baseName = *ca.Spec.ResourceBaseName
+	}
+
+	timestamp := fmt.Sprintf("%d", scheduleTime.Unix())
+	if ca.Spec.ResourceTimestampFormat != nil && len(*ca.Spec.ResourceTimestampFormat) > 0 {
+		timestamp = scheduleTime.Format(*ca.Spec.ResourceTimestampFormat)
+	}
+	return fmt.Sprintf("%s-%s", baseName, timestamp)
+}
+
+// getScheduleTimes returns the next schedule time for the cron expression after the given time. It also
+// returns a slice with all previously scheduled times since .Status.LastScheduleTime (or since the creation
+// of the CronAnything resource if it has never triggered).
+func getScheduleTimes(ca *cronanything.CronAnything, now time.Time) ([]time.Time, time.Time, error) {
+	schedule, err := cron.ParseStandard(ca.Spec.Schedule)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("unable to parse schedule string: %v", err)
+	}
+
+	var scheduleTimes []time.Time
+	lastScheduleTime := ca.Status.LastScheduleTime
+	var startSchedTime time.Time
+	if lastScheduleTime == nil {
+		startSchedTime = ca.CreationTimestamp.Time
+	} else {
+		startSchedTime = lastScheduleTime.Time
+	}
+	for t := schedule.Next(startSchedTime); t.Before(now); t = schedule.Next(t) {
+		scheduleTimes = append(scheduleTimes, t)
+	}
+
+	next := schedule.Next(now)
+
+	return scheduleTimes, next, nil
+}
+
+// templateToUnstructured takes the raw extension template from the cronanything
+// resource and turns it into an unstructured that can be used to create new resources.
+func templateToUnstructured(ca *cronanything.CronAnything) (*unstructured.Unstructured, error) { + template := make(map[string]interface{}) + err := json.Unmarshal(ca.Spec.Template.Raw, &template) + if err != nil { + return nil, fmt.Errorf("unable to parse template: %v", err) + } + + return &unstructured.Unstructured{ + Object: template, + }, nil +} + +func getMetaTimePointer(t time.Time) *metav1.Time { + metaTime := metav1.NewTime(t) + return &metaTime +} diff --git a/oracle/controllers/cronanythingcontroller/cronanything_controller_test.go b/oracle/controllers/cronanythingcontroller/cronanything_controller_test.go new file mode 100644 index 0000000..b06140c --- /dev/null +++ b/oracle/controllers/cronanythingcontroller/cronanything_controller_test.go @@ -0,0 +1,1393 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronanythingcontroller + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + cronanything "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +const ( + apiVersion = "resource.k8s.io/v1alpha1" + kind = "resource" + name = "name" + namespace = "namespace" + defaultCronExpr = "* * * * *" +) + +var ( + baseTime = time.Date(2018, time.April, 20, 4, 20, 30, 0, time.UTC) +) + +func createReconciler() (*ReconcileCronAnything, *fakeCronAnythingControl, *fakeResourceControl) { + fakeRecorder := &record.FakeRecorder{} + fakeCronAnythingControl := &fakeCronAnythingControl{} + fakeResourceControl := &fakeResourceControl{ + deleteSlice: make([]string, 0), + } + fakeResourceResolver := &fakeResourceResolver{} + return &ReconcileCronAnything{ + cronanythingControl: fakeCronAnythingControl, + scheme: runtime.NewScheme(), + resourceResolver: fakeResourceResolver, + resourceControl: fakeResourceControl, + eventRecorder: fakeRecorder, + nextTrigger: make(map[string]time.Time), + currentTime: func() time.Time { return baseTime }, + }, fakeCronAnythingControl, fakeResourceControl +} + +func newCronAnything(apiVersion, kind, name, namespace string) *cronanything.CronAnything { + return &cronanything.CronAnything{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{ + Time: baseTime.Add(-60 * time.Second), + }, + Name: name, + Namespace: namespace, + UID: "myUID", + }, + Spec: cronanything.CronAnythingSpec{ + Schedule: defaultCronExpr, + Template: runtime.RawExtension{ + Raw: []byte(fmt.Sprintf("{\"apiVersion\": \"%s\", \"kind\": \"%s\"}", apiVersion, kind)), + }, + }, + } +} + +func newUnstructuredResource(ca *cronanything.CronAnything, name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: 
map[string]interface{}{
+			"apiVersion": "v1",
+			"kind":       "Resource",
+			"metadata": map[string]interface{}{
+				"namespace": "default",
+				"name":      name,
+				"uid":       name,
+				"ownerReferences": []interface{}{
+					map[string]interface{}{
+						"apiVersion": controllerKind.GroupVersion().String(),
+						"kind":       controllerKind.Kind,
+						"uid":        string(ca.ObjectMeta.UID),
+						"controller": true,
+					},
+				},
+			},
+			"status": map[string]interface{}{
+				"finishedAt": "",
+			},
+		},
+	}
+}
+
+func TestCreateNewResourceOnTrigger(t *testing.T) {
+	tVal := true
+	fVal := false
+	testCases := map[string]struct {
+		cascadeDelete *bool
+		hasOwnerRef   bool
+	}{
+		"cascadeDelete is not set, so no owner ref": {
+			cascadeDelete: nil,
+			hasOwnerRef:   false,
+		},
+		"cascadeDelete is false, so no owner ref": {
+			cascadeDelete: &fVal,
+			hasOwnerRef:   false,
+		},
+		"cascadeDelete is true, so has owner ref": {
+			cascadeDelete: &tVal,
+			hasOwnerRef:   true,
+		},
+	}
+
+	for tn, tc := range testCases {
+		t.Run(tn, func(t *testing.T) {
+			reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+			ca := newCronAnything(apiVersion, kind, name, namespace)
+			ca.Spec.CascadeDelete = tc.cascadeDelete
+
+			fakeCronAnythingControl.getCronAnything = ca
+
+			fakeResourceControl.listResult = []*unstructured.Unstructured{
+				newUnstructuredResource(ca, "resource1"),
+				newUnstructuredResource(ca, "resource2"),
+			}
+
+			result, err := reconciler.Reconcile(reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: namespace,
+					Name:      name,
+				},
+			})
+			if err != nil {
+				t.Error(err)
+			}
+
+			if result.RequeueAfter == time.Duration(0) {
+				t.Error("Expected result to have RequeueAfter, but it didn't")
+			}
+
+			expectedDuration, _ := time.ParseDuration("30s")
+			if result.RequeueAfter != expectedDuration {
+				t.Errorf("Expected next requeue to be after %f seconds, but it was %f seconds", expectedDuration.Seconds(), result.RequeueAfter.Seconds())
+			}
+
+			createTemplate := fakeResourceControl.createTemplate
+			if createTemplate.Object["apiVersion"] != apiVersion {
+				t.Errorf("Expected apiVersion to be %s, but found %s", apiVersion, createTemplate.Object["apiVersion"])
+			}
+			if createTemplate.Object["kind"] != kind {
+				t.Errorf("Expected kind to be %s, but found %s", kind, createTemplate.Object["kind"])
+			}
+
+			if tc.hasOwnerRef && len(createTemplate.GetOwnerReferences()) == 0 {
+				t.Errorf("Expected resource to have owner reference, but it didn't")
+			}
+			if !tc.hasOwnerRef && len(createTemplate.GetOwnerReferences()) == 1 {
+				t.Errorf("Expected resource to have no owner reference, but it did")
+			}
+		})
+	}
+}
+
+func TestAlreadyDeletedCronAnythingResource(t *testing.T) {
+	reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+	ca := newCronAnything(apiVersion, kind, name, namespace)
+	ca.DeletionTimestamp = &metav1.Time{
+		Time: baseTime,
+	}
+	ca.Status.LastScheduleTime = &metav1.Time{
+		Time: baseTime.Add(-10 * time.Hour),
+	}
+	fakeCronAnythingControl.getCronAnything = ca
+
+	result, err := reconciler.Reconcile(reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Namespace: namespace,
+			Name:      name,
+		},
+	})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if result.RequeueAfter != time.Duration(0) || result.Requeue != false {
+		t.Errorf("Expected request to not be requeued, but found %v", result)
+	}
+
+	if fakeResourceControl.createTemplate != nil {
+		t.Errorf("Expected no new resource to have been created")
+	}
+}
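+
+// A hedged aside on why TestCreateNewResourceOnTrigger expects a 30 second
+// requeue: baseTime is 04:20:30 and the default schedule fires every minute,
+// so getScheduleTimes reports the next trigger at 04:21:00, 30 seconds away.
+//
+//	_, next, _ := getScheduleTimes(ca, baseTime)
+//	// next == 04:21:00, so RequeueAfter == next.Sub(baseTime) == 30s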
+
+func TestSuspended(t *testing.T) {
+	reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+	ca := newCronAnything(apiVersion, kind, name, namespace)
+	T := true
+	ca.Spec.Suspend = &T
+	ca.Status.LastScheduleTime = &metav1.Time{
+		Time: baseTime.Add(-10 * time.Hour),
+	}
+	fakeCronAnythingControl.getCronAnything = ca
+
+	result, err := reconciler.Reconcile(reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Namespace: namespace,
+			Name:      name,
+		},
+	})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if result.RequeueAfter != time.Duration(0) || result.Requeue != false {
+		t.Errorf("Expected request to not be requeued, but found %v", result)
+	}
+
+	if fakeResourceControl.createTemplate != nil {
+		t.Errorf("Expected no new resource to have been created")
+	}
+}
+
+func TestScheduleTrigger(t *testing.T) {
+	testCases := map[string]struct {
+		currentTime      time.Time
+		lastScheduleTime time.Time
+		schedule         string
+
+		expectCreate bool
+	}{
+		"single past deadline": {
+			baseTime,
+			baseTime.Add(-1 * time.Minute),
+			defaultCronExpr,
+			true,
+		},
+		"multiple past deadline": {
+			baseTime,
+			baseTime.Add(-1 * time.Hour),
+			defaultCronExpr,
+			true,
+		},
+		"none past deadline": {
+			baseTime,
+			baseTime.Add(-1 * time.Hour),
+			"0 0 ? 12 ?",
+			false,
+		},
+	}
+
+	for tn, tc := range testCases {
+		t.Run(tn, func(t *testing.T) {
+			reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+			ca := newCronAnything(apiVersion, kind, name, namespace)
+			ca.Status.LastScheduleTime = &metav1.Time{
+				Time: tc.lastScheduleTime,
+			}
+			ca.Spec.Schedule = tc.schedule
+
+			fakeCronAnythingControl.getCronAnything = ca
+
+			_, err := reconciler.Reconcile(reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: namespace,
+					Name:      name,
+				},
+			})
+			if err != nil {
+				t.Error(err)
+			}
+
+			didCreate := fakeResourceControl.createTemplate != nil
+			if didCreate != tc.expectCreate {
+				t.Errorf("Expected create: %t, did create: %t", tc.expectCreate, didCreate)
+			}
+		})
+	}
+}
+
+func TestTriggerDeadline(t *testing.T) {
+	reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+	ca := newCronAnything(apiVersion, kind, name, namespace)
+	ca.Status.LastScheduleTime = &metav1.Time{
+		Time: baseTime.Add(-60 * time.Second),
+	}
+	twenty := int64(20)
+	ca.Spec.TriggerDeadlineSeconds = &twenty
+	fakeCronAnythingControl.getCronAnything = ca
+
+	result, err := reconciler.Reconcile(reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Namespace: namespace,
+			Name:      name,
+		},
+	})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if result.RequeueAfter == time.Duration(0) {
+		t.Errorf("Expected request to be requeued, but found %v", result)
+	}
+
+	if fakeResourceControl.createCount != 0 {
+		t.Errorf("Expected no resource to be created, but found one")
+	}
+}
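+
+// The field paths in TestIsFinished below are jsonpath expressions, evaluated
+// by getFieldValue. A hedged sketch of what that evaluation does (the pod
+// object and path here are illustrative, not fixtures from this file):
+//
+//	pod := &unstructured.Unstructured{Object: map[string]interface{}{
+//		"status": map[string]interface{}{"phase": "Succeeded"},
+//	}}
+//	value, _ := getFieldValue(`{.status.phase}`, pod)
+//	// value == "Succeeded"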
+
+func TestIsFinished(t *testing.T) {
+	timestampFieldPath := "{.status.myCustomField}"
+	stringFieldPath := "{.status.myCustomPhase}"
+	testCases := map[string]struct {
+		ca       *cronanything.CronAnything
+		resource *unstructured.Unstructured
+		result   bool
+	}{
+		"no strategy defined means not finished": {
+			&cronanything.CronAnything{},
+			&unstructured.Unstructured{},
+			false,
+		},
+		"timestamp strategy, custom correct timestamp": {
+			&cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyTimestampField,
+						TimestampField: &cronanything.TimestampFieldStrategy{
+							FieldPath: timestampFieldPath,
+						},
+					},
+				},
+			},
+			&unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"status": map[string]interface{}{
+						"myCustomField": baseTime.Format(time.RFC3339),
+					},
+				},
+			},
+			true,
+		},
+		"timestamp strategy, custom incorrect timestamp": {
+			&cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyTimestampField,
+						TimestampField: &cronanything.TimestampFieldStrategy{
+							FieldPath: timestampFieldPath,
+						},
+					},
+				},
+			},
+			&unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"status": map[string]interface{}{
+						"myCustomField": "incorrectTimestamp",
+					},
+				},
+			},
+			false,
+		},
+		"timestamp strategy, custom field does not exist": {
+			&cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyTimestampField,
+						TimestampField: &cronanything.TimestampFieldStrategy{
+							FieldPath: timestampFieldPath,
+						},
+					},
+				},
+			},
+			&unstructured.Unstructured{
+				Object: map[string]interface{}{},
+			},
+			false,
+		},
+		"timestamp strategy, custom advanced jsonpath expression": {
+			ca: &cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyTimestampField,
+						TimestampField: &cronanything.TimestampFieldStrategy{
+							FieldPath: `{.status.conditions[?(@.reason=="PodCompleted")].lastTransitionTime}`,
+						},
+					},
+				},
+			},
+			resource: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"status": map[string]interface{}{
+						"conditions": []interface{}{
+							map[string]interface{}{
+								"lastTransitionTime": "2018-08-23T22:42:51Z",
+								"status":             "True",
+								"type":               "Initialized",
+							},
+							map[string]interface{}{
+								"lastTransitionTime": "2018-08-23T22:43:06Z",
+								"status":             "True",
+								"type":               "Ready",
+							},
+							map[string]interface{}{
+								"lastTransitionTime": "2018-08-23T22:42:51Z",
+								"status":             "True",
+								"type":               "PodScheduled",
+							},
+							map[string]interface{}{
+								"lastTransitionTime": "2018-09-19T19:26:00Z",
+								"reason":             "PodCompleted",
+								"status":             "True",
+								"type":               "Initialized",
+							},
+						},
+					},
+				},
+			},
+			result: true,
+		},
+		"string strategy, not finished": {
+			&cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyStringField,
+						StringField: &cronanything.StringFieldStrategy{
+							FieldPath:      stringFieldPath,
+							FinishedValues: []string{"Finished", "Failed"},
+						},
+					},
+				},
+			},
+			&unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"status": map[string]interface{}{
+						"myCustomPhase": "Running",
+					},
+				},
+			},
+			false,
+		},
+		"string strategy, finished": {
+			&cronanything.CronAnything{
+				Spec: cronanything.CronAnythingSpec{
+					FinishableStrategy: &cronanything.FinishableStrategy{
+						Type: cronanything.FinishableStrategyStringField,
+						StringField: &cronanything.StringFieldStrategy{
+							FieldPath:      stringFieldPath,
+							FinishedValues: []string{"Finished", "Failed"},
+						},
+					},
+				},
+			},
+			&unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"status": map[string]interface{}{
+						"myCustomPhase": "Finished",
+					},
+				},
+			},
+			true,
+		},
+	}
+
+	for tn, tc := range testCases {
+		t.Run(tn, func(t *testing.T) {
+			res, _ := isFinished(tc.ca, tc.resource)
+			if res != tc.result {
+				t.Errorf("Expected %t, but got %t", tc.result, res)
+			}
+		})
+	}
+}
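+
+// A hedged summary of the policies exercised by TestConcurrencyPolicy below
+// (constants as used in this test): AllowConcurrent always creates on a
+// trigger, ForbidConcurrent skips the trigger while an active child exists
+// (finished children do not block it), and ReplaceConcurrent deletes the
+// active children first and defers the replacement create to a later
+// reconcile.
+//
+//	ca.Spec.ConcurrencyPolicy = cronanything.ForbidConcurrent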
+
+func TestConcurrencyPolicy(t *testing.T) {
+	testCases := map[string]struct {
+		concurrencyPolicy             cronanything.ConcurrencyPolicy
+		existingActiveResourceCount   int
+		existingFinishedResourceCount int
+		expectedDeleteCount           int
+		expectedCreateCount           int
+	}{
+		"allow concurrent, existing resource": {
+			cronanything.AllowConcurrent,
+			1,
+			1,
+			0,
+			1,
+		},
+		"allow concurrent, no existing resources": {
+			cronanything.AllowConcurrent,
+			0,
+			0,
+			0,
+			1,
+		},
+		"forbid concurrent, existing active resource": {
+			cronanything.ForbidConcurrent,
+			1,
+			0,
+			0,
+			0,
+		},
+		"forbid concurrent, existing finished resource": {
+			cronanything.ForbidConcurrent,
+			0,
+			1,
+			0,
+			1,
+		},
+		"forbid concurrent, no existing resources": {
+			cronanything.ForbidConcurrent,
+			0,
+			0,
+			0,
+			1,
+		},
+		"replace concurrent, existing active resource": {
+			cronanything.ReplaceConcurrent,
+			1,
+			0,
+			1,
+			0,
+		},
+		"replace concurrent, existing finished resource": {
+			cronanything.ReplaceConcurrent,
+			0,
+			2,
+			0,
+			1,
+		},
+		"replace concurrent, no existing resources": {
+			cronanything.ReplaceConcurrent,
+			0,
+			0,
+			0,
+			1,
+		},
+		"replace concurrent, multiple active resources": {
+			cronanything.ReplaceConcurrent,
+			5,
+			2,
+			5,
+			0,
+		},
+	}
+
+	for tn, tc := range testCases {
+		t.Run(tn, func(t *testing.T) {
+			reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+			ca := newCronAnything(apiVersion, kind, name, namespace)
+			ca.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
+			ca.Spec.FinishableStrategy = &cronanything.FinishableStrategy{
+				Type: cronanything.FinishableStrategyTimestampField,
+				TimestampField: &cronanything.TimestampFieldStrategy{
+					FieldPath: `{.status.finishedAt}`,
+				},
+			}
+
+			fakeCronAnythingControl.getCronAnything = ca
+
+			var resourceListResult []*unstructured.Unstructured
+			for i := 0; i < tc.existingActiveResourceCount; i++ {
+				resourceListResult = append(resourceListResult, newUnstructuredResource(ca, fmt.Sprintf("activeResource%d", i)))
+			}
+			for i := 0; i < tc.existingFinishedResourceCount; i++ {
+				resource := newUnstructuredResource(ca, fmt.Sprintf("finishedResource%d", i))
+				unstructured.SetNestedField(resource.Object, baseTime.Format(time.RFC3339), "status", "finishedAt")
+				resourceListResult = append(resourceListResult, resource)
+			}
+			fakeResourceControl.listResult = resourceListResult
+
+			_, err := reconciler.Reconcile(reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: namespace,
+					Name:      name,
+				},
+			})
+			if err != nil {
+				t.Error(err)
+			}
+
+			if len(fakeResourceControl.deleteSlice) != tc.expectedDeleteCount {
+				t.Errorf("Expected %d deletes, but found %d", tc.expectedDeleteCount, len(fakeResourceControl.deleteSlice))
+			}
+
+			if fakeResourceControl.createCount != tc.expectedCreateCount {
+				t.Errorf("Expected %d creates, but found %d", tc.expectedCreateCount, fakeResourceControl.createCount)
+			}
+		})
+	}
+}
+
+func TestReplaceConcurrentIgnoreResourcesMarkedForDeletion(t *testing.T) {
+	reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+	ca := newCronAnything(apiVersion, kind, name, namespace)
+	ca.Spec.ConcurrencyPolicy = cronanything.ReplaceConcurrent
+	fakeCronAnythingControl.getCronAnything = ca
+
+	resource := newUnstructuredResource(ca, "resource")
+	unstructured.SetNestedField(resource.Object, baseTime.Format(time.RFC3339), "metadata", "deletionTimestamp")
+	fakeResourceControl.listResult = []*unstructured.Unstructured{resource}
+
+	_, err := reconciler.Reconcile(reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Namespace: namespace,
+			Name:      name,
+		},
+	})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if len(fakeResourceControl.deleteSlice) != 0 {
+		t.Errorf("Expected no delete, but found one")
+	}
+}
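+
+// A hedged worked example of the retention arithmetic TestCleanupHistory
+// below exercises: with historyCountLimit=2 and five finished resources,
+// cleanupHistory sorts the finished children newest-first, keeps the two
+// most recent ("fr4" and "fr5" in the fixtures), and deletes the oldest
+// three ("fr1" through "fr3").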
no delete, but found one") + } +} + +func TestCleanupHistory(t *testing.T) { + testCases := map[string]struct { + useHistoryCountLimit bool + historyCountLimit int32 + useHistoryTimeLimitSeconds bool + historyTimeLimitSeconds uint64 + existingActiveResources []string + existingFinishedResources map[string]time.Time + deletedResources []string + }{ + "no active and no finished resourced": { + true, + 3, + true, + 60, + []string{}, + map[string]time.Time{}, + []string{}, + }, + "historyCountLimit and finished resources": { + true, + 2, + false, + 0, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Hour), + "fr2": baseTime.Add(-9 * time.Hour), + "fr3": baseTime.Add(-8 * time.Hour), + "fr4": baseTime.Add(-7 * time.Hour), + "fr5": baseTime.Add(-6 * time.Hour), + }, + []string{"fr1", "fr2", "fr3"}, + }, + "historyCountLimit and fewer finished resources": { + true, + 5, + false, + 0, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Hour), + "fr2": baseTime.Add(-9 * time.Hour), + "fr3": baseTime.Add(-8 * time.Hour), + "fr4": baseTime.Add(-7 * time.Hour), + "fr5": baseTime.Add(-6 * time.Hour), + }, + []string{}, + }, + "historyTimeLimit and finished resources": { + false, + 0, + true, + (6 * 60) + 30, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Minute), + "fr2": baseTime.Add(-9 * time.Minute), + "fr3": baseTime.Add(-8 * time.Minute), + "fr4": baseTime.Add(-7 * time.Minute), + "fr5": baseTime.Add(-6 * time.Minute), + }, + []string{"fr1", "fr2", "fr3", "fr4"}, + }, + "historyCountLimit and no finished resources old enough": { + false, + 0, + true, + 12 * 60, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Minute), + "fr2": baseTime.Add(-9 * time.Minute), + "fr3": baseTime.Add(-8 * time.Minute), + "fr4": baseTime.Add(-7 * time.Minute), + "fr5": baseTime.Add(-6 * time.Minute), + }, + []string{}, + }, + "no historyCountLimit or historyTimeLimit should not delete any": { + false, + 0, + false, + 0, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Second), + "fr2": baseTime.Add(-9 * time.Second), + "fr3": baseTime.Add(-8 * time.Second), + "fr4": baseTime.Add(-7 * time.Second), + "fr5": baseTime.Add(-6 * time.Second), + }, + []string{}, + }, + "both historyCountLimit and historyTimeLimit, many old resources": { + true, + 6, + true, + 3 * 60, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Minute), + "fr2": baseTime.Add(-9 * time.Minute), + "fr3": baseTime.Add(-8 * time.Minute), + "fr4": baseTime.Add(-7 * time.Minute), + "fr5": baseTime.Add(-6 * time.Minute), + }, + []string{"fr1", "fr2", "fr3", "fr4", "fr5"}, + }, + "both historyCountLimit and historyTimeLimit, many new resources": { + true, + 1, + true, + 3 * 60, + []string{"ar1", "ar2", "ar3"}, + map[string]time.Time{ + "fr1": baseTime.Add(-10 * time.Second), + "fr2": baseTime.Add(-9 * time.Second), + "fr3": baseTime.Add(-8 * time.Second), + "fr4": baseTime.Add(-7 * time.Second), + "fr5": baseTime.Add(-6 * time.Second), + }, + []string{"fr1", "fr2", "fr3", "fr4"}, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler() + + ca := newCronAnything(apiVersion, kind, namespace, name) + ca.Spec.FinishableStrategy = &cronanything.FinishableStrategy{ + Type: cronanything.FinishableStrategyTimestampField, + 
+				TimestampField: &cronanything.TimestampFieldStrategy{
+					FieldPath: `{.status.finishedAt}`,
+				},
+			}
+			ca.Spec.Retention = &cronanything.ResourceRetention{
+				ResourceTimestampStrategy: cronanything.ResourceTimestampStrategy{
+					Type: cronanything.ResourceTimestampStrategyField,
+					FieldResourceTimestampStrategy: &cronanything.FieldResourceTimestampStrategy{
+						FieldPath: `{.status.finishedAt}`,
+					},
+				},
+			}
+			if tc.useHistoryCountLimit {
+				ca.Spec.Retention.HistoryCountLimit = &tc.historyCountLimit
+			}
+			if tc.useHistoryTimeLimitSeconds {
+				ca.Spec.Retention.HistoryTimeLimitSeconds = &tc.historyTimeLimitSeconds
+			}
+			T := true
+			ca.Spec.Suspend = &T
+			fakeCronAnythingControl.getCronAnything = ca
+
+			var resourceListResult []*unstructured.Unstructured
+			for _, n := range tc.existingActiveResources {
+				resourceListResult = append(resourceListResult, newUnstructuredResource(ca, n))
+			}
+			for n, finishedTimestamp := range tc.existingFinishedResources {
+				resource := newUnstructuredResource(ca, n)
+				unstructured.SetNestedField(resource.Object, finishedTimestamp.Format(time.RFC3339), "status", "finishedAt")
+				resourceListResult = append(resourceListResult, resource)
+			}
+			fakeResourceControl.listResult = resourceListResult
+
+			_, err := reconciler.Reconcile(reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: namespace,
+					Name:      name,
+				},
+			})
+			if err != nil {
+				t.Errorf("Unexpected error: %v", err)
+			}
+
+			sort.Strings(tc.deletedResources)
+			sort.Strings(fakeResourceControl.deleteSlice)
+			if !reflect.DeepEqual(tc.deletedResources, fakeResourceControl.deleteSlice) {
+				t.Errorf("Expected deletion of resources %s, but found %s", strings.Join(tc.deletedResources, ","), strings.Join(fakeResourceControl.deleteSlice, ","))
+			}
+		})
+	}
+}
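+
+// A hedged note on the interplay tested below: TotalResourceLimit caps how
+// many children may exist when a trigger fires, but history cleanup appears
+// to run first, so finished children beyond HistoryCountLimit are removed
+// before the limit check. That would explain why the "finished over
+// historyLimit" case still expects a create.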
+
+func TestTotalResourceLimit(t *testing.T) {
+	testCases := map[string]struct {
+		totalResourceLimit            int32
+		historyLimit                  int32
+		existingActiveResourceCount   int
+		existingFinishedResourceCount int
+		created                       bool
+	}{
+		"total number under totalResourceLimit": {
+			5,
+			3,
+			2,
+			2,
+			true,
+		},
+		"active over totalResourceLimit": {
+			5,
+			3,
+			6,
+			0,
+			false,
+		},
+		"active at totalResourceLimit and some finished": {
+			5,
+			3,
+			5,
+			3,
+			false,
+		},
+		"active plus finished inside limit over totalResourceLimit": {
+			5,
+			3,
+			3,
+			3,
+			false,
+		},
+		"active and finished over totalResourceLimit, but finished over historyLimit": {
+			5,
+			1,
+			2,
+			10,
+			true,
+		},
+		"no active and no finished resources": {
+			1,
+			4,
+			0,
+			0,
+			true,
+		},
+		"totalResourceLimit is zero": {
+			0,
+			4,
+			0,
+			0,
+			false,
+		},
+	}
+
+	for tn, tc := range testCases {
+		t.Run(tn, func(t *testing.T) {
+			reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+			ca := newCronAnything(apiVersion, kind, name, namespace)
+			ca.Spec.TotalResourceLimit = &tc.totalResourceLimit
+			ca.Spec.FinishableStrategy = &cronanything.FinishableStrategy{
+				Type: cronanything.FinishableStrategyTimestampField,
+				TimestampField: &cronanything.TimestampFieldStrategy{
+					FieldPath: `{.status.finishedAt}`,
+				},
+			}
+			ca.Spec.Retention = &cronanything.ResourceRetention{
+				ResourceTimestampStrategy: cronanything.ResourceTimestampStrategy{
+					Type: cronanything.ResourceTimestampStrategyField,
+					FieldResourceTimestampStrategy: &cronanything.FieldResourceTimestampStrategy{
+						FieldPath: `{.status.finishedAt}`,
+					},
+				},
+			}
+			ca.Spec.Retention.HistoryCountLimit = &tc.historyLimit
+
+			fakeCronAnythingControl.getCronAnything = ca
+
+			var resourceListResult []*unstructured.Unstructured
+			for i := 0; i < tc.existingActiveResourceCount; i++ {
+				resourceListResult = append(resourceListResult, newUnstructuredResource(ca, fmt.Sprintf("activeResource%d", i)))
+			}
+			for i := 0; i < tc.existingFinishedResourceCount; i++ {
+				resource := newUnstructuredResource(ca, fmt.Sprintf("finishedResource%d", i))
+				unstructured.SetNestedField(resource.Object, baseTime.Format(time.RFC3339), "status", "finishedAt")
+				resourceListResult = append(resourceListResult, resource)
+			}
+			fakeResourceControl.listResult = resourceListResult
+
+			_, err := reconciler.Reconcile(reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: namespace,
+					Name:      name,
+				},
+			})
+			if err != nil {
+				t.Errorf("Unexpected error: %v", err)
+			}
+
+			didCreate := fakeResourceControl.createCount > 0
+			if didCreate != tc.created {
+				t.Errorf("Expected %t, but found %t", tc.created, didCreate)
+			}
+		})
+	}
+}
+
+func TestCreateResourceAndUpdateCronAnything(t *testing.T) {
+	reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler()
+
+	resourceTemplate := map[string]interface{}{
+		"apiVersion": apiVersion,
+		"kind":       kind,
+		"spec": map[string]interface{}{
+			"greeting":   "Hello World!",
+			"greetCount": 42,
+		},
+	}
+	resourceTemplateBytes, _ := json.Marshal(resourceTemplate)
+
+	ca := &cronanything.CronAnything{
+		ObjectMeta: metav1.ObjectMeta{
+			CreationTimestamp: metav1.Time{
+				Time: baseTime.Add(-60 * time.Second),
+			},
+			Name: "myCronAnything",
+			UID:  "myUID",
+		},
+		Spec: cronanything.CronAnythingSpec{
+			Schedule: defaultCronExpr,
+			Template: runtime.RawExtension{
+				Raw: resourceTemplateBytes,
+			},
+		},
+	}
+	fakeCronAnythingControl.getCronAnything = ca
+
+	times, _, _ := getScheduleTimes(ca, baseTime)
+	expectedTriggerTime := times[len(times)-1]
+
+	_, err := reconciler.Reconcile(reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Namespace: namespace,
+			Name:      name,
+		},
+	})
+	if err != nil {
+		t.Errorf("%s: Unexpected error: %v", name, err)
+	}
+
+	expectedTemplate := make(map[string]interface{})
+	if err := json.Unmarshal(resourceTemplateBytes, &expectedTemplate); err != nil {
+		t.Fatal(err)
+	}
+	createdResourceTemplate := fakeResourceControl.createTemplate.Object
+	if !reflect.DeepEqual(expectedTemplate["spec"], createdResourceTemplate["spec"]) {
+		t.Error("Expected spec of the created resource to match the template, but it didn't")
+	}
+
+	lastScheduleTime := fakeCronAnythingControl.updateCronAnything.Status.LastScheduleTime.Time
+	if expectedTriggerTime != lastScheduleTime {
+		t.Errorf("Expected %s, but got %s", expectedTriggerTime.Format(time.RFC3339), lastScheduleTime.Format(time.RFC3339))
+	}
+}
+
+func TestGetResourceName(t *testing.T) {
+	timestamp := toTimestamp(t, "2012-11-01T22:08:41+00:00")
+
+	testCases := map[string]struct {
+		cronAnything *cronanything.CronAnything
+		scheduleTime time.Time
+		expectedName string
+	}{
+		"default name, default timestamp format": {
+			cronAnything: newCronAnythingForResourceName("myresource", nil, nil),
+			scheduleTime: timestamp,
+			expectedName: "myresource-1351807721",
+		},
+		"custom name, default timestamp format": {
+			cronAnything: newCronAnythingForResourceName("myresource", toPointer("anotherResource"), nil),
+			scheduleTime: timestamp,
+			expectedName: "anotherResource-1351807721",
+		},
+		"default name, custom timestamp format": {
+			cronAnything: newCronAnythingForResourceName("myresource", nil, toPointer("20060102150405")),
+			scheduleTime: timestamp,
+			expectedName: "myresource-20121101220841",
+		},
+		"custom name, custom timestamp format": {
+			cronAnything: newCronAnythingForResourceName("myresource", toPointer("anotherResource"), toPointer("2006-01-02-15-04-05")),
scheduleTime: timestamp, + expectedName: "anotherResource-2012-11-01-22-08-41", + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + name := getResourceName(tc.cronAnything, tc.scheduleTime) + if tc.expectedName != name { + t.Errorf("expected %s, but got %s", tc.expectedName, name) + } + }) + } +} + +func TestTriggerHistory(t *testing.T) { + testCases := map[string]struct { + CurrentTime time.Time + TriggerDeadline *int64 + ConcurrencyPolicy cronanything.ConcurrencyPolicy + ExistingResourceCount int + CurrentStatus cronanything.CronAnythingStatus + CreateResourceError error + ReconcileFails bool + ExpectedTriggerHistory []cronanything.TriggerHistoryRecord + ExpectedPendingTrigger *cronanything.PendingTrigger + }{ + "Successful create added to history": { + CurrentTime: time.Date(2018, time.April, 20, 4, 20, 01, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 19, 0, 0, time.UTC)), + }, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 01, 0, time.UTC)), + Result: cronanything.TriggerResultCreateSucceeded, + }, + }, + }, + "Create fails first time": { + CurrentTime: time.Date(2018, time.April, 20, 4, 20, 01, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 19, 0, 0, time.UTC)), + }, + CreateResourceError: errors.New("this is a test error"), + ReconcileFails: true, + ExpectedPendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + "Create fails second time": { + CurrentTime: time.Date(2018, time.April, 20, 4, 20, 05, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 19, 0, 0, time.UTC)), + PendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 00, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + CreateResourceError: errors.New("this is a test error"), + ReconcileFails: true, + ExpectedPendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + "New trigger time reached while create fails": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 19, 0, 0, time.UTC)), + PendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 00, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + CreateResourceError: errors.New("this is a test error"), + ReconcileFails: true, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + ExpectedPendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: 
metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + "New trigger time reached with create failed and new succeeds": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 19, 0, 0, time.UTC)), + PendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 00, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultCreateSucceeded, + }, + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + }, + "Multiple triggers missed with no pending trigger": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 10, 0, 0, time.UTC)), + }, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultCreateSucceeded, + }, + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultMissed, + }, + }, + }, + "Trigger deadline exceeded without pending trigger": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 31, 0, time.UTC), + TriggerDeadline: func(n int64) *int64 { return &n }(30), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + }, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 31, 0, time.UTC)), + Result: cronanything.TriggerResultDeadlineExceeded, + }, + }, + }, + "Trigger deadline exceeded with pending trigger": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 31, 0, time.UTC), + TriggerDeadline: func(n int64) *int64 { return &n }(30), + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + PendingTrigger: &cronanything.PendingTrigger{ + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 31, 0, time.UTC)), + Result: cronanything.TriggerResultCreateFailed, + }, + }, + }, + "Forbid concurrent policy with existing unfinished 
resource": { + CurrentTime: time.Date(2018, time.April, 20, 4, 21, 1, 0, time.UTC), + ConcurrencyPolicy: cronanything.ForbidConcurrent, + CurrentStatus: cronanything.CronAnythingStatus{ + LastScheduleTime: getMetaTimePointer(time.Date(2018, time.April, 20, 4, 20, 0, 0, time.UTC)), + }, + ExistingResourceCount: 1, + ExpectedTriggerHistory: []cronanything.TriggerHistoryRecord{ + { + ScheduleTime: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 0, 0, time.UTC)), + CreationTimestamp: metav1.NewTime(time.Date(2018, time.April, 20, 4, 21, 01, 0, time.UTC)), + Result: cronanything.TriggerResultForbidConcurrent, + }, + }, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + reconciler, fakeCronAnythingControl, fakeResourceControl := createReconciler() + reconciler.currentTime = func() time.Time { return tc.CurrentTime } + + ca := newCronAnything(apiVersion, kind, namespace, name) + ca.CreationTimestamp = metav1.NewTime(tc.CurrentTime.Add(-time.Hour)) + ca.Spec.Schedule = defaultCronExpr + ca.Spec.TriggerDeadlineSeconds = tc.TriggerDeadline + ca.Spec.ConcurrencyPolicy = tc.ConcurrencyPolicy + ca.Status = tc.CurrentStatus + + fakeCronAnythingControl.getCronAnything = ca + + fakeResourceControl.listResult = createUnstructuredSlice(ca, tc.ExistingResourceCount) + fakeResourceControl.createError = tc.CreateResourceError + + _, err := reconciler.Reconcile(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: name, + }, + }) + if !tc.ReconcileFails && err != nil { + t.Errorf("Expected reconcile to succeed, but it failed with error %v", err) + } + if tc.ReconcileFails && err == nil { + t.Errorf("Expected reconcile to fail, but it didn't") + } + + status := fakeCronAnythingControl.updateCronAnything.Status + if !isTriggerHistoriesEqual(status.TriggerHistory, tc.ExpectedTriggerHistory) { + t.Errorf("Expected %v in trigger history, but found %v", tc.ExpectedTriggerHistory, status.TriggerHistory) + } + + if !reflect.DeepEqual(status.PendingTrigger, tc.ExpectedPendingTrigger) { + t.Errorf("Expected %v as pending trigger, but found %v", tc.ExpectedPendingTrigger, status.PendingTrigger) + } + }) + } +} + +func TestTriggerHistoryLength(t *testing.T) { + existingRecordsStatus := cronanything.TriggerResultCreateSucceeded + newRecordStatus := cronanything.TriggerResultCreateFailed + + testCases := map[string]struct { + initialHistoryLength int + finalHistoryLength int + }{ + "new record to empty history": { + initialHistoryLength: 0, + finalHistoryLength: 1, + }, + "new record to full history": { + initialHistoryLength: 10, + finalHistoryLength: 10, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + var history []cronanything.TriggerHistoryRecord + for i := 0; i < tc.initialHistoryLength; i++ { + history = append(history, cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(time.Now()), + CreationTimestamp: metav1.NewTime(time.Now()), + Result: existingRecordsStatus, + }) + } + status := &cronanything.CronAnythingStatus{ + TriggerHistory: history, + } + + r := cronanything.TriggerHistoryRecord{ + ScheduleTime: metav1.NewTime(time.Now()), + CreationTimestamp: metav1.NewTime(time.Now()), + Result: newRecordStatus, + } + addToTriggerHistory(status, r) + + if got, want := len(status.TriggerHistory), tc.finalHistoryLength; got != want { + t.Errorf("Expected trigger history to have %d record, but found %d", want, got) + } + + headOfHistory := status.TriggerHistory[0] + if headOfHistory.Result != newRecordStatus 
{ + t.Errorf("Expected new record to be first in the history, but it was't") + } + }) + } +} + +func isTriggerHistoriesEqual(actualHistory []cronanything.TriggerHistoryRecord, expectedHistory []cronanything.TriggerHistoryRecord) bool { + if len(actualHistory) != len(expectedHistory) { + return false + } + for index, actual := range actualHistory { + expected := expectedHistory[index] + if expected.Result != actual.Result || !expected.ScheduleTime.Equal(&actual.ScheduleTime) || !expected.CreationTimestamp.Equal(&actual.CreationTimestamp) { + return false + } + } + return true +} + +func createUnstructuredSlice(ca *cronanything.CronAnything, count int) []*unstructured.Unstructured { + var unstructuredSlice []*unstructured.Unstructured + for i := 0; i < count; i++ { + unstructuredSlice = append(unstructuredSlice, newUnstructuredResource(ca, fmt.Sprintf("resource-%d", i))) + } + return unstructuredSlice +} + +func newCronAnythingForResourceName(name string, resourceBaseName, resourceTimestampFormat *string) *cronanything.CronAnything { + ca := newCronAnything("oracle.db.anthosapis.com/v1alpha1", "TestKind", name, "default") + ca.Spec.ResourceBaseName = resourceBaseName + ca.Spec.ResourceTimestampFormat = resourceTimestampFormat + return ca +} + +func toPointer(s string) *string { + return &s +} + +func toTimestamp(t *testing.T, timestampString string) time.Time { + timestamp, err := time.Parse(time.RFC3339, timestampString) + if err != nil { + t.Fatal(err) + } + return timestamp +} + +type fakeCronAnythingControl struct { + getKey client.ObjectKey + getCronAnything *cronanything.CronAnything + getError error + + updateCronAnything *cronanything.CronAnything + updateError error +} + +func (r *fakeCronAnythingControl) Get(key client.ObjectKey) (*cronanything.CronAnything, error) { + r.getKey = key + return r.getCronAnything, r.getError +} + +func (r *fakeCronAnythingControl) Update(ca *cronanything.CronAnything) error { + r.updateCronAnything = ca + return r.updateError +} + +type fakeResourceControl struct { + createResource schema.GroupVersionResource + createNamespace string + createTemplate *unstructured.Unstructured + createCount int + createError error + + deleteSlice []string + deleteError error + + listResult []*unstructured.Unstructured + listError error +} + +func (r *fakeResourceControl) Delete(resource schema.GroupVersionResource, namespace, name string) error { + r.deleteSlice = append(r.deleteSlice, name) + return r.deleteError +} + +func (r *fakeResourceControl) Create(resource schema.GroupVersionResource, namespace string, template *unstructured.Unstructured) error { + r.createResource = resource + r.createNamespace = namespace + r.createTemplate = template + r.createCount += 1 + return r.createError +} + +func (r *fakeResourceControl) List(resource schema.GroupVersionResource, _ string) ([]*unstructured.Unstructured, error) { + return r.listResult, r.listError +} + +type fakeResourceResolver struct { +} + +func (r *fakeResourceResolver) Start(interval time.Duration, stopCh <-chan struct{}) {} + +func (r *fakeResourceResolver) Resolve(gvk schema.GroupVersionKind) (schema.GroupVersionResource, bool) { + return schema.GroupVersionResource{}, true +} diff --git a/oracle/controllers/cronanythingcontroller/operations.go b/oracle/controllers/cronanythingcontroller/operations.go new file mode 100644 index 0000000..00e87a1 --- /dev/null +++ b/oracle/controllers/cronanythingcontroller/operations.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cronanythingcontroller + +import ( + "context" + "strings" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + cronanything "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +type realCronAnythingControl struct { + kubeClient client.Client +} + +func (r *realCronAnythingControl) Get(key client.ObjectKey) (*cronanything.CronAnything, error) { + ca := &cronanything.CronAnything{} + err := r.kubeClient.Get(context.TODO(), key, ca) + return ca, err +} + +func (r *realCronAnythingControl) Update(ca *cronanything.CronAnything) error { + return r.kubeClient.Update(context.TODO(), ca) +} + +type realResourceControl struct { + dynClient dynamic.Interface +} + +func (r *realResourceControl) Delete(resource schema.GroupVersionResource, namespace, name string) error { + deleteForeground := metav1.DeletePropagationForeground + return r.dynClient.Resource(resource).Namespace(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &deleteForeground}) +} + +func (r *realResourceControl) Create(resource schema.GroupVersionResource, namespace string, template *unstructured.Unstructured) error { + _, err := r.dynClient.Resource(resource).Namespace(namespace).Create(context.TODO(), template, metav1.CreateOptions{}) + return err +} + +func (r *realResourceControl) List(resource schema.GroupVersionResource, cronAnythingName string) ([]*unstructured.Unstructured, error) { + res, err := r.dynClient.Resource(resource).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{cronanything.CronAnythingCreatedByLabel: cronAnythingName}).String(), + }) + if err != nil { + return []*unstructured.Unstructured{}, err + } + list, err := meta.ExtractList(res) + if err != nil { + return []*unstructured.Unstructured{}, err + } + + returnList := make([]*unstructured.Unstructured, 0) + for _, obj := range list { + unstructuredResource, _ := obj.(*unstructured.Unstructured) + returnList = append(returnList, unstructuredResource) + } + return returnList, nil +} + +// NewResourceResolver creates a resource resolver to find the corresponding +// group version resource for a given group version kind. 
+func NewResourceResolver(config *rest.Config) *realResourceResolver { + dc := discovery.NewDiscoveryClientForConfigOrDie(config) + return &realResourceResolver{ + dc: dc, + } +} + +type realResourceResolver struct { + mu sync.Mutex + dc *discovery.DiscoveryClient + resourceMapping map[schema.GroupVersionKind]schema.GroupVersionResource +} + +func (r *realResourceResolver) Start(refreshInterval time.Duration, stopCh <-chan struct{}) { + go func() { + + ticker := time.NewTicker(refreshInterval) + defer ticker.Stop() + + for { + r.refresh() + + select { + case <-stopCh: + return + case <-ticker.C: + } + } + }() +} + +func (r *realResourceResolver) refresh() { + resources, err := r.dc.ServerResources() + if err != nil { + log.Error(err, "Unable to fetch server resources") + return + } + + mapping := make(map[schema.GroupVersionKind]schema.GroupVersionResource) + for _, apiResourceList := range resources { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + log.Error(err, "Error parsing group version", "groupVersion", apiResourceList.GroupVersion) + continue + } + for _, apiResource := range apiResourceList.APIResources { + gvk := gv.WithKind(apiResource.Kind) + gvr := gv.WithResource(apiResource.Name) + // temporary fix to avoid adding subResource. For example, backups and + // backups/status shared the same gvk. + if !strings.Contains(apiResource.Name, "/") { + mapping[gvk] = gvr + } + } + } + + r.mu.Lock() + defer r.mu.Unlock() + r.resourceMapping = mapping +} + +func (r *realResourceResolver) Resolve(gvk schema.GroupVersionKind) (schema.GroupVersionResource, bool) { + r.mu.Lock() + defer r.mu.Unlock() + item, found := r.resourceMapping[gvk] + return item, found +} diff --git a/oracle/controllers/databasecontroller/BUILD.bazel b/oracle/controllers/databasecontroller/BUILD.bazel new file mode 100644 index 0000000..104914d --- /dev/null +++ b/oracle/controllers/databasecontroller/BUILD.bazel @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "databasecontroller", + srcs = [ + "database_controller.go", + "database_resources.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/databasecontroller", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers", + "//oracle/pkg/agents/common/sql", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/agents/consts", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//tools/record", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/builder", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/event", + "@io_k8s_sigs_controller_runtime//pkg/handler", + "@io_k8s_sigs_controller_runtime//pkg/predicate", + "@io_k8s_sigs_controller_runtime//pkg/reconcile", + "@io_k8s_sigs_controller_runtime//pkg/source", + "@io_k8s_utils//integer", + "@org_golang_google_grpc//:go_default_library", + ], +) + +go_test( + name = "databasecontroller_test", + srcs = ["database_controller_test.go"], + embed = [":databasecontroller"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + 
"@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) diff --git a/oracle/controllers/databasecontroller/database_controller.go b/oracle/controllers/databasecontroller/database_controller.go new file mode 100644 index 0000000..97d7122 --- /dev/null +++ b/oracle/controllers/databasecontroller/database_controller.go @@ -0,0 +1,335 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package databasecontroller + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +const ( + DatabaseContainerName = "oracledb" +) + +// These variables allow to plug in mock objects for functional tests +var ( + skipLBCheckForTest = false + CheckStatusInstanceFunc = controllers.CheckStatusInstanceFunc +) + +// DatabaseReconciler reconciles a Database object +type DatabaseReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ClientFactory controllers.ConfigAgentClientFactory + Recorder record.EventRecorder +} + +func (r *DatabaseReconciler) findPod(ctx context.Context, namespace, instName string) (*corev1.PodList, error) { + // List the Pods matching the PodTemplate Labels + var pods corev1.PodList + if err := r.List(ctx, &pods, client.InNamespace(namespace), client.MatchingLabels{"instance": instName}); err != nil { + return nil, err + } + + return &pods, nil +} + +func findContainer(pod corev1.Pod, c string) (*corev1.Container, error) { + for _, con := range pod.Spec.Containers { + if con.Name == c { + return &con, nil + } + } + return nil, fmt.Errorf("failed to find a container %s in a pod: %v", c, pod) +} + +// updateIsChangeApplied sets status.IsChangeApplied field to false if observedGeneration < generation, it sets it to true if changes are 
applied.
+func (r *DatabaseReconciler) updateIsChangeApplied(ctx context.Context, db *v1alpha1.Database) {
+	if db.Status.ObservedGeneration < db.Generation {
+		db.Status.IsChangeApplied = v1.ConditionFalse
+		db.Status.ObservedGeneration = db.Generation
+		r.Log.Info("change detected", "observedGeneration", db.Status.ObservedGeneration, "generation", db.Generation)
+	}
+	if db.Status.IsChangeApplied == v1.ConditionTrue {
+		return
+	}
+	userUpdateDone := k8s.ConditionStatusEquals(k8s.FindCondition(db.Status.Conditions, k8s.UserReady), v1.ConditionTrue)
+	if userUpdateDone {
+		db.Status.IsChangeApplied = v1.ConditionTrue
+		r.Log.Info("change applied", "observedGeneration", db.Status.ObservedGeneration, "generation", db.Generation)
+	}
+}
+
+// +kubebuilder:rbac:groups=database.oracle.db.anthosapis.com,resources=databases,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=database.oracle.db.anthosapis.com,resources=databases/status,verbs=get;update;patch
+
+// +kubebuilder:rbac:groups=core,resources=services,verbs=list;watch;get;patch
+// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
+// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=pods/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups="",resources=pods/exec,verbs=get;list;create;update;patch
+// +kubebuilder:rbac:groups="",resources=pods/log,verbs=get;list
+
+// Reconcile is the main method that reconciles the Database resource.
+func (r *DatabaseReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
+	ctx := context.Background()
+	log := r.Log.WithValues("Database", req.NamespacedName)
+
+	log.Info("reconciling database")
+
+	var db v1alpha1.Database
+
+	if err := r.Get(ctx, req.NamespacedName, &db); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	if err := validateSpec(&db); err != nil {
+		return ctrl.Result{}, r.handlePreflightCheckError(ctx, &db, err)
+	}
+
+	// Find the Instance resource that the Database belongs to.
+	var inst v1alpha1.Instance
+	if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: db.Spec.Instance}, &inst); err != nil {
+		return ctrl.Result{}, r.handlePreflightCheckError(ctx, &db, fmt.Errorf("failed to find instance %q for database %q", db.Spec.Instance, db.Name))
+	}
+	log.Info("found the following instance for the create-database request", "db.Spec.Instance", db.Spec.Instance, "inst", inst)
+
+	DBDomain := controllers.GetDBDomain(&inst)
+
+	// Find a pod running a database container.
+	pods, err := r.findPod(ctx, req.Namespace, db.Spec.Instance)
+	if err != nil {
+		return ctrl.Result{}, r.handlePreflightCheckError(ctx, &db, fmt.Errorf("failed to find a pod"))
+	}
+	log.V(2).Info("found a pod", "pods", pods)
+
+	if len(pods.Items) != 1 {
+		return ctrl.Result{}, r.handlePreflightCheckError(ctx, &db, fmt.Errorf("expected 1 pod, found %d", len(pods.Items)))
+	}
+
+	// Find a database container within that pod.
+	if _, err := findContainer(pods.Items[0], DatabaseContainerName); err != nil {
+		log.Error(err, "reconciling database - failed to find a database container")
+		return ctrl.Result{}, err
+	}
+	log.V(1).Info("database container identified")
+
+	// svc is needed to extract the ClusterIP, which is used in all the gRPC calls.
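+	// For example, assuming controllers.AgentSvcName is a format string along
+	// the lines of "%s-agent-svc", an Instance named "mydb" would resolve to
+	// the Service "mydb-agent-svc" in the request namespace (the test below
+	// creates exactly that Service).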
+	svc := &corev1.Service{}
+	if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.AgentSvcName, db.Spec.Instance), Namespace: req.NamespacedName.Namespace}, svc); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// CDBName is specified in the Instance spec.
+	cdbName := inst.Spec.CDBName
+	istatus, err := CheckStatusInstanceFunc(ctx, db.Spec.Instance, cdbName, svc.Spec.ClusterIP, DBDomain, log)
+	if err != nil {
+		log.Error(err, "preflight check failed: unable to check the database instance status")
+		return ctrl.Result{}, err
+	}
+
+	if istatus != controllers.StatusReady {
+		return ctrl.Result{}, r.handlePreflightCheckError(ctx, &db, fmt.Errorf("database instance doesn't appear to be ready yet"))
+	}
+
+	log.Info("preflight check: database instance is ready")
+
+	// Confirm that an external LB is ready.
+	lbSvc := &corev1.Service{}
+	if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.SvcName, db.Spec.Instance), Namespace: req.NamespacedName.Namespace}, lbSvc); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if len(lbSvc.Status.LoadBalancer.Ingress) == 0 && !skipLBCheckForTest {
+		return ctrl.Result{}, fmt.Errorf("preflight check: createDatabase: external LB is NOT ready")
+	}
+	log.Info("preflight check: createDatabase external LB service is ready", "svcName", lbSvc.Name)
+
+	alreadyExists, err := NewDatabase(ctx, r, &db, svc.Spec.ClusterIP, DBDomain, cdbName, log)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	r.Recorder.Eventf(&db, corev1.EventTypeNormal, k8s.CreatedDatabase, fmt.Sprintf("Created new database %q", db.Spec.Name))
+	db.Status.Phase = commonv1alpha1.DatabaseReady
+	db.Status.Conditions = k8s.Upsert(db.Status.Conditions, k8s.Ready, v1.ConditionTrue, k8s.CreateComplete, "")
+	if err := r.Status().Update(ctx, &db); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if alreadyExists {
+		if err := SyncUsers(ctx, r, &db, svc.Spec.ClusterIP, cdbName, log); err != nil {
+			log.Error(err, "failed to sync database users")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil
+	}
+
+	log.V(1).Info("[DEBUG] create users", "Database", db.Spec.Name, "Users/Privs", db.Spec.Users)
+	if err := NewUsers(ctx, r, &db, svc.Spec.ClusterIP, DBDomain, cdbName, log); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Check the DB name against existing ones to decide whether this is a new DB.
+	if !controllers.Contains(inst.Status.DatabaseNames, db.Spec.Name) {
+		log.Info("found a new DB", "dbName", db.Spec.Name)
+		inst.Status.DatabaseNames = append(inst.Status.DatabaseNames, db.Spec.Name)
+	} else {
+		log.V(1).Info("not a new DB, skipping the update", "dbName", db.Spec.Name)
+	}
+
+	log.Info("instance status", "conditions", inst.Status.Conditions, "endpoint", inst.Status.Endpoint,
+		"url", inst.Status.URL, "databases", inst.Status.DatabaseNames)
+
+	if err := r.Status().Update(ctx, &inst); err != nil {
+		log.Error(err, "failed to update the Instance status")
+		return ctrl.Result{}, err
+	}
+
+	log.Info("reconciling database: DONE")
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager starts the reconciler loop.
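+//
+// A typical wiring from the operator's entrypoint might look like this
+// (illustrative sketch mirroring the test setup below; the actual main.go is
+// not part of this diff, and clientFactory stands in for a real
+// controllers.ConfigAgentClientFactory):
+//
+//	if err := (&databasecontroller.DatabaseReconciler{
+//		Client:        mgr.GetClient(),
+//		Log:           ctrl.Log.WithName("controllers").WithName("Database"),
+//		Scheme:        mgr.GetScheme(),
+//		ClientFactory: clientFactory,
+//		Recorder:      mgr.GetEventRecorderFor("database-controller"),
+//	}).SetupWithManager(mgr); err != nil {
+//		os.Exit(1)
+//	}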
+func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// Define a mapping from the object in the event to one or more objects to Reconcile.
+	mapFunc := handler.ToRequestsFunc(
+		func(a handler.MapObject) []reconcile.Request {
+			requests := []reconcile.Request{}
+			for _, name := range a.Object.(*v1alpha1.Instance).Status.DatabaseNames {
+				requests = append(requests, reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Name:      name,
+						Namespace: a.Meta.GetNamespace(),
+					}})
+			}
+			r.Log.Info("Instance event triggered reconcile", "requests", requests)
+			return requests
+		})
+
+	// UpdateFunc decides whether an Instance update is a 'DatabaseInstanceReady'
+	// transition (false -> true). Only such events are passed on to the database reconciler.
+	databaseInstanceReadyPredicate := predicate.Funcs{
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			oldInstance, ok := e.ObjectOld.(*v1alpha1.Instance)
+			if !ok {
+				r.Log.Info("Expected instance", "type", e.ObjectOld.GetObjectKind().GroupVersionKind().String())
+				return false
+			}
+			if cond := k8s.FindCondition(oldInstance.Status.Conditions, k8s.DatabaseInstanceReady); k8s.ConditionStatusEquals(cond, v1.ConditionTrue) {
+				return false
+			}
+			newInstance, ok := e.ObjectNew.(*v1alpha1.Instance)
+			if !ok {
+				r.Log.Info("Expected instance", "type", e.ObjectNew.GetObjectKind().GroupVersionKind().String())
+				return false
+			}
+			if cond := k8s.FindCondition(newInstance.Status.Conditions, k8s.DatabaseInstanceReady); !k8s.ConditionStatusEquals(cond, v1.ConditionTrue) {
+				return false
+			}
+			r.Log.Info("DatabaseInstanceReady changed to true")
+			return true
+		},
+		CreateFunc:  func(e event.CreateEvent) bool { return false },
+		DeleteFunc:  func(e event.DeleteEvent) bool { return false },
+		GenericFunc: func(e event.GenericEvent) bool { return false },
+	}
+
+	// We watch Instance events so we can trigger database creation once the instance is ready.
+	// Add a databaseInstanceReadyPredicate to avoid constantly triggering reconciliation for every instance event.
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&v1alpha1.Database{}).
+		Owns(&corev1.Service{}).
+		Watches(
+			&source.Kind{Type: &v1alpha1.Instance{}},
+			&handler.EnqueueRequestsFromMapFunc{ToRequests: mapFunc},
+			builder.WithPredicates(databaseInstanceReadyPredicate),
+		).
+		Complete(r)
+}
+
+func (r *DatabaseReconciler) handlePreflightCheckError(ctx context.Context, db *v1alpha1.Database, err error) error {
+	r.Log.Error(err, "database preflightCheck failed")
+	r.Recorder.Eventf(db, corev1.EventTypeWarning, k8s.CreatePending, err.Error())
+	db.Status.Phase = commonv1alpha1.DatabaseCreating
+	db.Status.Conditions = k8s.Upsert(db.Status.Conditions, k8s.Ready, v1.ConditionFalse, k8s.CreatePending, err.Error())
+	if err := r.Status().Update(ctx, db); err != nil {
+		r.Log.Error(err, "failed to update database status")
+	}
+	return err
+}
+
+// validateSpec validates the database spec.
+func validateSpec(db *v1alpha1.Database) error {
+	// Currently we only validate the user credential portion of the db spec;
+	// no sensitive information is logged in the process.
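+	// For example, a spec carrying both credential sources for the admin user
+	// is rejected (illustrative YAML; exact field tags may differ from these
+	// assumed names):
+	//
+	//	adminPassword: "..."
+	//	adminPasswordGsmSecretRef:
+	//	  projectId: my-project
+	//	  secretId: admin-pwd
+	//	  version: "1"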
+ if (db.Spec.AdminPassword != "") && (db.Spec.AdminPasswordGsmSecretRef != nil) { + return fmt.Errorf("resources/validateSpec: invalid database admin password spec; you can only specify either admin_password or adminPasswordGsmSecretRef") + } + for _, u := range db.Spec.Users { + if (u.Password != "") && (u.GsmSecretRef != nil) { + return fmt.Errorf("resources/validateSpec: invalid database user password spec for user %q; you can only specify either password or GsmSecretRef", u.Name) + } + } + + if _, err := sql.Identifier(db.Spec.Name); err != nil { + return fmt.Errorf("resources/validateSpec: pdb name is not valid: %w", err) + } + if db.Spec.AdminPassword != "" { + if _, err := sql.Identifier(db.Spec.AdminPassword); err != nil { + return fmt.Errorf("resources/validateSpec: admin_password is not valid: %w", err) + } + } + for _, u := range db.Spec.Users { + if _, err := sql.ObjectName(u.Name); err != nil { + return fmt.Errorf("resources/validateSpec: invalid user %q: %w", u.Name, err) + } + if u.Password != "" { + if _, err := sql.Identifier(u.Password); err != nil { + return fmt.Errorf("resources/validateSpec: password for user %q is not valid: %w", u.Name, err) + } + } + for _, privilege := range u.Privileges { + if !sql.IsPrivilege(string(privilege)) { + return fmt.Errorf("resources/validateSpec: invalid privilege %q for user %q", privilege, u.Name) + } + } + } + + return nil +} diff --git a/oracle/controllers/databasecontroller/database_controller_test.go b/oracle/controllers/databasecontroller/database_controller_test.go new file mode 100644 index 0000000..4005900 --- /dev/null +++ b/oracle/controllers/databasecontroller/database_controller_test.go @@ -0,0 +1,254 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package databasecontroller + +import ( + "context" + "testing" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager + reconciler *DatabaseReconciler + fakeClientFactory *testhelpers.FakeClientFactory + DatabaseName = testhelpers.RandName("db1") + Namespace = testhelpers.RandName("ns1") +) + +func TestDatabaseController(t *testing.T) { + // Mock function returns. 
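+	// The reconciler exposes its external checks as package-level seams
+	// (skipLBCheckForTest, CheckStatusInstanceFunc), so the test can stub
+	// them out directly before the suite starts: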
+	skipLBCheckForTest = true
+	CheckStatusInstanceFunc = func(ctx context.Context, instName, cdbName, clusterIP, DBDomain string, log logr.Logger) (string, error) {
+		return "Ready", nil
+	}
+	fakeClientFactory = &testhelpers.FakeClientFactory{}
+	// Run test suite for database reconciler.
+	testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Database controller", func() []testhelpers.Reconciler {
+		reconciler = &DatabaseReconciler{
+			Client:        k8sManager.GetClient(),
+			Log:           ctrl.Log.WithName("controllers").WithName("Database"),
+			Scheme:        k8sManager.GetScheme(),
+			ClientFactory: fakeClientFactory,
+			Recorder:      k8sManager.GetEventRecorderFor("database-controller"),
+		}
+		return []testhelpers.Reconciler{reconciler}
+	})
+}
+
+var _ = Describe("Database controller", func() {
+	// Define utility constants for object names and testing timeouts and intervals.
+	const (
+		containerDatabaseName = "oracledb"
+		instanceName          = "mydb"
+		podName               = "podname"
+		svcName               = "mydb-svc"
+		svcAgentName          = "mydb-agent-svc"
+		adminPassword         = "pwd123"
+		userName              = "joice"
+		password              = "guess"
+		privileges            = "connect"
+		timeout               = time.Second * 15
+		interval              = time.Millisecond * 15
+	)
+
+	ctx := context.Background()
+
+	createdInstance := &v1alpha1.Instance{}
+	createdNs := &v1.Namespace{}
+	createdPod := &v1.Pod{}
+	createdAgentSvc := &v1.Service{}
+	createdSvc := &v1.Service{}
+	createdDatabase := &v1alpha1.Database{}
+
+	// Currently we only have a single create-database test; AfterEach
+	// serves as the final resource clean-up.
+	AfterEach(func() {
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdInstance)
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdNs)
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdPod)
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdAgentSvc)
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdSvc)
+		testhelpers.K8sDeleteWithRetry(k8sClient, ctx, createdDatabase)
+	})
+
+	Context("Setup database with manager", func() {
+		It("Should create a database successfully", func() {
+			By("By creating a namespace")
+
+			ns := &v1.Namespace{
+				TypeMeta: metav1.TypeMeta{Kind: "namespace"},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: Namespace,
+				},
+			}
+			testhelpers.K8sCreateAndGet(k8sClient, ctx, client.ObjectKey{Name: Namespace}, ns, createdNs)
+
+			By("By creating a new instance")
+			instance := &v1alpha1.Instance{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      instanceName,
+					Namespace: Namespace,
+					Labels:    map[string]string{"instance": instanceName},
+				},
+			}
+			testhelpers.K8sCreateAndGet(k8sClient, ctx, client.ObjectKey{Namespace: Namespace, Name: instanceName}, instance, createdInstance)
+
+			var sts appsv1.StatefulSetList
+			Expect(k8sClient.List(ctx, &sts, client.InNamespace(Namespace))).Should(Succeed())
+			Expect(len(sts.Items) == 1)
+
+			By("By creating a pod")
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      podName,
+					Namespace: Namespace,
+					Labels:    map[string]string{"instance": instanceName},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:    "oracledb",
+							Image:   "image",
+							Command: []string{"cmd"},
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceCPU: resource.MustParse("0m"),
+								},
+							},
+						},
+					},
+				},
+			}
+			testhelpers.K8sCreateAndGet(k8sClient, ctx, client.ObjectKey{Name: podName, Namespace: Namespace}, pod, createdPod)
+
+			By("By creating a service")
+			svc := &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      svcName,
+					Namespace: Namespace,
+				},
+				Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port:
8787}}}, + } + objKey := client.ObjectKey{Name: svcName, Namespace: Namespace} + testhelpers.K8sCreateAndGet(k8sClient, ctx, objKey, svc, createdSvc) + + By("By creating an agent service") + AgentSvc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcAgentName, + Namespace: Namespace, + }, + Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 8788}}}, + } + testhelpers.K8sCreateAndGet(k8sClient, ctx, client.ObjectKey{Name: svcAgentName, Namespace: Namespace}, AgentSvc, createdAgentSvc) + + By("By creating/reconciling a database") + // Note that reconcile will be called automatically + // by kubernetes runtime on database creation. + database := &v1alpha1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: Namespace, + Name: DatabaseName, + Labels: map[string]string{"instance": instanceName}, + }, + Spec: v1alpha1.DatabaseSpec{ + DatabaseSpec: commonv1alpha1.DatabaseSpec{ + Name: DatabaseName, + Instance: instanceName, + }, + AdminPassword: adminPassword, + Users: []v1alpha1.UserSpec{ + {UserSpec: commonv1alpha1.UserSpec{Name: userName, CredentialSpec: commonv1alpha1.CredentialSpec{Password: password}}, Privileges: []v1alpha1.PrivilegeSpec{privileges}}, + {UserSpec: commonv1alpha1.UserSpec{Name: "testUser2", CredentialSpec: commonv1alpha1.CredentialSpec{Password: password}}, Privileges: []v1alpha1.PrivilegeSpec{privileges}}, + {UserSpec: commonv1alpha1.UserSpec{Name: "testUser3", CredentialSpec: commonv1alpha1.CredentialSpec{Password: password}}, Privileges: []v1alpha1.PrivilegeSpec{privileges}}, + {UserSpec: commonv1alpha1.UserSpec{Name: "testUser4", CredentialSpec: commonv1alpha1.CredentialSpec{Password: password}}, Privileges: []v1alpha1.PrivilegeSpec{privileges}}, + }, + }, + } + DbObjKey := client.ObjectKey{Namespace: Namespace, Name: DatabaseName} + testhelpers.K8sCreateAndGet(k8sClient, ctx, DbObjKey, database, createdDatabase) + + Expect(k8sClient.List(ctx, &sts, client.InNamespace(DatabaseName))).Should(Succeed()) + Expect(len(sts.Items) == 1) + + By("By checking that the updated database succeeded") + var updatedDatabase v1alpha1.Database + Expect(k8sClient.Get(ctx, DbObjKey, &updatedDatabase)).Should(Succeed()) + Eventually(func() (commonv1alpha1.DatabasePhase, error) { + return getPhase(ctx, DbObjKey) + }, timeout, interval).Should(Equal(commonv1alpha1.DatabaseReady)) + + By("checking database ready status") + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, DbObjKey, k8s.Ready) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + + By("checking the user names") + Eventually(func() ([]string, error) { + return getUserNames(ctx, DbObjKey) + }, timeout, interval).Should(Equal([]string{userName, "testUser2", "testUser3", "..."})) + + By("checking user ready status") + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, DbObjKey, k8s.UserReady) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + }) + }) +}) + +func getPhase(ctx context.Context, objKey client.ObjectKey) (commonv1alpha1.DatabasePhase, error) { + var database v1alpha1.Database + if err := k8sClient.Get(ctx, objKey, &database); err != nil { + return "", err + } + return database.Status.Phase, nil +} + +func getConditionStatus(ctx context.Context, objKey client.ObjectKey, condType string) (metav1.ConditionStatus, error) { + var database v1alpha1.Database + if err := k8sClient.Get(ctx, objKey, &database); err != nil { + return metav1.ConditionFalse, err + } + if cond := k8s.FindCondition(database.Status.Conditions, 
condType); cond != nil { + return cond.Status, nil + } + return metav1.ConditionFalse, nil +} + +func getUserNames(ctx context.Context, objKey client.ObjectKey) ([]string, error) { + var database v1alpha1.Database + if err := k8sClient.Get(ctx, objKey, &database); err != nil { + return []string{""}, err + } + return database.Status.UserNames, nil +} diff --git a/oracle/controllers/databasecontroller/database_resources.go b/oracle/controllers/databasecontroller/database_resources.go new file mode 100644 index 0000000..b44356f --- /dev/null +++ b/oracle/controllers/databasecontroller/database_resources.go @@ -0,0 +1,349 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package databasecontroller + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/go-logr/logr" + "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/integer" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + k8s "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +const ( + gsmResourceVersionString = "projects/%s/secrets/%s/versions/%s" + pdbAdminUserName = "GPDB_ADMIN" +) + +var ( + dialTimeout = 3 * time.Minute +) + +// NewDatabase attempts to create a new PDB if it doesn't exist yet. +// The first return value of NewDatabase is "bail out or not?". +// If a PDB is new, just created now, NewDatabase returns bail=false. +// If it's an existing PDB, NewDatabase returns bail=true (so that the rest +// of the workflow, e.g. creating users step, is not attempted). +func NewDatabase(ctx context.Context, r *DatabaseReconciler, db *v1alpha1.Database, clusterIP, dbDomain, cdbName string, log logr.Logger) (bool, error) { + log.Info("resources/NewDatabase: new database requested", "db", db, "clusterIP", clusterIP) + r.Recorder.Eventf(db, corev1.EventTypeNormal, k8s.CreatingDatabase, fmt.Sprintf("Creating new database %q", db.Spec.Name)) + + // Establish a connection to a Config Agent. + ctx, cancel := context.WithTimeout(ctx, dialTimeout) + defer cancel() + + caClient, closeConn, err := r.ClientFactory.New(ctx, r, db.Namespace, db.Spec.Instance) + if err != nil { + log.Error(err, "resources/NewDatabase: failed to create config agent client") + return false, err + } + defer closeConn() + + req := &capb.CreateDatabaseRequest{ + Name: db.Spec.Name, + CdbName: cdbName, + DbDomain: dbDomain, + } + userVerStr := "" + // database_controller.validateSpec has validated the spec earlier; + // So no duplicated validation here. 
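+	// The "user resource version" recorded in status is either the plain
+	// password or, for GSM references, a string built from
+	// gsmResourceVersionString, e.g. (illustrative values):
+	//
+	//	projects/my-project/secrets/admin-pwd/versions/1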
+	if db.Spec.AdminPassword != "" {
+		userVerStr = db.Spec.AdminPassword
+		req.Password = db.Spec.AdminPassword
+		if lastPwd, ok := db.Status.UserResourceVersions[pdbAdminUserName]; ok {
+			req.LastPassword = lastPwd
+		}
+	}
+	if db.Spec.AdminPasswordGsmSecretRef != nil {
+		userVerStr = fmt.Sprintf(gsmResourceVersionString, db.Spec.AdminPasswordGsmSecretRef.ProjectId, db.Spec.AdminPasswordGsmSecretRef.SecretId, db.Spec.AdminPasswordGsmSecretRef.Version)
+		ref := &capb.GsmSecretReference{
+			ProjectId: db.Spec.AdminPasswordGsmSecretRef.ProjectId,
+			SecretId:  db.Spec.AdminPasswordGsmSecretRef.SecretId,
+			Version:   db.Spec.AdminPasswordGsmSecretRef.Version,
+		}
+		if lastVer, ok := db.Status.UserResourceVersions[pdbAdminUserName]; ok {
+			ref.LastVersion = lastVer
+		}
+		req.AdminPasswordGsmSecretRef = ref
+	}
+	cdOut, err := caClient.CreateDatabase(ctx, req)
+	if err != nil {
+		return false, fmt.Errorf("resources/NewDatabase: failed on CreateDatabase gRPC call: %v", err)
+	}
+	log.Info("resources/NewDatabase: CreateDatabase DONE with this output", "out", cdOut)
+
+	// "AdminUserSyncCompleted" status indicates the PDB existed
+	// and the admin user sync completed.
+	if cdOut != nil && cdOut.Status == "AdminUserSyncCompleted" {
+		r.Recorder.Eventf(db, corev1.EventTypeWarning, k8s.DatabaseAlreadyExists, fmt.Sprintf("Database %q already exists, sync admin user performed", db.Spec.Name))
+		// Update user version status map after newly synced database admin user.
+		// The caller will update the status by r.Status().Update.
+		if db.Status.UserResourceVersions == nil {
+			db.Status.UserResourceVersions = make(map[string]string)
+		}
+		db.Status.UserResourceVersions[pdbAdminUserName] = userVerStr
+		// Return true to indicate the PDB already existed; the PDB admin user
+		// version recorded above still needs to be synced by the caller.
+		// The caller will trigger syncUser instead of createUser later.
+		return true, nil
+	}
+
+	// Indicates the underlying database exists and the admin user is in sync with the config.
+	if cdOut != nil && cdOut.Status == "AlreadyExists" {
+		r.Recorder.Eventf(db, corev1.EventTypeWarning, k8s.DatabaseAlreadyExists, fmt.Sprintf("Database %q already exists", db.Spec.Name))
+		return true, nil
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		log.Error(err, "resources/NewDatabase: failed to get a hostname")
+	}
+
+	log.V(1).Info("resources/NewDatabase: new database requested: DONE", "hostname", hostname)
+	// Update user version status map after newly created database.
+	// The caller will update the status by r.Status().Update.
+	if db.Status.UserResourceVersions == nil {
+		db.Status.UserResourceVersions = make(map[string]string)
+	}
+	db.Status.UserResourceVersions[pdbAdminUserName] = userVerStr
+	return false, nil
+}
+
+// NewUsers attempts to create the users requested in the database spec.
+func NewUsers(ctx context.Context, r *DatabaseReconciler, db *v1alpha1.Database, clusterIP, dbDomain, cdbName string, log logr.Logger) error {
+	log.Info("resources/NewUsers: new database users requested", "dbName", db.Spec.Name, "clusterIP", clusterIP, "requestedUsers", db.Spec.Users)
+	var usernames, usersCmds, grantsCmds []string
+	var userSpecs []*capb.User
+	userVerMap := make(map[string]string)
+	// Copy pdb admin user version into local map to sync later.
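+	// Note: the status user list built below is capped at three names, with a
+	// fourth entry "..." marking truncation; e.g. users [u1 u2 u3 u4] are
+	// reported as [u1 u2 u3 ...] (SyncUsers applies the same convention).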
+	if v, ok := db.Status.UserResourceVersions[pdbAdminUserName]; ok {
+		userVerMap[pdbAdminUserName] = v
+	}
+	for k, u := range db.Spec.Users {
+		log.Info("create user", "user#", k, "username", u.Name)
+		if len(usernames) < 3 {
+			usernames = append(usernames, u.Name)
+		} else if len(usernames) == 3 {
+			usernames = append(usernames, "...")
+		}
+		// database_controller.validateSpec has validated the spec earlier,
+		// so no duplicated validation here.
+		if u.Password != "" {
+			usersCmds = append(usersCmds, sql.QueryCreateUser(u.Name, u.Password))
+			userVerMap[u.Name] = u.Password
+		}
+		if u.GsmSecretRef != nil {
+			userSpecs = append(userSpecs, &capb.User{
+				Name: u.Name,
+				PasswordGsmSecretRef: &capb.GsmSecretReference{
+					ProjectId: u.GsmSecretRef.ProjectId,
+					SecretId:  u.GsmSecretRef.SecretId,
+					Version:   u.GsmSecretRef.Version,
+				}})
+			userVerMap[u.Name] = fmt.Sprintf(gsmResourceVersionString, u.GsmSecretRef.ProjectId, u.GsmSecretRef.SecretId, u.GsmSecretRef.Version)
+		}
+
+		for _, p := range u.Privileges {
+			grantsCmds = append(grantsCmds, sql.QueryGrantPrivileges(string(p), u.Name))
+		}
+	}
+
+	r.Recorder.Eventf(db, corev1.EventTypeNormal, k8s.CreatingUser, "Creating new users %v", usernames)
+
+	// Establish a connection to a Config Agent.
+	ctx, cancel := context.WithTimeout(ctx, dialTimeout)
+	defer cancel()
+
+	caClient, closeConn, err := r.ClientFactory.New(ctx, r, db.Namespace, db.Spec.Instance)
+	if err != nil {
+		log.Error(err, "resources/NewUsers: failed to create config agent client")
+		return err
+	}
+	defer closeConn()
+
+	req := &capb.CreateUsersRequest{
+		CdbName:       cdbName,
+		PdbName:       db.Spec.Name,
+		GrantPrivsCmd: grantsCmds,
+		DbDomain:      dbDomain,
+	}
+	if usersCmds != nil {
+		req.CreateUsersCmd = usersCmds
+	}
+	if userSpecs != nil {
+		req.User = userSpecs
+	}
+	cdOut, err := caClient.CreateUsers(ctx, req)
+	if err != nil {
+		log.Error(err, "resources/NewUsers: failed on CreateUsers gRPC call")
+		return err
+	}
+	log.Info("resources/NewUsers: CreateUsers succeeded with this output", "output", cdOut)
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		log.Error(err, "resources/NewUsers: failed to get a hostname")
+	}
+	log.V(1).Info("resources/NewUsers: new database users requested: DONE", "hostname", hostname)
+	r.Recorder.Eventf(db, corev1.EventTypeNormal, k8s.CreatedUser, "Created new users %v", usernames)
+
+	db.Status.Conditions = k8s.Upsert(db.Status.Conditions, k8s.UserReady, v1.ConditionTrue, k8s.CreateComplete, "")
+	db.Status.UserNames = usernames
+	db.Status.UserResourceVersions = userVerMap
+	r.updateIsChangeApplied(ctx, db)
+	if err := r.Status().Update(ctx, db); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SyncUsers attempts to update PDB users.
+func SyncUsers(ctx context.Context, r *DatabaseReconciler, db *v1alpha1.Database, clusterIP, cdbName string, log logr.Logger) error {
+	// Establish a connection to a Config Agent.
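+	// Note: unlike NewDatabase/NewUsers above, which obtain a client through
+	// r.ClientFactory, this path dials the Config Agent directly at
+	// clusterIP:consts.DefaultConfigAgentPort.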
+ log.Info("resources/syncUsers: sync database users requested", "db", db, "clusterIP", clusterIP) + r.Recorder.Eventf(db, corev1.EventTypeNormal, k8s.SyncingUser, fmt.Sprintf("Syncing users for database %q", db.Spec.Name)) + + ctx, cancel := context.WithTimeout(ctx, dialTimeout) + defer cancel() + + conn, err := grpc.Dial(fmt.Sprintf("%s:%d", clusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure()) + if err != nil { + log.Error(err, "resources/syncUsers: failed to create a conn via gRPC.Dial") + return err + } + defer conn.Close() + + caClient := capb.NewConfigAgentClient(conn) + var userSpecs []*capb.User + var usernames []string + userVerMap := make(map[string]string) + // Copy pdb admin user version into local map to sync later. + if v, ok := db.Status.UserResourceVersions[pdbAdminUserName]; ok { + userVerMap[pdbAdminUserName] = v + } + for _, user := range db.Spec.Users { + var privs []string + usernames = append(usernames, user.Name) + for _, specPriv := range user.Privileges { + privs = append(privs, string(specPriv)) + } + userSpec := &capb.User{ + Name: user.Name, + Privileges: privs, + } + // database_controller.validateSpec has validated the spec earlier; + // So no duplicated validation here. + if user.Password != "" { + userVerMap[user.Name] = user.Password + userSpec.Password = user.Password + lastPwd, ok := db.Status.UserResourceVersions[user.Name] + if ok { + userSpec.LastPassword = lastPwd + } + } + if user.GsmSecretRef != nil { + userVerMap[user.Name] = fmt.Sprintf(gsmResourceVersionString, user.GsmSecretRef.ProjectId, user.GsmSecretRef.SecretId, user.GsmSecretRef.Version) + ref := &capb.GsmSecretReference{ + ProjectId: user.GsmSecretRef.ProjectId, + SecretId: user.GsmSecretRef.SecretId, + Version: user.GsmSecretRef.Version, + } + if lastVer, ok := db.Status.UserResourceVersions[user.Name]; ok { + ref.LastVersion = lastVer + } + userSpec.PasswordGsmSecretRef = ref + } + userSpecs = append(userSpecs, userSpec) + } + resp, err := caClient.UsersChanged(ctx, &capb.UsersChangedRequest{ + PdbName: db.Spec.Name, + UserSpecs: userSpecs, + }) + if err != nil { + log.Error(err, "resources/syncUsers: failed on UsersChanged gRPC call") + return err + } + + if resp.GetChanged() { + db.Status.Phase = commonv1alpha1.DatabaseUpdating + db.Status.Conditions = k8s.Upsert(db.Status.Conditions, k8s.UserReady, v1.ConditionFalse, k8s.SyncInProgress, "") + if err := r.Status().Update(ctx, db); err != nil { + return err + } + log.Info("resources/syncUsers: update database users requested", "CDB", cdbName, "PDB", db.Spec.Name) + if _, err := caClient.UpdateUsers(ctx, &capb.UpdateUsersRequest{ + PdbName: db.Spec.Name, + UserSpecs: userSpecs, + }); err != nil { + log.Error(err, "resources/syncUsers: failed on UpdateUser gRPC call") + return err + } + log.Info("resources/syncUsers: update database users done", "CDB", cdbName, "PDB", db.Spec.Name) + } + log.Info("resources/syncUsers: sync database users done", "CDB", cdbName, "PDB", db.Spec.Name) + + userReady := &v1.Condition{ + Type: k8s.UserReady, + Status: v1.ConditionTrue, + Reason: k8s.SyncComplete, + Message: "", + } + + if len(resp.GetSuppressed()) != 0 { + userReady.Status = v1.ConditionFalse + userReady.Reason = k8s.UserOutOfSync + var msg []string + for _, u := range resp.GetSuppressed() { + if u.SuppressType == capb.UsersChangedResponse_DELETE { + msg = append(msg, fmt.Sprintf("User %q not defined in database spec, "+ + "supposed to be deleted. suppressed SQL %q. 
Fix by deleting the user in DB or updating DB spec to include the user", u.GetUserName(), u.GetSql())) + } else if u.SuppressType == capb.UsersChangedResponse_CREATE { + msg = append(msg, fmt.Sprintf("User %q cannot be created, "+ + "password is not provided. Fix by creating the user in DB or updating DB spec to include password", u.GetUserName())) + } + } + userReady.Message = strings.Join(msg, ".") + } + + if k8s.ConditionStatusEquals(userReady, v1.ConditionTrue) { + r.Recorder.Eventf(db, corev1.EventTypeNormal, k8s.SyncedUser, fmt.Sprintf("Synced users for database %q", db.Spec.Name)) + } else { + r.Recorder.Eventf(db, corev1.EventTypeWarning, k8s.FailedToSyncUser, fmt.Sprintf("Failed to sync users for database %q, %s", db.Spec.Name, userReady.Message)) + } + + db.Status.Conditions = k8s.Upsert(db.Status.Conditions, userReady.Type, userReady.Status, userReady.Reason, userReady.Message) + db.Status.UserResourceVersions = userVerMap + db.Status.UserNames = usernames[0:integer.IntMin(3, len(usernames))] + if len(usernames) > 3 { + db.Status.UserNames = append(db.Status.UserNames, "...") + } + r.updateIsChangeApplied(ctx, db) + if err := r.Status().Update(ctx, db); err != nil { + return err + } + return nil +} diff --git a/oracle/controllers/exec.go b/oracle/controllers/exec.go new file mode 100644 index 0000000..f865da9 --- /dev/null +++ b/oracle/controllers/exec.go @@ -0,0 +1,93 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "bytes" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/util/retry" + log "k8s.io/klog/v2" +) + +// ExecCmdParams stores parameters for invoking pod/exec. +type ExecCmdParams struct { + Pod string + Ns string + Con *corev1.Container + Sch *runtime.Scheme + RestConfig *rest.Config + Client kubernetes.Interface +} + +// ExecCmdFunc invokes pod/exec. +var ExecCmdFunc = func(p ExecCmdParams, cmd string) (string, error) { + var cmdOut, cmdErr bytes.Buffer + + cmdShell := []string{"sh", "-c", cmd} + + req := p.Client.CoreV1().RESTClient().Post().Resource("pods").Name(p.Pod). 
+		Namespace(p.Ns).SubResource("exec")
+
+	req.VersionedParams(&corev1.PodExecOptions{
+		Container: p.Con.Name,
+		Command:   cmdShell,
+		Stdout:    true,
+		Stderr:    true,
+	}, scheme.ParameterCodec)
+
+	exec, err := remotecommand.NewSPDYExecutor(p.RestConfig, "POST", req.URL())
+	if err != nil {
+		return "", fmt.Errorf("failed to init executor: %v", err)
+	}
+
+	// exec.Stream might return a timeout error; use a backoff with 4 retries
+	// (delays of roughly 100ms, 500ms, 2.5s, 12.5s).
+	var backoff = wait.Backoff{
+		Steps:    4,
+		Duration: 100 * time.Millisecond,
+		Factor:   5.0,
+		Jitter:   0.1,
+	}
+	if err := retry.OnError(backoff, func(error) bool { return true }, func() error {
+		e := exec.Stream(remotecommand.StreamOptions{
+			Stdout: &cmdOut,
+			Stderr: &cmdErr,
+			Tty:    false,
+		})
+		if e != nil {
+			log.Error(fmt.Sprintf("exec.Stream failed, retrying, err: %v, stderr: %v, stdout: %v",
+				e, cmdErr.String(), cmdOut.String()))
+		}
+		return e
+	}); err != nil {
+		return "", fmt.Errorf("failed to run a command [%v], err: %v, stderr: %v, stdout: %v",
+			cmd, err, cmdErr.String(), cmdOut.String())
+	}
+
+	if cmdErr.Len() > 0 {
+		return "", fmt.Errorf("stderr: %v", cmdErr.String())
+	}
+
+	return cmdOut.String(), nil
+}
diff --git a/oracle/controllers/exportcontroller/BUILD.bazel b/oracle/controllers/exportcontroller/BUILD.bazel
new file mode 100644
index 0000000..8234aef
--- /dev/null
+++ b/oracle/controllers/exportcontroller/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "exportcontroller",
+    srcs = ["export_controller.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/exportcontroller",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers",
+        "//oracle/pkg/agents/config_agent/protos",
+        "//oracle/pkg/k8s",
+        "@com_github_go_logr_logr//:logr",
+        "@io_k8s_api//core/v1:core",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_apimachinery//pkg/runtime",
+        "@io_k8s_apimachinery//pkg/types",
+        "@io_k8s_client_go//tools/record",
+        "@io_k8s_sigs_controller_runtime//:controller-runtime",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+    ],
+)
+
+go_test(
+    name = "exportcontroller_test",
+    srcs = ["export_controller_test.go"],
+    embed = [":exportcontroller"],
+    deps = [
+        "//common/api/v1alpha1",
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers/testhelpers",
+        "//oracle/pkg/k8s",
+        "@com_github_onsi_ginkgo//:ginkgo",
+        "@com_github_onsi_gomega//:gomega",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_sigs_controller_runtime//:controller-runtime",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+    ],
+)
diff --git a/oracle/controllers/exportcontroller/export_controller.go b/oracle/controllers/exportcontroller/export_controller.go
new file mode 100644
index 0000000..6676401
--- /dev/null
+++ b/oracle/controllers/exportcontroller/export_controller.go
@@ -0,0 +1,277 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package exportcontroller + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +// ExportReconciler reconciles an export object. +type ExportReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ClientFactory controllers.ConfigAgentClientFactory + Recorder record.EventRecorder +} + +const ( + reconcileTimeout = 3 * time.Minute +) + +// readyConditionWrapper simplifies updating and using Ready condition +// of Export's status. +type readyConditionWrapper struct { + exp *v1alpha1.Export + changed bool + defaultState string +} + +func (w *readyConditionWrapper) getState() string { + readyCond := k8s.FindCondition(w.exp.Status.Conditions, k8s.Ready) + if readyCond == nil { + w.setState(w.defaultState, "") + } + + return k8s.FindCondition((&w.exp.Status).Conditions, k8s.Ready).Reason +} + +func (w *readyConditionWrapper) setState(condReason, message string) { + status := &w.exp.Status + + condStatus := metav1.ConditionFalse + if condReason == k8s.ExportComplete { + condStatus = metav1.ConditionTrue + } + + status.Conditions = k8s.Upsert(status.Conditions, k8s.Ready, condStatus, condReason, message) + w.changed = true +} + +func (w *readyConditionWrapper) elapsedSinceLastStateChange() time.Duration { + return k8s.ElapsedTimeFromLastTransitionTime(k8s.FindCondition(w.exp.Status.Conditions, k8s.Ready), time.Second) +} + +var ( + requeueSoon = ctrl.Result{RequeueAfter: 30 * time.Second} + requeueLater = ctrl.Result{RequeueAfter: time.Minute} +) + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=exports,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=exports/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances,verbs=get;list;watch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances/status,verbs=get +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases,verbs=get;list;watch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases/status,verbs=get +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// Reconcile is a generic reconcile function for Export resources. 
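+//
+// The Ready condition reason acts as a small state machine (see
+// readyConditionWrapper above):
+//
+//	ExportPending -> ExportInProgress -> ExportComplete
+//	                                  -> ExportFailed
+//
+// The pending and in-progress states requeue via requeueSoon/requeueLater;
+// the terminal states are left alone.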
+func (r *ExportReconciler) Reconcile(req ctrl.Request) (result ctrl.Result, recErr error) { + log := r.Log.WithValues("Export", req.NamespacedName) + log.Info("reconciling export") + ctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout) + defer cancel() + + exp := &v1alpha1.Export{} + if err := r.Get(ctx, req.NamespacedName, exp); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + expStatusWrapper := &readyConditionWrapper{exp: exp, defaultState: k8s.ExportPending} + defer func() { + if !expStatusWrapper.changed { + return + } + if err := r.Status().Update(ctx, exp); err != nil { + log.Error(err, "failed to update the export status") + if recErr == nil { + recErr = err + } + } + }() + + switch expStatusWrapper.getState() { + case k8s.ExportPending: + return r.handleNotStartedExport(ctx, log, expStatusWrapper, req) + case k8s.ExportInProgress: + return r.handleRunningExport(ctx, log, expStatusWrapper, req) + default: + log.Info(fmt.Sprintf("export is in the state %q, no action needed", expStatusWrapper.getState())) + return ctrl.Result{}, nil + } + +} + +func (r *ExportReconciler) handleNotStartedExport(ctx context.Context, log logr.Logger, expWrapper *readyConditionWrapper, req ctrl.Request) (ctrl.Result, error) { + var ( + db = &v1alpha1.Database{} + inst = &v1alpha1.Instance{} + exp = expWrapper.exp + ) + + // get referenced objects: database and instance + dbKey := types.NamespacedName{ + Namespace: req.Namespace, + Name: exp.Spec.DatabaseName, + } + if err := r.Get(ctx, dbKey, db); err != nil { + log.Error(err, "error getting database", "database", dbKey) + return ctrl.Result{}, err + } + + instKey := types.NamespacedName{ + Namespace: req.Namespace, + Name: exp.Spec.Instance, + } + if err := r.Get(ctx, instKey, inst); err != nil { + log.Error(err, "error getting instance", "instance", instKey) + return ctrl.Result{}, err + } + + // validate + if exp.Spec.Instance != db.Spec.Instance { + return ctrl.Result{}, fmt.Errorf("instance names in Export and Database specs do not match:"+ + " %q != %q", exp.Spec.Instance, db.Spec.Instance) + } + if len(exp.Spec.ExportObjects) == 0 { + return ctrl.Result{}, fmt.Errorf("no object to export, exportObjects: %v", exp.Spec.ExportObjects) + } + + dbReady := k8s.ConditionStatusEquals( + k8s.FindCondition(db.Status.Conditions, k8s.Ready), + metav1.ConditionTrue) + + // if can start, begin export + if dbReady { + caClient, closeConn, err := r.ClientFactory.New(ctx, r, req.Namespace, exp.Spec.Instance) + if err != nil { + log.Error(err, "failed to create config agent client") + return ctrl.Result{}, err + } + defer closeConn() + + resp, err := caClient.DataPumpExport(ctx, &capb.DataPumpExportRequest{ + PdbName: db.Spec.Name, + DbDomain: inst.Spec.DBDomain, + ObjectType: exp.Spec.ExportObjectType, + Objects: strings.Join(exp.Spec.ExportObjects, ","), + GcsPath: exp.Spec.GcsPath, + GcsLogPath: exp.Spec.GcsLogPath, + LroInput: &capb.LROInput{OperationId: lroOperationID(exp)}, + FlashbackTime: getFlashbackTime(exp.Spec.FlashbackTime), + }) + + if err != nil { + if !controllers.IsAlreadyExistsError(err) { + expWrapper.setState(k8s.ExportPending, fmt.Sprintf("failed to start export: %v", err)) + return ctrl.Result{}, fmt.Errorf("failed to start export: %v", err) + + } + log.Info("Export operation was already running") + } else { + log.Info("started DataPumpExport operation", "response", resp) + } + + // Export started successfully + expWrapper.setState(k8s.ExportInProgress, "") + + } else { + log.Info("database is not 
yet ready") + } + + return requeueSoon, nil +} + +func (r *ExportReconciler) handleRunningExport(ctx context.Context, log logr.Logger, expWrapper *readyConditionWrapper, req ctrl.Request) (ctrl.Result, error) { + exp := expWrapper.exp + operationID := lroOperationID(exp) + + // check export LRO status + operation, err := controllers.GetLROOperation(r.ClientFactory, ctx, r, req.Namespace, operationID, exp.Spec.Instance) + if err != nil { + log.Error(err, "GetLROOperation returned an error") + return ctrl.Result{}, err + } + log.Info("GetLROOperation", "response", operation) + + if !operation.Done { + return requeueLater, nil + } + + // handle export LRO completion + log.Info("LRO is DONE", "operationID", operationID) + defer func() { + _ = controllers.DeleteLROOperation(r.ClientFactory, ctx, r, req.Namespace, operationID, exp.Spec.Instance) + }() + + if operation.GetError() != nil { + expWrapper.setState( + k8s.ExportFailed, + fmt.Sprintf("Failed to export objectType %s objects %v on %s to %s: %s", + exp.Spec.ExportObjectType, exp.Spec.ExportObjects, + time.Now().Format(time.RFC3339), exp.Spec.GcsPath, operation.GetError().GetMessage())) + + r.Recorder.Eventf(exp, corev1.EventTypeWarning, k8s.ExportFailed, fmt.Sprintf("Export error: %v", operation.GetError().GetMessage())) + + return ctrl.Result{}, err + } + + // successful completion + if expWrapper.getState() != k8s.ExportComplete { + r.Recorder.Eventf(exp, corev1.EventTypeNormal, k8s.ExportComplete, + "Export has completed successfully. Elapsed Time: %v", expWrapper.elapsedSinceLastStateChange()) + } + expWrapper.setState(k8s.ExportComplete, fmt.Sprintf("Exported objectType %s objects %v on %s to %s", + exp.Spec.ExportObjectType, exp.Spec.ExportObjects, + time.Now().Format(time.RFC3339), exp.Spec.GcsPath)) + + return ctrl.Result{}, nil +} + +// SetupWithManager configures the reconciler. +func (r *ExportReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.Export{}). + Complete(r) +} + +func lroOperationID(exp *v1alpha1.Export) string { + return fmt.Sprintf("Export_%s", exp.GetUID()) +} + +func getFlashbackTime(t *metav1.Time) string { + var flashbackTime = "" + if t != nil { + flashbackTime = fmt.Sprintf("TO_TIMESTAMP('%s', 'DD-MM-YYYY HH24:MI:SS')", t.Format("02-01-2006 15:04:05")) + } + return flashbackTime +} diff --git a/oracle/controllers/exportcontroller/export_controller_test.go b/oracle/controllers/exportcontroller/export_controller_test.go new file mode 100644 index 0000000..65a234c --- /dev/null +++ b/oracle/controllers/exportcontroller/export_controller_test.go @@ -0,0 +1,237 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exportcontroller + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager + reconciler *ExportReconciler + fakeClientFactory *testhelpers.FakeClientFactory +) + +func TestExportController(t *testing.T) { + fakeClientFactory = &testhelpers.FakeClientFactory{} + + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Export controller", func() []testhelpers.Reconciler { + reconciler = &ExportReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Export"), + Scheme: k8sManager.GetScheme(), + ClientFactory: fakeClientFactory, + Recorder: k8sManager.GetEventRecorderFor("export-controller"), + } + + return []testhelpers.Reconciler{reconciler} + }) +} + +var _ = Describe("Export controller", func() { + const ( + namespace = "default" + exportName = "test-export" + instanceName = "test-instance" + databaseName = "pdb1" + adminPassword = "pwd123" + timeout = time.Second * 15 + interval = time.Millisecond * 15 + ) + + var ( + instance *v1alpha1.Instance + database *v1alpha1.Database + export *v1alpha1.Export + dbObjKey client.ObjectKey + objKey client.ObjectKey + fakeConfigAgentClient *testhelpers.FakeConfigAgentClient + ) + ctx := context.Background() + + BeforeEach(func() { + By("creating an instance") + instance = &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: testhelpers.RandName(instanceName), + Namespace: namespace, + }, + } + Expect(k8sClient.Create(ctx, instance)).Should(Succeed()) + instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, "") + Expect(k8sClient.Status().Update(ctx, instance)).Should(Succeed()) + + createdInstance := &v1alpha1.Instance{} + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instance.Name}, createdInstance) + }, timeout, interval).Should(Succeed()) + + By("creating a database") + database = &v1alpha1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: databaseName, + Namespace: namespace, + }, + Spec: v1alpha1.DatabaseSpec{ + DatabaseSpec: commonv1alpha1.DatabaseSpec{ + Name: databaseName, + Instance: instance.Name, + }, + AdminPassword: adminPassword, + Users: []v1alpha1.UserSpec{}, + }, + } + Expect(k8sClient.Create(ctx, database)).Should(Succeed()) + + dbObjKey = client.ObjectKey{Namespace: namespace, Name: databaseName} + createdDatabase := &v1alpha1.Database{} + Eventually( + func() error { + return k8sClient.Get(ctx, dbObjKey, createdDatabase) + }, timeout, interval).Should(Succeed()) + + fakeClientFactory.Reset() + fakeConfigAgentClient = fakeClientFactory.Caclient + }) + + AfterEach(func() { + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, database)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, export)).Should(Succeed()) + }) + + CreateExport := func() { + By("creating a new export") + export = &v1alpha1.Export{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: exportName, + }, + Spec: v1alpha1.ExportSpec{ + Instance: 
instance.Name, + DatabaseName: databaseName, + Type: "DataPump", + ExportObjectType: "Schemas", + ExportObjects: []string{"scott"}, + FlashbackTime: &metav1.Time{Time: time.Now()}, + }, + } + + objKey = client.ObjectKey{Namespace: namespace, Name: exportName} + Expect(k8sClient.Create(ctx, export)).Should(Succeed()) + } + + SetDatabaseReadyStatus := func(cond metav1.ConditionStatus) { + By("setting database ready status") + database.Status.Conditions = k8s.Upsert(database.Status.Conditions, k8s.Ready, cond, k8s.CreateComplete, "") + Expect(k8sClient.Status().Update(ctx, database)).Should(Succeed()) + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, dbObjKey, k8s.Ready) + }, timeout, interval).Should(Equal(cond)) + } + + Context("export through data pump", func() { + It("should mark export as pending", func() { + SetDatabaseReadyStatus(metav1.ConditionFalse) + CreateExport() + + By("verifying export is pending") + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.ExportPending)) + + By("verifying post-conditions") + Expect(fakeConfigAgentClient.DataPumpExportCalledCnt()).Should(Equal(0)) + Expect(fakeConfigAgentClient.DeleteOperationCalledCnt()).Should(Equal(0)) + }) + + It("should mark export as complete", func() { + SetDatabaseReadyStatus(metav1.ConditionTrue) + + By("setting LRO status to Done") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDone + + CreateExport() + + By("checking export condition") + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.ExportComplete)) + + By("verifying post-conditions") + Expect(fakeConfigAgentClient.DataPumpExportCalledCnt()).Should(Equal(1)) + Expect(fakeConfigAgentClient.DeleteOperationCalledCnt()).Should(Equal(1)) + }) + + It("should mark export as failed", func() { + SetDatabaseReadyStatus(metav1.ConditionTrue) + + By("setting LRO status to DoneWithError") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDoneWithError + + CreateExport() + + By("checking export has failed") + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.ExportFailed)) + + By("verifying post-conditions") + Expect(fakeConfigAgentClient.DataPumpExportCalledCnt()).Should(Equal(1)) + Expect(fakeConfigAgentClient.DeleteOperationCalledCnt()).Should(Equal(1)) + }) + }) +}) + +func getConditionReason(ctx context.Context, objKey client.ObjectKey, condType string) (string, error) { + var export v1alpha1.Export + + if err := k8sClient.Get(ctx, objKey, &export); err != nil { + return "", err + } + + cond := k8s.FindCondition(export.Status.Conditions, condType) + if cond == nil { + return "", fmt.Errorf("%v condition type not found", condType) + } + return cond.Reason, nil +} + +func getConditionStatus(ctx context.Context, objKey client.ObjectKey, condType string) (metav1.ConditionStatus, error) { + var database v1alpha1.Database + if err := k8sClient.Get(ctx, objKey, &database); err != nil { + return metav1.ConditionFalse, err + } + if cond := k8s.FindCondition(database.Status.Conditions, condType); cond != nil { + return cond.Status, nil + } + return metav1.ConditionFalse, nil +} diff --git a/oracle/controllers/grpc_error.go b/oracle/controllers/grpc_error.go new file mode 100644 index 0000000..ebba38f --- /dev/null +++ b/oracle/controllers/grpc_error.go @@ -0,0 +1,42 @@ +// Copyright 
2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controllers
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// IsAlreadyExistsError returns true if the given error indicates that the object already exists.
+func IsAlreadyExistsError(err error) bool {
+	if err == nil {
+		return false
+	}
+	if s, ok := status.FromError(err); ok {
+		return s.Code() == codes.AlreadyExists
+	}
+	return false
+}
+
+// IsNotFoundError returns true if the given error indicates that the object was not found.
+func IsNotFoundError(err error) bool {
+	if err == nil {
+		return false
+	}
+	if s, ok := status.FromError(err); ok {
+		return s.Code() == codes.NotFound
+	}
+	return false
+}
diff --git a/oracle/controllers/importcontroller/BUILD.bazel b/oracle/controllers/importcontroller/BUILD.bazel
new file mode 100644
index 0000000..073d130
--- /dev/null
+++ b/oracle/controllers/importcontroller/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "importcontroller",
+    srcs = ["import_controller.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/importcontroller",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers",
+        "//oracle/pkg/agents/config_agent/protos",
+        "//oracle/pkg/k8s",
+        "@com_github_go_logr_logr//:logr",
+        "@io_k8s_api//core/v1:core",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_apimachinery//pkg/runtime",
+        "@io_k8s_apimachinery//pkg/types",
+        "@io_k8s_client_go//tools/record",
+        "@io_k8s_sigs_controller_runtime//:controller-runtime",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+    ],
+)
+
+go_test(
+    name = "importcontroller_test",
+    srcs = ["import_controller_test.go"],
+    embed = [":importcontroller"],
+    deps = [
+        "//common/api/v1alpha1",
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers/testhelpers",
+        "//oracle/pkg/k8s",
+        "@com_github_onsi_ginkgo//:ginkgo",
+        "@com_github_onsi_gomega//:gomega",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_sigs_controller_runtime//:controller-runtime",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+    ],
+)
diff --git a/oracle/controllers/importcontroller/import_controller.go b/oracle/controllers/importcontroller/import_controller.go
new file mode 100644
index 0000000..efb2268
--- /dev/null
+++ b/oracle/controllers/importcontroller/import_controller.go
@@ -0,0 +1,262 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package importcontroller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +// ImportReconciler reconciles an Import object. +type ImportReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ClientFactory controllers.ConfigAgentClientFactory + Recorder record.EventRecorder +} + +const ( + reconcileTimeout = 3 * time.Minute +) + +// readyConditionWrapper simplifies updating and using Ready condition +// of Import's status. +type readyConditionWrapper struct { + imp *v1alpha1.Import + changed bool + defaultState string +} + +func (w *readyConditionWrapper) getState() string { + readyCond := k8s.FindCondition(w.imp.Status.Conditions, k8s.Ready) + if readyCond == nil { + w.setState(w.defaultState, "") + } + + return k8s.FindCondition((&w.imp.Status).Conditions, k8s.Ready).Reason +} + +func (w *readyConditionWrapper) setState(condReason, message string) { + status := &w.imp.Status + + condStatus := metav1.ConditionFalse + if condReason == k8s.ImportComplete { + condStatus = metav1.ConditionTrue + } + + status.Conditions = k8s.Upsert(status.Conditions, k8s.Ready, condStatus, condReason, message) + w.changed = true +} + +func (w *readyConditionWrapper) elapsedSinceLastStateChange() time.Duration { + return k8s.ElapsedTimeFromLastTransitionTime(k8s.FindCondition(w.imp.Status.Conditions, k8s.Ready), time.Second) +} + +var ( + requeueSoon = ctrl.Result{RequeueAfter: 30 * time.Second} + requeueLater = ctrl.Result{RequeueAfter: time.Minute} +) + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=imports,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=imports/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances,verbs=get;list;watch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances/status,verbs=get +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases,verbs=get;list;watch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases/status,verbs=get +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// Reconcile is a generic reconcile function for Import resources. 
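+// The Ready condition reason acts as a small state machine; a sketch of the
+// transitions implemented below (reasons come from the k8s package):
+//
+//	ImportPending    -> handleNotStartedImport: wait for the Database to be
+//	                    ready, then start the DataPumpImport LRO
+//	ImportInProgress -> handleRunningImport: poll the LRO until it is done
+//	ImportComplete / ImportFailed -> terminal; no further action is taken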
+func (r *ImportReconciler) Reconcile(req ctrl.Request) (result ctrl.Result, recErr error) { + log := r.Log.WithValues("Import", req.NamespacedName) + log.Info("reconciling import") + ctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout) + defer cancel() + + imp := &v1alpha1.Import{} + if err := r.Get(ctx, req.NamespacedName, imp); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + impStatusWrapper := &readyConditionWrapper{imp: imp, defaultState: k8s.ImportPending} + defer func() { + if !impStatusWrapper.changed { + return + } + if err := r.Status().Update(ctx, imp); err != nil { + log.Error(err, "failed to update the import status") + if recErr == nil { + recErr = err + } + } + }() + + switch impStatusWrapper.getState() { + case k8s.ImportPending: + return r.handleNotStartedImport(ctx, log, impStatusWrapper, req) + case k8s.ImportInProgress: + return r.handleRunningImport(ctx, log, impStatusWrapper, req) + default: + log.Info(fmt.Sprintf("import is in the state %q, no action needed", impStatusWrapper.getState())) + return ctrl.Result{}, nil + } + +} + +func (r *ImportReconciler) handleNotStartedImport(ctx context.Context, log logr.Logger, impWrapper *readyConditionWrapper, req ctrl.Request) (ctrl.Result, error) { + var ( + db = &v1alpha1.Database{} + inst = &v1alpha1.Instance{} + imp = impWrapper.imp + ) + + // get referenced objects: database and instance + dbKey := types.NamespacedName{ + Namespace: req.Namespace, + Name: imp.Spec.DatabaseName, + } + if err := r.Get(ctx, dbKey, db); err != nil { + log.Error(err, "error getting database", "database", dbKey) + return ctrl.Result{}, err + } + + instKey := types.NamespacedName{ + Namespace: req.Namespace, + Name: imp.Spec.Instance, + } + if err := r.Get(ctx, instKey, inst); err != nil { + log.Error(err, "error getting instance", "instance", instKey) + return ctrl.Result{}, err + } + + // validate + if imp.Spec.Instance != db.Spec.Instance { + return ctrl.Result{}, fmt.Errorf("instance names in Import and Database specs do not match:"+ + " %q != %q", imp.Spec.Instance, db.Spec.Instance) + } + + dbReady := k8s.ConditionStatusEquals( + k8s.FindCondition(db.Status.Conditions, k8s.Ready), + metav1.ConditionTrue) + + // if can start, begin import + if dbReady { + caClient, closeConn, err := r.ClientFactory.New(ctx, r, req.Namespace, imp.Spec.Instance) + if err != nil { + log.Error(err, "failed to create config agent client") + return ctrl.Result{}, err + } + defer closeConn() + + resp, err := caClient.DataPumpImport(ctx, &capb.DataPumpImportRequest{ + PdbName: db.Spec.Name, + DbDomain: inst.Spec.DBDomain, + GcsPath: imp.Spec.GcsPath, + GcsLogPath: imp.Spec.GcsLogPath, + LroInput: &capb.LROInput{OperationId: lroOperationID(imp)}, + }) + if err != nil { + if !controllers.IsAlreadyExistsError(err) { + impWrapper.setState(k8s.ImportPending, fmt.Sprintf("failed to start import: %v", err)) + return ctrl.Result{}, fmt.Errorf("failed to start import: %v", err) + + } + log.Info("Import operation was already running") + + } else { + log.Info("started DataPumpImport operation", "response", resp) + } + + // Import started successfully + impWrapper.setState(k8s.ImportInProgress, "") + + } else { + log.Info("database is not yet ready") + } + + return requeueSoon, nil +} + +func (r *ImportReconciler) handleRunningImport(ctx context.Context, log logr.Logger, impWrapper *readyConditionWrapper, req ctrl.Request) (ctrl.Result, error) { + imp := impWrapper.imp + operationID := lroOperationID(imp) + + // check import LRO 
status
+	operation, err := controllers.GetLROOperation(r.ClientFactory, ctx, r, req.Namespace, operationID, imp.Spec.Instance)
+	if err != nil {
+		log.Error(err, "GetLROOperation returned an error")
+		return ctrl.Result{}, err
+	}
+	log.Info("GetLROOperation", "response", operation)
+
+	if !operation.Done {
+		return requeueLater, nil
+	}
+
+	// handle import LRO completion
+	log.Info("LRO is DONE", "operationID", operationID)
+	defer func() {
+		_ = controllers.DeleteLROOperation(r.ClientFactory, ctx, r, req.Namespace, operationID, imp.Spec.Instance)
+	}()
+
+	if operation.GetError() != nil {
+		impWrapper.setState(
+			k8s.ImportFailed,
+			fmt.Sprintf("Failed to import on %s from %s: %s",
+				time.Now().Format(time.RFC3339), imp.Spec.GcsPath, operation.GetError().GetMessage()))
+
+		r.Recorder.Eventf(imp, corev1.EventTypeWarning, k8s.ImportFailed, fmt.Sprintf("Import error: %v", operation.GetError().GetMessage()))
+
+		// The failure is terminal: it is recorded in the Ready condition and
+		// not retried, so return a nil error rather than the stale err above.
+		return ctrl.Result{}, nil
+	}
+
+	// successful completion
+	if impWrapper.getState() != k8s.ImportComplete {
+		r.Recorder.Eventf(imp, corev1.EventTypeNormal, k8s.ImportComplete,
+			"Import has completed successfully. Elapsed Time: %v", impWrapper.elapsedSinceLastStateChange())
+	}
+	impWrapper.setState(
+		k8s.ImportComplete,
+		fmt.Sprintf("Imported data on %s from %s",
+			time.Now().Format(time.RFC3339), imp.Spec.GcsPath))
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager configures the reconciler.
+func (r *ImportReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&v1alpha1.Import{}).
+		Complete(r)
+}
+
+func lroOperationID(imp *v1alpha1.Import) string {
+	return fmt.Sprintf("Import_%s", imp.GetUID())
+}
diff --git a/oracle/controllers/importcontroller/import_controller_test.go b/oracle/controllers/importcontroller/import_controller_test.go
new file mode 100644
index 0000000..4c8867a
--- /dev/null
+++ b/oracle/controllers/importcontroller/import_controller_test.go
@@ -0,0 +1,253 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package importcontroller
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	.
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager + reconciler *ImportReconciler + fakeClientFactory *testhelpers.FakeClientFactory +) + +func TestImportController(t *testing.T) { + fakeClientFactory = &testhelpers.FakeClientFactory{} + + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Import controller", func() []testhelpers.Reconciler { + reconciler = &ImportReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Import"), + Scheme: k8sManager.GetScheme(), + ClientFactory: fakeClientFactory, + Recorder: k8sManager.GetEventRecorderFor("import-controller"), + } + + return []testhelpers.Reconciler{reconciler} + }) +} + +var _ = Describe("Import controller", func() { + const ( + namespace = "default" + databaseName = "pdb1" + + timeout = time.Second * 15 + interval = time.Millisecond * 50 + ) + + ctx := context.Background() + + var fakeConfigAgentClient *testhelpers.FakeConfigAgentClient + + var ( + instance *v1alpha1.Instance + database *v1alpha1.Database + imp *v1alpha1.Import + importObjectKey client.ObjectKey + ) + + BeforeEach(func() { + // create instance + instance = &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: testhelpers.RandName("instance"), + Namespace: namespace, + }, + } + Expect(k8sClient.Create(ctx, instance)).Should(Succeed()) + + Eventually(func() error { + return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instance.Name}, &v1alpha1.Instance{}) + }, timeout, interval).Should(Succeed()) + + // create database + database = &v1alpha1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: testhelpers.RandName("db"), + }, + Spec: v1alpha1.DatabaseSpec{ + DatabaseSpec: commonv1alpha1.DatabaseSpec{ + Name: databaseName, + Instance: instance.Name, + }, + AdminPassword: "123456", + Users: []v1alpha1.UserSpec{}, + }, + } + + Expect(k8sClient.Create(ctx, database)).Should(Succeed()) + + Eventually(func() error { + return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: database.Name}, &v1alpha1.Database{}) + }, timeout, interval).Should(Succeed()) + + fakeClientFactory.Reset() + fakeConfigAgentClient = fakeClientFactory.Caclient + + // define import, expect each test case create one + importObjectKey = client.ObjectKey{Namespace: namespace, Name: testhelpers.RandName("import")} + imp = &v1alpha1.Import{ + ObjectMeta: metav1.ObjectMeta{ + Name: importObjectKey.Name, + Namespace: importObjectKey.Namespace, + }, + Spec: v1alpha1.ImportSpec{ + Instance: instance.Name, + DatabaseName: database.Name, + GcsPath: "gs://ex_bucket/import.dmp", + }, + } + }) + + AfterEach(func() { + Expect(k8sClient.Delete(ctx, database)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, imp)).Should(Succeed()) + }) + + Context("Database is ready", func() { + + BeforeEach(func() { + dbKey := client.ObjectKey{Namespace: database.Namespace, Name: database.Name} + database.Status.Conditions = 
k8s.Upsert(database.Status.Conditions, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, "") + + Expect(k8sClient.Status().Update(ctx, database)).Should(Succeed()) + + Eventually(func() metav1.ConditionStatus { + cond, err := getDatabaseReadyCondition(ctx, dbKey) + if err != nil || cond == nil { + return metav1.ConditionFalse + } + return cond.Status + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + }) + + It("Should succeed when LRO completes successfully", func() { + By("simulating successful DataPumpImport LRO completion") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDone + + By("creating a new import") + Expect(k8sClient.Create(ctx, imp)).Should(Succeed()) + + By("verifying post-conditions") + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, importObjectKey, k8s.Ready) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + Eventually(fakeConfigAgentClient.DataPumpImportCalledCnt, timeout, interval).Should(Equal(1)) + Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt, timeout, interval).Should(Equal(1)) + + readyCond, err := getCondition(ctx, importObjectKey, k8s.Ready) + Expect(err).ShouldNot(HaveOccurred()) + Expect(readyCond.Reason).Should(Equal(k8s.ImportComplete)) + }) + + It("Should handle LRO failure", func() { + By("simulating failed DataPumpImport LRO completion") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDoneWithError + + By("creating a new import") + Expect(k8sClient.Create(ctx, imp)).Should(Succeed()) + + By("verifying post-conditions") + Eventually(func() (string, error) { + return getConditionReason(ctx, importObjectKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.ImportFailed)) + Eventually(fakeConfigAgentClient.DataPumpImportCalledCnt, timeout, interval).Should(Equal(1)) + Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt, timeout, interval).Should(Equal(1)) + }) + }) + + Context("Database is not ready", func() { + + BeforeEach(func() { + dbKey := client.ObjectKey{Namespace: database.Namespace, Name: database.Name} + database.Status.Conditions = k8s.Upsert(database.Status.Conditions, k8s.Ready, metav1.ConditionFalse, k8s.CreatePending, "") + + Expect(k8sClient.Status().Update(ctx, database)).Should(Succeed()) + + Eventually(func() string { + cond, err := getDatabaseReadyCondition(ctx, dbKey) + if err != nil || cond == nil { + return "" + } + return cond.Reason + }, timeout, interval).Should(Equal(k8s.CreatePending)) + }) + + It("Should keep Import in Pending state", func() { + By("creating a new import") + Expect(k8sClient.Create(ctx, imp)).Should(Succeed()) + + Eventually(func() (string, error) { + return getConditionReason(ctx, importObjectKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.ImportPending)) + + // give controller some time to run import if it unexpectedly is going to + time.Sleep(100 * time.Millisecond) + Expect(fakeConfigAgentClient.DataPumpImportCalledCnt()).Should(Equal(0)) + Expect(getConditionReason(ctx, importObjectKey, k8s.Ready)).Should(Equal(k8s.ImportPending)) + }) + }) +}) + +func getConditionStatus(ctx context.Context, objKey client.ObjectKey, condType string) (metav1.ConditionStatus, error) { + cond, err := getCondition(ctx, objKey, condType) + if cond == nil { + return metav1.ConditionFalse, err + } + return cond.Status, err +} + +func getConditionReason(ctx context.Context, objKey client.ObjectKey, condType string) (string, error) { + cond, err := getCondition(ctx, objKey, condType) + if cond == nil { + 
return "", err + } + return cond.Reason, err +} + +func getCondition(ctx context.Context, objKey client.ObjectKey, condType string) (*metav1.Condition, error) { + imp := &v1alpha1.Import{} + if err := k8sClient.Get(ctx, objKey, imp); err != nil { + return nil, err + } + return k8s.FindCondition(imp.Status.Conditions, condType), nil +} + +func getDatabaseReadyCondition(ctx context.Context, objKey client.ObjectKey) (*metav1.Condition, error) { + db := &v1alpha1.Database{} + if err := k8sClient.Get(ctx, objKey, db); err != nil { + return &metav1.Condition{}, err + } + + return k8s.FindCondition(db.Status.Conditions, k8s.Ready), nil +} diff --git a/oracle/controllers/instancecontroller/BUILD.bazel b/oracle/controllers/instancecontroller/BUILD.bazel new file mode 100644 index 0000000..5880c6a --- /dev/null +++ b/oracle/controllers/instancecontroller/BUILD.bazel @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "instancecontroller", + srcs = [ + "instance_controller.go", + "instance_controller_parameters.go", + "instance_controller_standby.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/instancecontroller", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//common/pkg/maintenance", + "//common/pkg/utils", + "//oracle/api/v1alpha1", + "//oracle/controllers", + "//oracle/controllers/databasecontroller", + "//oracle/pkg/agents/common/sql", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/agents/consts", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/types", + "@io_k8s_client_go//tools/record", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/handler", + "@io_k8s_sigs_controller_runtime//pkg/source", + "@org_golang_google_grpc//:go_default_library", + ], +) + +go_test( + name = "instancecontroller_test", + srcs = ["instance_controller_test.go"], + embed = [":instancecontroller"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_go_logr_logr//:logr", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/errors", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_client_go//util/retry", + "@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + ], +) diff --git a/oracle/controllers/instancecontroller/instance_controller.go b/oracle/controllers/instancecontroller/instance_controller.go new file mode 100644 index 0000000..ef50130 --- /dev/null +++ b/oracle/controllers/instancecontroller/instance_controller.go @@ -0,0 +1,1086 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instancecontroller + +import ( + "context" + "fmt" + "reflect" + "strings" + "time" + + "github.com/go-logr/logr" + lropb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + commonutils "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/pkg/utils" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/databasecontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var CheckStatusInstanceFunc = controllers.CheckStatusInstanceFunc + +// InstanceReconciler reconciles an Instance object. 
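+//
+// A typical wiring from a controller manager would look roughly like the
+// following sketch (mirroring how the import test suite builds its
+// reconciler; images and factory are assumed to be prepared by the caller
+// and are not part of this change):
+//
+//	r := &InstanceReconciler{
+//		Client:        mgr.GetClient(),
+//		Log:           ctrl.Log.WithName("controllers").WithName("Instance"),
+//		Scheme:        mgr.GetScheme(),
+//		Images:        images,
+//		ClientFactory: factory,
+//		Recorder:      mgr.GetEventRecorderFor("instance-controller"),
+//	}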
+type InstanceReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Images map[string]string + ClientFactory controllers.ConfigAgentClientFactory + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=instances/status,verbs=get;update;patch + +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=persistentvolumes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=services,verbs=list;watch;get;patch;create +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=databases/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=configs,verbs=get;list;watch;create;update;patch;delete + +const ( + physicalRestore = "PhysicalRestore" + instanceProvisionTimeout = 20 * time.Minute + createDatabaseInstanceTimeout = 20 * time.Minute // 20 minutes because it can take ~10 minutes for unseeded CDB creations + dateFormat = "20060102" +) + +var defaultDisks = []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, +} + +func restoreDOP(r, b int32) int32 { + // Determine the restore DOP. The order of preference is: + // - If DOP is explicitly requested in the restore section, take it. + // - If not and the DOP was specified when a backup was taken, use it. + // - Otherwise, use the default, which is 1. + if r > 0 { + return r + } + + if b > 0 { + return b + } + + return 1 +} + +// findBackupForRestore fetches the backup with the backup_id specified in the spec for initiating the instance restore. +func (r *InstanceReconciler) findBackupForRestore(ctx context.Context, inst v1alpha1.Instance, namespace string) (*v1alpha1.Backup, error) { + var backups v1alpha1.BackupList + if err := r.List(ctx, &backups, client.InNamespace(namespace)); err != nil { + return nil, fmt.Errorf("preflight check: failed to list backups for a restore: %v", err) + } + + var backup v1alpha1.Backup + for _, b := range backups.Items { + if b.Status.BackupID == inst.Spec.Restore.BackupID { + r.Log.V(1).Info("requested backup found") + backup = b + } + } + + if backup.Spec.Type == "" { + return nil, fmt.Errorf("preflight check: failed to locate the requested backup %q", inst.Spec.Restore.BackupID) + } + + if backup.Spec.Type != inst.Spec.Restore.BackupType { + return nil, fmt.Errorf("preflight check: located a backup of type %q, wanted: %q", backup.Spec.Type, inst.Spec.Restore.BackupType) + } + + return &backup, nil +} + +// restorePhysical runs the pre-flight checks and if all is good +// it makes a gRPC call to a PhysicalRestore. 
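+// The pre-flight checks enforced below are: the external LB is ready, the
+// backup is a Backupset taken at the Instance level, and the backup's Ready
+// condition is true; any violation aborts the restore before the gRPC call.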
+func (r *InstanceReconciler) restorePhysical(ctx context.Context, inst v1alpha1.Instance, backup *v1alpha1.Backup, req ctrl.Request) (*lropb.Operation, error) { + // Confirm that an external LB is ready. + if err := restorePhysicalPreflightCheck(ctx, r, req.Namespace, inst.Name); err != nil { + return nil, err + } + + if !*backup.Spec.Backupset { + return nil, fmt.Errorf("preflight check: located a physical backup, but in this release the auto-restore is only supported from a Backupset backup: %v", backup.Spec.Backupset) + } + + if backup.Spec.Subtype != "Instance" { + return nil, fmt.Errorf("preflight check: located a physical backup, but in this release the auto-restore is only supported from a Backupset taken at the Instance level: %q", backup.Spec.Subtype) + } + + backupReadyCond := k8s.FindCondition(backup.Status.Conditions, k8s.Ready) + if !k8s.ConditionStatusEquals(backupReadyCond, v1.ConditionTrue) { + return nil, fmt.Errorf("preflight check: located a physical backup, but it's not in the ready state: %q", backup.Status) + } + r.Log.Info("preflight check for a restore from a physical backup - all DONE", "backup", backup) + + dop := restoreDOP(inst.Spec.Restore.Dop, backup.Spec.Dop) + + caClient, closeConn, err := r.ClientFactory.New(ctx, r, req.Namespace, backup.Spec.Instance) + if err != nil { + r.Log.Error(err, "failed to create config agent client") + return nil, err + } + defer closeConn() + + timeLimitMinutes := controllers.PhysBackupTimeLimitDefault * 3 + if inst.Spec.Restore.TimeLimitMinutes != 0 { + timeLimitMinutes = time.Duration(inst.Spec.Restore.TimeLimitMinutes) * time.Minute + } + + ctxRestore, cancel := context.WithTimeout(context.Background(), timeLimitMinutes) + defer cancel() + + resp, err := caClient.PhysicalRestore(ctxRestore, &capb.PhysicalRestoreRequest{ + InstanceName: inst.Name, + CdbName: inst.Spec.CDBName, + Dop: dop, + LocalPath: backup.Spec.LocalPath, + GcsPath: backup.Spec.GcsPath, + LroInput: &capb.LROInput{OperationId: lroOperationID(physicalRestore, &inst)}, + }) + if err != nil { + return nil, fmt.Errorf("failed on PhysicalRestore gRPC call: %v", err) + } + + r.Log.Info("caClient.PhysicalRestore", "response", resp) + return resp, nil +} + +// restoreSnapshot constructs the new PVCs and sets the restore in stsParams struct +// based on the requested snapshot to restore from. 
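+// The sequence is destructive by design: delete the old StatefulSet, delete
+// the old PVCs, then re-create the PVCs with sp.Restore set (so they
+// provision from the snapshot) and apply a freshly built StatefulSet.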
+func (r *InstanceReconciler) restoreSnapshot(ctx context.Context, inst v1alpha1.Instance, sts *appsv1.StatefulSet, sp *controllers.StsParams) error { + if err := r.Delete(ctx, sts); err != nil { + r.Log.Error(err, "restoreSnapshot: failed to delete the old StatefulSet") + } + r.Log.Info("restoreSnapshot: old StatefulSet deleted") + + pvcs, err := controllers.NewPVCs(*sp) + if err != nil { + r.Log.Error(err, "NewPVCs failed") + return err + } + r.Log.Info("restoreSnapshot: old PVCs to delete constructed", "pvcs", pvcs) + + for i, pvc := range pvcs { + pvc.Name = fmt.Sprintf("%s-%s-0", pvc.Name, sp.StsName) + if err := r.Delete(ctx, &pvc); err != nil { + r.Log.Error(err, "restoreSnapshot: failed to delete the old PVC", "pvc#", i, "pvc", pvc) + } + r.Log.Info("restoreSnapshot: old PVC deleted", "pvc", pvc.Name) + + applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("instance-controller")} + if err := r.Patch(ctx, &pvc, client.Apply, applyOpts...); err != nil { + r.Log.Error(err, "restoreSnapshot: failed to patch the deleting of the old PVC") + } + } + + sp.Restore = inst.Spec.Restore + + newPVCs, err := controllers.NewPVCs(*sp) + if err != nil { + r.Log.Error(err, "NewPVCs failed") + return err + } + newPodTemplate := controllers.NewPodTemplate(*sp, inst.Spec.CDBName, controllers.GetDBDomain(&inst)) + stsRestored, err := controllers.NewSts(*sp, newPVCs, newPodTemplate) + if err != nil { + r.Log.Error(err, "restoreSnapshot: failed to construct the restored StatefulSet") + return err + } + sts = stsRestored + + applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("instance-controller")} + if err := r.Patch(ctx, sts, client.Apply, applyOpts...); err != nil { + r.Log.Error(err, "failed to patch the restored StatefulSet") + return err + } + r.Log.Info("restoreSnapshot: StatefulSet constructed", "statefulSet", sts, "sts.Status", sts.Status) + + return nil +} + +// loadConfig attempts to find a customer specific Operator config +// if it's been provided. There should be at most one config. +// If no config is provided by a customer, no errors are raised and +// all defaults are assumed. +func (r *InstanceReconciler) loadConfig(ctx context.Context, ns string) (*v1alpha1.Config, error) { + var configs v1alpha1.ConfigList + if err := r.List(ctx, &configs, client.InNamespace(ns)); err != nil { + return nil, err + } + + if len(configs.Items) == 0 { + return nil, nil + } + + if len(configs.Items) != 1 { + return nil, fmt.Errorf("number of customer provided configs is not one: %d", len(configs.Items)) + } + + return &configs.Items[0], nil +} + +// statusProgress tracks the progress of an ongoing instance creation and returns the progress in terms of percentage. +func (r *InstanceReconciler) statusProgress(ctx context.Context, ns, name string) (int, error) { + var sts appsv1.StatefulSetList + if err := r.List(ctx, &sts, client.InNamespace(ns)); err != nil { + r.Log.Error(err, "failed to get a list of StatefulSets to check status") + return 0, err + } + + if len(sts.Items) < 1 { + return 0, fmt.Errorf("failed to find a StatefulSet, found: %d", len(sts.Items)) + } + + // In theory a user should not be running any StatefulSet in a + // namespace, but to be on a safe side, iterate over all until we find ours. 
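+	// The returned value is a coarse checkpoint rather than a linear measure:
+	// roughly 10% once the StatefulSet exists, 50% when its replica is
+	// current, 60-85% while the pod is scheduled and starting, and 100% only
+	// when the database container reports ready.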
+	var foundSts *appsv1.StatefulSet
+	for index, s := range sts.Items {
+		if s.Name == name {
+			foundSts = &sts.Items[index]
+		}
+	}
+
+	if foundSts == nil {
+		return 0, fmt.Errorf("failed to find the right StatefulSet %s (out of %d)", name, len(sts.Items))
+	}
+	r.Log.V(1).Info("found the right StatefulSet", "foundSts", foundSts.Name,
+		"sts.Status.CurrentReplicas", foundSts.Status.CurrentReplicas, "sts.Status.ReadyReplicas", foundSts.Status.ReadyReplicas)
+
+	if foundSts.Status.CurrentReplicas != 1 {
+		return 10, fmt.Errorf("StatefulSet is not ready yet (expected 1 current replica, got %d)", foundSts.Status.CurrentReplicas)
+	}
+
+	if foundSts.Status.ReadyReplicas != 1 {
+		return 50, fmt.Errorf("StatefulSet is not ready yet (expected 1 ready replica, got %d)", foundSts.Status.ReadyReplicas)
+	}
+
+	var pods corev1.PodList
+	if err := r.List(ctx, &pods, client.InNamespace(ns), client.MatchingLabels{"statefulset": name}); err != nil {
+		r.Log.Error(err, "failed to get a list of Pods to check status")
+		return 60, err
+	}
+
+	if len(pods.Items) < 1 {
+		return 65, fmt.Errorf("failed to find enough pods, found: %d pods", len(pods.Items))
+	}
+
+	var foundPod *corev1.Pod
+	for index, p := range pods.Items {
+		if p.Name == name+"-0" {
+			foundPod = &pods.Items[index]
+		}
+	}
+
+	if foundPod == nil {
+		return 75, fmt.Errorf("failed to find the right Pod %s (out of %d)", name+"-0", len(pods.Items))
+	}
+	r.Log.V(1).Info("found the right Pod", "pod.Name", foundPod.Name, "pod.Status", foundPod.Status.Phase, "#containers", len(foundPod.Status.ContainerStatuses))
+
+	if foundPod.Status.Phase != "Running" {
+		return 85, fmt.Errorf("failed to find the right Pod %s in status Running: %s", name+"-0", foundPod.Status.Phase)
+	}
+
+	for _, c := range foundPod.Status.ContainerStatuses {
+		if c.Name == databasecontroller.DatabaseContainerName && c.Ready {
+			return 100, nil
+		}
+	}
+	return 85, fmt.Errorf("failed to find a database container in %+v", foundPod.Status.ContainerStatuses)
+}
+
+func (r *InstanceReconciler) updateProgressCondition(ctx context.Context, inst v1alpha1.Instance, ns, op string) bool {
+	iReadyCond := k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
+
+	r.Log.Info("updateProgressCondition", "operation", op, "iReadyCond", iReadyCond)
+	progress, err := r.statusProgress(ctx, ns, fmt.Sprintf(controllers.StsName, inst.Name))
+	if err != nil && iReadyCond != nil {
+		if progress > 0 {
+			k8s.InstanceUpsertCondition(&inst.Status, iReadyCond.Type, iReadyCond.Status, iReadyCond.Reason, fmt.Sprintf("%s: %d%%", op, progress))
+		}
+		r.Log.Info("updateProgressCondition", "statusProgress", err)
+		return false
+	}
+	return true
+}
+
+// validateSpec sanity checks a DB Domain input for conflicts.
+func validateSpec(inst *v1alpha1.Instance) error {
+	// Does DBUniqueName contain DB Domain as a suffix?
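+	// For example (hypothetical values): DBUniqueName "orcl_site1.example.com"
+	// with DBDomain "example.com" is consistent, while the same DBUniqueName
+	// with DBDomain "other.com" is rejected; an empty DBDomain skips the check.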
+	if strings.Contains(inst.Spec.DBUniqueName, ".") {
+		domainFromName := strings.SplitN(inst.Spec.DBUniqueName, ".", 2)[1]
+		if inst.Spec.DBDomain != "" && domainFromName != inst.Spec.DBDomain {
+			return fmt.Errorf("validateSpec: domain %q provided in DBUniqueName %q does not match with provided DBDomain %q",
+				domainFromName, inst.Spec.DBUniqueName, inst.Spec.DBDomain)
+		}
+	}
+
+	if inst.Spec.CDBName != "" {
+		if _, err := sql.Identifier(inst.Spec.CDBName); err != nil {
+			return fmt.Errorf("validateSpec: cdbName is not valid: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// updateIsChangeApplied sets the instance.Status.IsChangeApplied field to false if observedGeneration < generation; it sets it back to true once the changes have been applied.
+// TODO: add logic to handle restore/recovery
+func (r *InstanceReconciler) updateIsChangeApplied(ctx context.Context, inst *v1alpha1.Instance) {
+	if inst.Status.ObservedGeneration < inst.Generation {
+		inst.Status.IsChangeApplied = v1.ConditionFalse
+		inst.Status.ObservedGeneration = inst.Generation
+		r.Log.Info("change detected", "observedGeneration", inst.Status.ObservedGeneration, "generation", inst.Generation)
+	}
+	if inst.Status.IsChangeApplied == v1.ConditionTrue {
+		return
+	}
+	parameterUpdateDone := inst.Spec.Parameters == nil || reflect.DeepEqual(inst.Status.CurrentParameters, inst.Spec.Parameters)
+	if parameterUpdateDone {
+		inst.Status.IsChangeApplied = v1.ConditionTrue
+		r.Log.Info("change applied", "observedGeneration", inst.Status.ObservedGeneration, "generation", inst.Generation)
+	}
+}
+
+func (r *InstanceReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, respErr error) {
+	ctx := context.Background()
+	log := r.Log.WithValues("Instance", req.NamespacedName)
+
+	log.Info("reconciling instance")
+
+	var inst v1alpha1.Instance
+	if err := r.Get(ctx, req.NamespacedName, &inst); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	if err := validateSpec(&inst); err != nil {
+		log.Error(err, "instance spec validation failed")
+		// TODO: better error handling; there is no need to retry a validation failure.
+		return ctrl.Result{}, nil
+	}
+
+	defer func() {
+		r.updateIsChangeApplied(ctx, &inst)
+		if err := r.Status().Update(ctx, &inst); err != nil {
+			log.Error(err, "failed to update the instance status")
+			if respErr == nil {
+				respErr = err
+			}
+		}
+	}()
+
+	diskSpace, err := commonutils.DiskSpaceTotal(&inst)
+	if err != nil {
+		log.Error(err, "failed to calculate the total disk space")
+	}
+	log.Info("common instance", "total allocated disk space across all instance disks [Gi]", diskSpace/1024/1024/1024)
+
+	instanceReadyCond := k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
+	dbInstanceCond := k8s.FindCondition(inst.Status.Conditions, k8s.DatabaseInstanceReady)
+
+	var enabledServices []commonv1alpha1.Service
+	for service, enabled := range inst.Spec.Services {
+		if enabled {
+			enabledServices = append(enabledServices, service)
+		}
+	}
+
+	// If the instance and the database are ready, we can set the instance parameters.
+	if k8s.ConditionStatusEquals(instanceReadyCond, v1.ConditionTrue) &&
+		k8s.ConditionStatusEquals(dbInstanceCond, v1.ConditionTrue) && inst.Spec.Parameters != nil {
+		log.Info("instance and database are ready, setting instance parameters")
+
+		if result, err := r.setInstanceParameterStateMachine(ctx, req, inst, log); err != nil {
+			return result, err
+		}
+	}
+
+	iReadyCond := k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
+	if restoreInProgress(iReadyCond) {
+		return r.handleRestoreInProgress(ctx, req, &inst, iReadyCond, log)
+	}
+
+	// Load default
preferences (aka "config") if provided by a customer. + config, err := r.loadConfig(ctx, req.NamespacedName.Namespace) + if err != nil { + return ctrl.Result{}, err + } + + images := make(map[string]string) + for k, v := range r.Images { + images[k] = v + } + + result, err := r.overrideDefaultImages(config, images, &inst, log) + if err != nil { + return result, err + } + + services := []string{"lb", "node"} + + applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("instance-controller")} + + cm, err := controllers.NewConfigMap(&inst, r.Scheme, fmt.Sprintf(controllers.CmName, inst.Name)) + if err != nil { + log.Error(err, "failed to create a ConfigMap", "cm", cm) + return ctrl.Result{}, err + } + + if err := r.Patch(ctx, cm, client.Apply, applyOpts...); err != nil { + return ctrl.Result{}, err + } + + // Create a StatefulSet if needed. + sp := controllers.StsParams{ + Inst: &inst, + Scheme: r.Scheme, + Namespace: req.NamespacedName.Namespace, + Images: images, + SvcName: fmt.Sprintf(controllers.SvcName, inst.Name), + StsName: fmt.Sprintf(controllers.StsName, inst.Name), + PrivEscalation: false, + ConfigMap: cm, + Disks: diskSpecs(&inst, config), + Config: config, + Log: log, + Services: enabledServices, + } + + var forceRestore bool + + dbiCond := k8s.FindCondition(inst.Status.Conditions, k8s.DatabaseInstanceReady) + if (k8s.ConditionStatusEquals(iReadyCond, v1.ConditionTrue) && k8s.ConditionStatusEquals(dbiCond, v1.ConditionTrue)) || + k8s.ConditionReasonEquals(iReadyCond, k8s.RestoreFailed) { + + if inst.Spec.Restore == nil { + if k8s.ConditionStatusEquals(iReadyCond, v1.ConditionTrue) { + log.Info("instance has already been provisioned and ready") + } else { + log.Info("instance is in failed restore state") + } + return ctrl.Result{}, nil + } + + if inst.Spec.Restore != nil { + if !inst.Spec.Restore.Force { + log.Info("instance is up and running. 
To replace (restore from a backup), set force=true") + return ctrl.Result{}, nil + } + + requestTime := inst.Spec.Restore.RequestTime.Rfc3339Copy() + if inst.Status.LastRestoreTime != nil && !requestTime.After(inst.Status.LastRestoreTime.Time) { + log.Info(fmt.Sprintf("skipping the restore request as requestTime=%v is not later than the last restore time %v", + requestTime, inst.Status.LastRestoreTime.Time)) + return ctrl.Result{}, nil + } + + forceRestore = true + log.Info("force restore, replacing the original instance...") + } + } + + newPVCs, err := controllers.NewPVCs(sp) + if err != nil { + r.Log.Error(err, "NewPVCs failed") + return ctrl.Result{}, err + } + newPodTemplate := controllers.NewPodTemplate(sp, inst.Spec.CDBName, controllers.GetDBDomain(&inst)) + sts, err := controllers.NewSts(sp, newPVCs, newPodTemplate) + if err != nil { + log.Error(err, "failed to create a StatefulSet", "sts", sts) + return ctrl.Result{}, err + } + log.V(1).Info("StatefulSet constructed", "sts", sts, "sts.Status", sts.Status, "inst.Status", inst.Status) + + if forceRestore { + return r.forceRestore(ctx, req, &inst, iReadyCond, sts, sp, log) + } + + if err := r.Patch(ctx, sts, client.Apply, applyOpts...); err != nil { + log.Error(err, "failed to patch the StatefulSet", "sts.Status", sts.Status) + return ctrl.Result{}, err + } + + agentParam := controllers.AgentDeploymentParams{ + Inst: &inst, + Scheme: r.Scheme, + Name: fmt.Sprintf(controllers.AgentDeploymentName, inst.Name), + Images: images, + PrivEscalation: false, + Log: log, + Args: controllers.GetLogLevelArgs(config), + Services: enabledServices, + } + agentDeployment, err := controllers.NewAgentDeployment(agentParam) + if err != nil { + log.Error(err, "failed to create a Deployment", "agent deployment", agentDeployment) + return ctrl.Result{}, err + } + if err := r.Patch(ctx, agentDeployment, client.Apply, applyOpts...); err != nil { + log.Error(err, "failed to patch the Deployment", "agent deployment.Status", agentDeployment.Status) + return ctrl.Result{}, err + } + + // Create LB/NodePort Services if needed. + var svcLB *corev1.Service + for _, s := range services { + svc, err := controllers.NewSvc(&inst, r.Scheme, s) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.Patch(ctx, svc, client.Apply, applyOpts...); err != nil { + return ctrl.Result{}, err + } + + if s == "lb" { + svcLB = svc + } + } + + svc, err := controllers.NewDBDaemonSvc(&inst, r.Scheme) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.Patch(ctx, svc, client.Apply, applyOpts...); err != nil { + return ctrl.Result{}, err + } + + svc, err = controllers.NewAgentSvc(&inst, r.Scheme) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.Patch(ctx, svc, client.Apply, applyOpts...); err != nil { + return ctrl.Result{}, err + } + + if iReadyCond == nil { + iReadyCond = k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.CreateInProgress, "") + } + + inst.Status.Endpoint = fmt.Sprintf(controllers.SvcEndpoint, fmt.Sprintf(controllers.SvcName, inst.Name), inst.Namespace) + inst.Status.URL = controllers.SvcURL(svcLB, consts.SecureListenerPort) + + // RequeueAfter 30 seconds to avoid constantly reconcile errors before statefulSet is ready. + // Update status when the Service is ready (for the initial provisioning). + // Also confirm that the StatefulSet is up and running. 
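+	// While the reason is still CreateInProgress there are three outcomes:
+	// give up after instanceProvisionTimeout, requeue in 30 seconds while
+	// provisioning progress is still being reported, or flip Ready to
+	// CreateComplete once the service URL has been populated.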
+	if k8s.ConditionReasonEquals(iReadyCond, k8s.CreateInProgress) {
+		elapsed := k8s.ElapsedTimeFromLastTransitionTime(iReadyCond, time.Second)
+		if elapsed > instanceProvisionTimeout {
+			r.Recorder.Eventf(&inst, corev1.EventTypeWarning, "InstanceReady", fmt.Sprintf("Instance provision timed out after %v", instanceProvisionTimeout))
+			msg := fmt.Sprintf("Instance provision timed out. Elapsed Time: %v", elapsed)
+			log.Info(msg)
+			k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.CreateInProgress, msg)
+			return ctrl.Result{}, nil
+		}
+
+		if !r.updateProgressCondition(ctx, inst, req.NamespacedName.Namespace, controllers.CreateInProgress) {
+			log.Info("requeue after 30 seconds")
+			return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
+		}
+
+		if inst.Status.URL != "" {
+			if !k8s.ConditionReasonEquals(iReadyCond, k8s.CreateComplete) {
+				r.Recorder.Eventf(&inst, corev1.EventTypeNormal, "InstanceReady", "Instance has been created successfully. Elapsed Time: %v", elapsed)
+			}
+			k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionTrue, k8s.CreateComplete, "")
+			inst.Status.CurrentServiceImage = images["service"]
+			return ctrl.Result{}, nil
+		}
+	}
+
+	if inst.Labels == nil {
+		inst.Labels = map[string]string{"instance": inst.Name}
+		if err := r.Update(ctx, &inst); err != nil {
+			log.Error(err, "failed to update the Instance spec (set labels)")
+			return ctrl.Result{}, err
+		}
+	}
+
+	// If we reach this point, the instance should be ready.
+	if inst.Spec.Mode == commonv1alpha1.ManuallySetUpStandby {
+		log.Info("reconciling instance for manually set up standby: DONE")
+		// the code returns here, so we can rely on the deferred function to update the instance status.
+		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.ManuallySetUpStandbyInProgress, fmt.Sprintf("Setting up standby database in progress, remove spec.mode %v to promote the instance", inst.Spec.Mode))
+		k8s.InstanceUpsertCondition(&inst.Status, k8s.StandbyReady, v1.ConditionTrue, k8s.CreateComplete, "standby instance creation complete, ready to set up standby database in the instance")
+		return ctrl.Result{}, nil
+	}
+
+	if k8s.ConditionStatusEquals(k8s.FindCondition(inst.Status.Conditions, k8s.StandbyReady), v1.ConditionTrue) {
+		conn, err := grpc.Dial(fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure())
+		if err != nil {
+			log.Error(err, "failed to create a conn via gRPC.Dial")
+			return ctrl.Result{}, err
+		}
+		defer conn.Close()
+		caClient := capb.NewConfigAgentClient(conn)
+		// promote the standby instance, bootstrap is part of promotion.
+		r.Recorder.Eventf(&inst, corev1.EventTypeNormal, k8s.PromoteStandbyInProgress, "")
+		if err := r.bootstrapStandby(ctx, &inst, caClient, log); err != nil {
+			r.Recorder.Eventf(&inst, corev1.EventTypeWarning, k8s.PromoteStandbyFailed, fmt.Sprintf("Error promoting standby: %v", err))
+			return ctrl.Result{}, err
+		}
+		// the standby instance has been successfully promoted, set the ready
+		// condition to true and standby ready to false. Promotion needs to be
+		// idempotent to ensure correctness under retry.
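+		// A retry after a partially completed promotion re-enters this branch
+		// because StandbyReady remains true until the condition flip below is
+		// persisted, so bootstrapStandby must tolerate being called again.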
+		r.Recorder.Eventf(&inst, corev1.EventTypeNormal, k8s.PromoteStandbyComplete, "")
+		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionTrue, k8s.CreateComplete, "")
+		k8s.InstanceUpsertCondition(&inst.Status, k8s.StandbyReady, v1.ConditionFalse, k8s.PromoteStandbyComplete, "")
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	var dbs v1alpha1.DatabaseList
+	if err := r.List(ctx, &dbs, client.InNamespace(req.Namespace)); err != nil {
+		log.V(1).Info("failed to list databases for instance", "inst.Name", inst.Name)
+	} else {
+		log.Info("list of queried databases", "dbs", dbs)
+	}
+
+	for _, newDB := range dbs.Items {
+		// check DB name against existing ones to decide whether this is a new DB
+		if !controllers.Contains(inst.Status.DatabaseNames, newDB.Spec.Name) {
+			log.Info("found a new DB", "dbName", newDB.Spec.Name)
+			inst.Status.DatabaseNames = append(inst.Status.DatabaseNames, newDB.Spec.Name)
+			continue
+		}
+		log.V(1).Info("not a new DB, skipping the update", "dbName", newDB.Spec.Name)
+	}
+
+	log.Info("instance status", "iReadyCond", iReadyCond, "endpoint", inst.Status.Endpoint,
+		"url", inst.Status.URL, "databases", inst.Status.DatabaseNames)
+
+	log.Info("reconciling instance: DONE")
+
+	istatus, err := controllers.CheckStatusInstanceFunc(ctx, inst.Name, inst.Spec.CDBName, svc.Spec.ClusterIP, controllers.GetDBDomain(&inst), log)
+	if err != nil {
+		log.Error(err, "failed to check the database instance status")
+		return ctrl.Result{}, err
+	}
+
+	isImageSeeded, err := isImageSeeded(ctx, svc.Spec.ClusterIP, log)
+	if err != nil {
+		log.Error(err, "unable to determine image type")
+		return ctrl.Result{}, err
+	}
+	if !isImageSeeded && istatus == controllers.StatusInProgress {
+		log.Info("Creating a new CDB database")
+		k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionFalse, k8s.CreateInProgress, "Bootstrapping CDB")
+		if err := r.Status().Update(ctx, &inst); err != nil {
+			log.Error(err, "failed to update the instance status")
+		}
+
+		if err = r.bootstrapCDB(ctx, inst, svc.Spec.ClusterIP, log); err != nil {
+			k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionFalse, k8s.CreateFailed, fmt.Sprintf("Error creating CDB: %v", err))
+			log.Error(err, "Error while creating CDB database")
+			r.Recorder.Eventf(&inst, corev1.EventTypeWarning, "DatabaseInstanceCreateFailed", fmt.Sprintf("Error creating CDB: %v", err))
+			return ctrl.Result{}, err // No point in proceeding if the instance isn't provisioned
+		}
+		log.Info("Finished creating new CDB database")
+	}
+
+	dbiCond = k8s.FindCondition(inst.Status.Conditions, k8s.DatabaseInstanceReady)
+	if istatus != controllers.StatusReady {
+		log.Info("database instance doesn't appear to be ready yet...")
+
+		elapsed := k8s.ElapsedTimeFromLastTransitionTime(dbiCond, time.Second)
+		if elapsed < createDatabaseInstanceTimeout {
+			log.Info(fmt.Sprintf("database instance creation in progress for %v, requeue after 30 seconds", elapsed))
+			k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionFalse, k8s.CreateInProgress, "")
+			return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
+		}
+
+		log.Info(fmt.Sprintf("database instance creation timed out.
Elapsed Time: %v", elapsed)) + if !strings.Contains(dbiCond.Message, "Warning") { // so that we would create only one database instance timeout event + r.Recorder.Eventf(&inst, corev1.EventTypeWarning, k8s.DatabaseInstanceTimeout, "DatabaseInstance has been in progress for over %v, please verify if it is stuck and should be recreated.", createDatabaseInstanceTimeout) + } + k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionFalse, k8s.CreateInProgress, "Warning: db instance is taking a long time to start up - verify that instance has not failed") + return ctrl.Result{}, nil // return nil so reconcile loop would not retry + } + + if !k8s.ConditionStatusEquals(dbiCond, v1.ConditionTrue) { + r.Recorder.Eventf(&inst, corev1.EventTypeNormal, k8s.DatabaseInstanceReady, "DatabaseInstance has been created successfully. Elapsed Time: %v", k8s.ElapsedTimeFromLastTransitionTime(dbiCond, time.Second)) + } + + k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionTrue, k8s.CreateComplete, "") + log.Info("reconciling database instance: DONE") + + return ctrl.Result{}, nil +} + +// isImageSeeded determines from the service image metadata file if the image is seeded or unseeded. +func isImageSeeded(ctx context.Context, clusterIP string, log logr.Logger) (bool, error) { + + log.Info("isImageSeeded: new database requested clusterIP", clusterIP) + + dialTimeout := 1 * time.Minute + // Establish a connection to a Config Agent. + ctx, cancel := context.WithTimeout(ctx, dialTimeout) + defer cancel() + + conn, err := grpc.Dial(fmt.Sprintf("%s:%d", clusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure()) + if err != nil { + log.Error(err, "isImageSeeded: failed to create a conn via gRPC.Dial") + return false, err + } + defer conn.Close() + + caClient := capb.NewConfigAgentClient(conn) + serviceImageMetaData, err := caClient.FetchServiceImageMetaData(ctx, &capb.FetchServiceImageMetaDataRequest{}) + if err != nil { + return false, fmt.Errorf("isImageSeeded: failed on FetchServiceImageMetaData call: %v", err) + } + if serviceImageMetaData.CdbName == "" { + return false, nil + } + return true, nil +} + +func (r *InstanceReconciler) overrideDefaultImages(config *v1alpha1.Config, images map[string]string, inst *v1alpha1.Instance, log logr.Logger) (ctrl.Result, error) { + if config != nil { + log.V(1).Info("customer config loaded", "config", config) + + if config.Spec.Platform != "GCP" && config.Spec.Platform != "BareMetal" && config.Spec.Platform != "Minikube" { + return ctrl.Result{}, fmt.Errorf("Unsupported platform: %q", config.Spec.Platform) + } + + // Replace the default images from the global Config, if so requested. + log.Info("create instance: prep", "images explicitly requested for this config", config.Spec.Images) + for k, image := range config.Spec.Images { + log.Info("key value is", "k", k, "image", image) + if v2, ok := images[k]; ok { + log.Info("create instance: prep", "replacing", k, "image of", v2, "with global", image) + images[k] = image + } + } + } else { + log.Info("no customer specific config found, assuming all defaults") + } + + // Replace final images with those explicitly set for the Instance. 
+ if inst.Spec.Images != nil { + log.Info("create instance: prep", "images explicitly requested for this instance", inst.Spec.Images) + for k, v1 := range inst.Spec.Images { + log.Info("k value is ", "key", k) + if v2, ok := images[k]; ok { + r.Log.Info("create instance: prep", "replacing", k, "image of", v2, "with instance specific", v1) + images[k] = v1 + } + } + } + + serviceImageDefined := false + if inst.Spec.Images != nil { + if _, ok := inst.Spec.Images["service"]; ok { + serviceImageDefined = true + log.Info("service image requested via instance", "service image:", inst.Spec.Images["service"]) + } + } + if config != nil { + if _, ok := config.Spec.Images["service"]; ok { + serviceImageDefined = true + log.Info("service image requested via config", "service image:", config.Spec.Images["service"]) + } + } + + if inst.Spec.CDBName == "" { + return ctrl.Result{}, fmt.Errorf("bootstrapCDB: CDBName isn't defined in the config") + } + if !serviceImageDefined { + return ctrl.Result{}, fmt.Errorf("bootstrapCDB: Service image isn't defined in the config") + } + return ctrl.Result{}, nil +} + +// forceRestore restores an instance from a backup. This method should be invoked +// only when the force flag in restore spec is set to true. +func (r *InstanceReconciler) forceRestore(ctx context.Context, req ctrl.Request, inst *v1alpha1.Instance, iReadyCond *v1.Condition, sts *appsv1.StatefulSet, sp controllers.StsParams, log logr.Logger) (ctrl.Result, error) { + k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.RestoreInProgress, fmt.Sprintf("Starting a restore on %s-%d from backup %s (type %s)", time.Now().Format(dateFormat), time.Now().Nanosecond(), inst.Spec.Restore.BackupID, inst.Spec.Restore.BackupType)) + inst.Status.LastRestoreTime = inst.Spec.Restore.RequestTime.DeepCopy() + inst.Status.BackupID = "" + + if err := r.Status().Update(ctx, inst); err != nil { + log.Error(err, "failed to update an Instance status (starting a restore)") + return ctrl.Result{}, err + } + + backup, err := r.findBackupForRestore(ctx, *inst, req.Namespace) + if err != nil { + log.Error(err, "could not find a matching backup") + r.Recorder.Eventf(inst, corev1.EventTypeWarning, "RestoreFailed", "Could not find a matching backup for BackupID: %v, BackupType: %v", inst.Spec.Restore.BackupID, inst.Spec.Restore.BackupType) + k8s.InstanceUpsertCondition(&inst.Status, iReadyCond.Type, v1.ConditionFalse, k8s.RestoreFailed, err.Error()) + return ctrl.Result{}, nil + } + + switch inst.Spec.Restore.BackupType { + case "Snapshot": + if err := r.restoreSnapshot(ctx, *inst, sts, &sp); err != nil { + return ctrl.Result{}, err + } + log.Info("restore from a storage snapshot: DONE") + + case "Physical": + operation, err := r.restorePhysical(ctx, *inst, backup, req) + if err != nil { + if !controllers.IsAlreadyExistsError(err) { + r.Log.Error(err, "PhysicalRestore failed") + return ctrl.Result{}, err + } + } else { + if operation.Done { + // we're dealing with non LRO version of restore + log.V(6).Info("encountered synchronous version of PhysicalRestore") + log.Info("PhysicalRestore DONE") + + message := fmt.Sprintf("Physical restore done. 
Elapsed Time: %v", k8s.ElapsedTimeFromLastTransitionTime(k8s.FindCondition(inst.Status.Conditions, k8s.Ready), time.Second)) + r.Recorder.Eventf(inst, corev1.EventTypeNormal, "RestoreComplete", message) + // non-LRO version sets condition to false, so that it will be set to true and cleaned up in the next reconcile loop + k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.RestoreComplete, message) + } else { + r.Log.Info("PhysicalRestore started") + } + } + + default: + // Not playing games here. A restore (especially the in-place restore) + // is destructive. It's not about being user-friendly. A user is to + // be specific as to what kind of backup they want to restore from. + return ctrl.Result{}, fmt.Errorf("a BackupType is a mandatory parameter for a restore") + } + + return ctrl.Result{}, nil +} + +func (r *InstanceReconciler) handleRestoreInProgress(ctx context.Context, req ctrl.Request, + inst *v1alpha1.Instance, iReadyCond *v1.Condition, log logr.Logger) (ctrl.Result, error) { + + cleanupLROFunc := func() {} + // This is to prevent a panic if another thread already resets restore spec. + if inst.Spec.Restore != nil && inst.Spec.Restore.BackupType == "Physical" && !k8s.ConditionReasonEquals(iReadyCond, k8s.RestoreComplete) { + id := lroOperationID(physicalRestore, inst) + operation, err := controllers.GetLROOperation(r.ClientFactory, ctx, r, req.Namespace, id, inst.Name) + if err != nil { + log.Error(err, "GetLROOperation returned an error") + return ctrl.Result{}, err + } + log.Info("GetLROOperation", "response", operation) + if !operation.Done { + return ctrl.Result{RequeueAfter: time.Minute}, nil + } + + log.Info("LRO is DONE", "id", id) + cleanupLROFunc = func() { + _ = controllers.DeleteLROOperation(r.ClientFactory, ctx, r, req.Namespace, id, inst.Name) + } + + // handle case when remote LRO completed unsuccessfully + if operation.GetError() != nil { + backupID := inst.Spec.Restore.BackupID + backupType := inst.Spec.Restore.BackupType + + k8s.InstanceUpsertCondition(&inst.Status, iReadyCond.Type, v1.ConditionFalse, k8s.RestoreFailed, fmt.Sprintf("Failed to restore on %s-%d from backup %s (type %s): %s", time.Now().Format(dateFormat), + time.Now().Nanosecond(), backupID, backupType, operation.GetError().GetMessage())) + if err := r.Status().Update(ctx, inst); err != nil { + log.Error(err, "failed to update the instance status") + return ctrl.Result{}, err + } + + inst.Spec.Restore = nil + if err := r.Update(ctx, inst); err != nil { + log.Error(err, "failed to update the Instance spec (record Restore Failure)") + return ctrl.Result{}, err + } + cleanupLROFunc() + return ctrl.Result{}, nil + } + } else if inst.Spec.Restore != nil && inst.Spec.Restore.BackupType == "Snapshot" { + if !r.updateProgressCondition(ctx, *inst, req.NamespacedName.Namespace, controllers.RestoreInProgress) { + log.Info("requeue after 30 seconds") + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + } + // This is to prevent a panic if another thread already resets restore spec. 
+ if inst.Spec.Restore != nil {
+ backupID := inst.Spec.Restore.BackupID
+ backupType := inst.Spec.Restore.BackupType
+
+ inst.Spec.Restore = nil
+ if err := r.Update(ctx, inst); err != nil {
+ log.Error(err, "failed to update the Instance spec (removing the restore bit)")
+ return ctrl.Result{}, err
+ }
+ inst.Status.Description = fmt.Sprintf("Restored on %s-%d from backup %s (type %s)", time.Now().Format(dateFormat),
+ time.Now().Nanosecond(), backupID, backupType)
+ r.Recorder.Eventf(inst, corev1.EventTypeNormal, "RestoreComplete", inst.Status.Description)
+ }
+ cleanupLROFunc()
+
+ k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionTrue, k8s.RestoreComplete, "")
+
+ return ctrl.Result{}, nil
+}
+
+func diskSpecs(inst *v1alpha1.Instance, config *v1alpha1.Config) []commonv1alpha1.DiskSpec {
+ if inst != nil && inst.Spec.Disks != nil {
+ return inst.Spec.Disks
+ }
+ if config != nil && config.Spec.Disks != nil {
+ return config.Spec.Disks
+ }
+ return defaultDisks
+}
+
+func restoreInProgress(instReadyCond *v1.Condition) bool {
+ return k8s.ConditionStatusEquals(instReadyCond, v1.ConditionFalse) &&
+ (k8s.ConditionReasonEquals(instReadyCond, k8s.RestoreComplete) || k8s.ConditionReasonEquals(instReadyCond, k8s.RestoreInProgress))
+}
+
+// bootstrapCDB is invoked during the instance creation phase for a database
+// image not containing a CDB and does the following:
+// a) Creates a CDB for an unseeded image.
+// b) Invokes a provisioning workflow for unseeded images.
+// c) Creates the database listener.
+func (r *InstanceReconciler) bootstrapCDB(ctx context.Context, inst v1alpha1.Instance, clusterIP string, log logr.Logger) error {
+ // TODO: add better error handling.
+ if inst.Spec.CDBName == "" || inst.Spec.DBUniqueName == "" {
+ return fmt.Errorf("bootstrapCDB: at least one of the following required arguments is not defined: CDBName, DBUniqueName")
+ }
+
+ log.Info("bootstrapCDB: new database requested", "clusterIP", clusterIP)
+
+ // TODO: Remove this timeout workaround once we have the LRO thing figured out.
+ dialTimeout := 21 * time.Minute
+ // Establish a connection to a Config Agent.
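+ // Note: grpc.Dial with these options does not block on connection establishment; the generous
+ // context timeout above bounds the RPCs themselves, since CreateCDB can legitimately run for a
+ // long time until an LRO-based flow replaces this workaround.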
+ ctx, cancel := context.WithTimeout(ctx, dialTimeout)
+ defer cancel()
+
+ conn, err := grpc.Dial(fmt.Sprintf("%s:%d", clusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure())
+ if err != nil {
+ log.Error(err, "bootstrapCDB: failed to create a conn via gRPC.Dial")
+ return err
+ }
+ defer conn.Close()
+
+ caClient := capb.NewConfigAgentClient(conn)
+ _, err = caClient.CreateCDB(ctx, &capb.CreateCDBRequest{
+ Sid: inst.Spec.CDBName,
+ DbUniqueName: inst.Spec.DBUniqueName,
+ DbDomain: controllers.GetDBDomain(&inst),
+ CharacterSet: inst.Spec.CharacterSet,
+ MemoryPercent: int32(inst.Spec.MemoryPercent),
+ // DBCA expects the parameters in the following string array format:
+ // ["key1=val1", "key2=val2", "key3=val3"]
+ AdditionalParams: mapsToStringArray(inst.Spec.Parameters),
+ })
+ if err != nil {
+ return fmt.Errorf("bootstrapCDB: failed on CreateCDB gRPC call: %v", err)
+ }
+
+ inst.Status.CurrentParameters = inst.Spec.Parameters
+ if err := r.Status().Update(ctx, &inst); err != nil {
+ log.Error(err, "failed to update the Instance status")
+ return err
+ }
+
+ dbDomain := controllers.GetDBDomain(&inst)
+ _, err = caClient.BootstrapDatabase(ctx, &capb.BootstrapDatabaseRequest{
+ CdbName: inst.Spec.CDBName,
+ DbUniqueName: inst.Spec.DBUniqueName,
+ Dbdomain: dbDomain,
+ })
+
+ if err != nil {
+ return fmt.Errorf("bootstrapCDB: error while running post-creation bootstrapping steps: %v", err)
+ }
+
+ _, err = caClient.CreateListener(ctx, &capb.CreateListenerRequest{
+ Name: inst.Spec.CDBName,
+ Port: 6021,
+ Protocol: "TCP",
+ DbDomain: dbDomain,
+ })
+ if err != nil {
+ return fmt.Errorf("bootstrapCDB: failed on listener gRPC call: %v", err)
+ }
+
+ return nil
+}
+
+func (r *InstanceReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ r.Log.V(1).Info("SetupWithManager", "images", r.Images)
+
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&v1alpha1.Instance{}).
+ Owns(&corev1.Service{}).
+ Owns(&appsv1.StatefulSet{}).
+ Owns(&corev1.PersistentVolumeClaim{}).
+ Owns(&corev1.ConfigMap{}).
+ Watches(
+ &source.Kind{Type: &v1alpha1.Database{}},
+ &handler.EnqueueRequestForObject{}).
+ Complete(r)
+}
+
+func lroOperationID(opType string, instance *v1alpha1.Instance) string {
+ return fmt.Sprintf("%s_%s_%s", opType, instance.GetUID(), instance.Status.LastRestoreTime.Format(time.RFC3339))
+}
+
+// Extracted as a variable for testing.
+var restorePhysicalPreflightCheck = func(ctx context.Context, r *InstanceReconciler, namespace, instName string) error {
+ svc := &corev1.Service{}
+ if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.SvcName, instName), Namespace: namespace}, svc); err != nil {
+ return err
+ }
+
+ if len(svc.Status.LoadBalancer.Ingress) == 0 {
+ return fmt.Errorf("preflight check: physical backup: external LB is NOT ready")
+ }
+ r.Log.Info("preflight check: restore from a physical backup, external LB service is ready", "succeededExecCmd#", 1, "svc", svc.Name)
+
+ return nil
+}
diff --git a/oracle/controllers/instancecontroller/instance_controller_parameters.go b/oracle/controllers/instancecontroller/instance_controller_parameters.go
new file mode 100644
index 0000000..8a650c7
--- /dev/null
+++ b/oracle/controllers/instancecontroller/instance_controller_parameters.go
@@ -0,0 +1,314 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instancecontroller
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/go-logr/logr"
+ "google.golang.org/grpc"
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+
+ maintenance "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/pkg/maintenance"
+ v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers"
+ capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s"
+)
+
+// reservedParameters holds the list of parameters that aren't allowed for modification.
+var reservedParameters = map[string]bool{
+ "audit_file_dest": true,
+ "audit_trail": true,
+ "compatible": true,
+ "control_files": true,
+ "db_block_size": true,
+ "db_recovery_file_dest": true,
+ "db_recovery_file_dest_size": true,
+ "diagnostic_dest": true,
+ "dispatchers": true,
+ "enable_pluggable_database": true,
+ "filesystemio_options": true,
+ "local_listener": true,
+ "open_cursors": true,
+ "pga_aggregate_target": true,
+ "processes": true,
+ "remote_login_passwordfile": true,
+ "sga_target": true,
+ "undo_tablespace": true,
+ "log_archive_dest_1": true,
+ "log_archive_dest_state_1": true,
+ "log_archive_format": true,
+ "standby_file_management": true,
+}
+
+func (r *InstanceReconciler) recordEventAndUpdateStatus(ctx context.Context, inst *v1alpha1.Instance, conditionStatus v1.ConditionStatus, reason, msg string, log logr.Logger) {
+ if conditionStatus == v1.ConditionTrue {
+ r.Recorder.Eventf(inst, corev1.EventTypeNormal, reason, msg)
+ } else {
+ r.Recorder.Eventf(inst, corev1.EventTypeWarning, reason, msg)
+ }
+ k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, conditionStatus, reason, msg)
+ if err := r.Status().Update(ctx, inst); err != nil {
+ log.Error(err, "failed to update the instance status")
+ }
+}
+
+// fetchCurrentParameterState infers the type and current value of the
+// parameters by querying the database and is used for the following purposes:
+// * The parameter type (static or dynamic) will be used for deciding whether
+// a database restart is required.
+// * The current parameter value will be used for rollback if the parameter
+// update fails or the database is non-functional after the restart.
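+// Illustrative example (parameter names and values are hypothetical): for
+// Spec.Parameters {"sga_max_size": "2G", "parallel_servers_target": "15"} the call might
+// return staticParams {"sga_max_size": "1G"} and dynamicParams {"parallel_servers_target": "10"},
+// i.e. the values currently in effect, split by whether changing them requires a restart.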
+func fetchCurrentParameterState(ctx context.Context, caClient capb.ConfigAgentClient, spec v1alpha1.InstanceSpec) (map[string]string, map[string]string, error) {
+
+ var unacceptableParams []string
+ var keys []string
+ for k := range spec.Parameters {
+ if _, ok := reservedParameters[k]; ok {
+ unacceptableParams = append(unacceptableParams, k)
+ }
+ keys = append(keys, k)
+ }
+
+ if len(unacceptableParams) != 0 {
+ return nil, nil, fmt.Errorf("fetchCurrentParameterState: parameter list contains reserved parameters: %v", unacceptableParams)
+ }
+ staticParams := make(map[string]string)
+ dynamicParams := make(map[string]string)
+ response, err := caClient.GetParameterTypeValue(ctx, &capb.GetParameterTypeValueRequest{
+ Keys: keys,
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("fetchCurrentParameterState: error while querying parameter type: %v", err)
+ }
+
+ // Check if static parameters are specified and a restart is required.
+ restartRequired := false
+ paramType := response.GetTypes()
+ paramValues := response.GetValues()
+ for i := 0; i < len(paramType); i++ {
+ if paramType[i] == "FALSE" {
+ restartRequired = true
+ staticParams[keys[i]] = paramValues[i]
+ } else {
+ dynamicParams[keys[i]] = paramValues[i]
+ }
+ }
+
+ // If a restart is required, check if the restartTimeRange is specified in the config.
+ if restartRequired && !maintenance.HasValidTimeRanges(spec.MaintenanceWindow) {
+ return nil, nil, errors.New("maintenanceWindow for db downtime not specified for static parameter update")
+ }
+
+ currentTime := time.Now()
+ inMaintenanceWindow := maintenance.InRange(spec.MaintenanceWindow, currentTime)
+
+ if !inMaintenanceWindow {
+ return nil, nil, errors.New("current time is not in a maintenance window that allows db restarts")
+ }
+ return staticParams, dynamicParams, nil
+}
+
+func (r *InstanceReconciler) setParameters(ctx context.Context, inst v1alpha1.Instance, caClient capb.ConfigAgentClient, log logr.Logger) (bool, error) {
+ log.Info("setParameters: parameters to apply", "parameters", inst.Spec.Parameters)
+ requireDatabaseRestart := false
+
+ for k, v := range inst.Spec.Parameters {
+ response, err := caClient.SetParameter(ctx, &capb.SetParameterRequest{
+ Key: k,
+ Value: v,
+ })
+ if err != nil {
+ log.Error(err, "setParameters: error while running SetParameter query")
+ return requireDatabaseRestart, err
+ }
+ requireDatabaseRestart = requireDatabaseRestart || response.Static
+ log.Info("setParameters: requireDatabaseRestart", "requireDatabaseRestart", requireDatabaseRestart)
+ }
+ log.Info("setParameters: SQL commands executed successfully")
+ return requireDatabaseRestart, nil
+}
+
+// setInstanceParameterStateMachine guides the transition of the parameter update
+// workflow to the next possible state based on the current state and the outcome
+// of the task associated with the current state.
+func (r *InstanceReconciler) setInstanceParameterStateMachine(ctx context.Context, req ctrl.Request, inst v1alpha1.Instance, log logr.Logger) (ctrl.Result, error) {
+
+ // If the current parameter state is equal to the requested state, skip the update.
+ if reflect.DeepEqual(inst.Spec.Parameters, inst.Status.CurrentParameters) {
+ return ctrl.Result{}, nil
+ }
+
+ // If the last failed parameter update is equal to the requested state, skip it.
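+ // This guard prevents hot-looping on a known-bad update: once an update is rolled back it is
+ // recorded in Status.LastFailedParameterUpdate and not retried until Spec.Parameters changes.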
+ if reflect.DeepEqual(inst.Spec.Parameters, inst.Status.LastFailedParameterUpdate) {
+ return ctrl.Result{}, nil
+ }
+
+ if result, err := r.sanityCheckTimeRange(inst, log); err != nil {
+ return result, err
+ }
+ conn, caClient, err := r.getConfigAgentClient(ctx, req, inst, log)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+ defer conn.Close()
+
+ _, dynamicParamsRollbackState, err := fetchCurrentParameterState(ctx, caClient, inst.Spec)
+ if err != nil {
+ msg := "setInstanceParameterStateMachine: sanity check failed for instance parameters"
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionFalse, k8s.ParameterUpdateRollback, fmt.Sprintf("%s: %v", msg, err), log)
+ return ctrl.Result{}, err
+ }
+
+ log.Info("setInstanceParameterStateMachine: entering state machine")
+ for {
+ instanceReadyCond := k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
+ switch instanceReadyCond.Reason {
+ case k8s.CreateComplete:
+ msg := "setInstanceParameterStateMachine: parameter update in progress"
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionFalse, k8s.ParameterUpdateInProgress, msg, log)
+ log.Info("setInstanceParameterStateMachine: SM CreateComplete -> ParameterUpdateInProgress")
+ case k8s.ParameterUpdateInProgress:
+ restartRequired, err := r.setParameters(ctx, inst, caClient, log)
+ if err != nil {
+ msg := "setInstanceParameterStateMachine: error while setting instance parameters"
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionFalse, k8s.ParameterUpdateRollback, fmt.Sprintf("%s: %v", msg, err), log)
+ log.Info("setInstanceParameterStateMachine: SM ParameterUpdateInProgress -> ParameterUpdateRollback")
+ break
+ }
+ if restartRequired {
+ log.Info("setInstanceParameterStateMachine: static parameters specified in config, scheduling a restart to activate them")
+ if _, err := caClient.BounceDatabase(ctx, &capb.BounceDatabaseRequest{
+ Sid: inst.Spec.CDBName,
+ }); err != nil {
+ msg := "setInstanceParameterStateMachine: error while restarting database after setting static parameters"
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionFalse, k8s.ParameterUpdateRollback, fmt.Sprintf("%s: %v", msg, err), log)
+ log.Info("setInstanceParameterStateMachine: SM ParameterUpdateInProgress -> ParameterUpdateRollback")
+ break
+ }
+ }
+ msg := "setInstanceParameterStateMachine: parameter update successful"
+ inst.Status.CurrentParameters = inst.Spec.Parameters
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionTrue, k8s.CreateComplete, msg, log)
+ log.Info("setInstanceParameterStateMachine: SM ParameterUpdateInProgress -> CreateComplete")
+ return ctrl.Result{}, nil
+ case k8s.ParameterUpdateRollback:
+ if err := r.initiateRecovery(ctx, inst, caClient, dynamicParamsRollbackState, log); err != nil {
+ log.Info("setInstanceParameterStateMachine: recovery failed, instance currently in irrecoverable state", "err", err)
+ return ctrl.Result{}, err
+ }
+ inst.Status.LastFailedParameterUpdate = inst.Spec.Parameters
+ msg := "setInstanceParameterStateMachine: instance recovered after bad parameter update"
+ r.recordEventAndUpdateStatus(ctx, &inst, v1.ConditionTrue, k8s.CreateComplete, msg, log)
+ log.Info("setInstanceParameterStateMachine: SM ParameterUpdateRollback -> CreateComplete")
+ return ctrl.Result{}, nil
+ }
+ }
+ return ctrl.Result{}, nil
+}
+
+func (r *InstanceReconciler) getConfigAgentClient(ctx context.Context, req ctrl.Request, inst v1alpha1.Instance, log logr.Logger) (*grpc.ClientConn, capb.ConfigAgentClient, error) {
+ agentSvc := &corev1.Service{}
+ if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.AgentSvcName, inst.Name), Namespace: req.Namespace}, agentSvc); err != nil {
+ return nil, nil, err
+ }
+
+ conn, err := grpc.Dial(fmt.Sprintf("%s:%d", agentSvc.Spec.ClusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure())
+ if err != nil {
+ // We'll retry the reconcile if it's due to transient connection errors.
+ log.Error(err, "getConfigAgentClient: failed to create a conn via gRPC.Dial")
+ return nil, nil, err
+ }
+ caClient := capb.NewConfigAgentClient(conn)
+ return conn, caClient, nil
+}
+
+// initiateRecovery will recover the config file (which contains the static
+// parameters) to the last known working copy if the static
+// parameter update failed (which caused the database to be non-functional
+// after a restart).
+func (r *InstanceReconciler) initiateRecovery(ctx context.Context, inst v1alpha1.Instance, caClient capb.ConfigAgentClient, dynamicParams map[string]string, log logr.Logger) error {
+
+ log.Info("initiateRecovery: initiating recovery of config file")
+ if _, err := caClient.RecoverConfigFile(ctx, &capb.RecoverConfigFileRequest{
+ CdbName: inst.Spec.CDBName,
+ }); err != nil {
+ msg := "initiateRecovery: error while recovering config file"
+ log.Info(msg, "err", err)
+ return err
+ }
+
+ if _, err := caClient.BounceDatabase(ctx, &capb.BounceDatabaseRequest{
+ Sid: inst.Spec.CDBName,
+ }); err != nil {
+ return err
+ }
+ log.Info("initiateRecovery: database bounce completed successfully")
+
+ // Roll back all the dynamic parameter updates after the database has recovered.
+ for k, v := range dynamicParams {
+ _, err := caClient.SetParameter(ctx, &capb.SetParameterRequest{
+ Key: k,
+ Value: v,
+ })
+ if err != nil {
+ log.Error(err, "initiateRecovery: error while rolling back dynamic parameters")
+ return err
+ }
+ }
+ log.Info("initiateRecovery: rollback of dynamic parameters completed successfully", "dynamicParams", dynamicParams)
+ return nil
+}
+
+func (r *InstanceReconciler) sanityCheckTimeRange(inst v1alpha1.Instance, log logr.Logger) (ctrl.Result, error) {
+ if !maintenance.HasValidTimeRanges(inst.Spec.MaintenanceWindow) {
+ return ctrl.Result{}, fmt.Errorf("MaintenanceWindow specification is not valid: %+v", inst.Spec.MaintenanceWindow)
+ }
+
+ now := time.Now()
+
+ if maintenance.InRange(inst.Spec.MaintenanceWindow, now) {
+ return ctrl.Result{}, nil
+ }
+
+ nextStart, _, err := maintenance.NextWindow(inst.Spec.MaintenanceWindow, now)
+
+ // If there are no future maintenance windows (no next window), return an error.
+ if err != nil {
+ return ctrl.Result{}, errors.New("current time is past the maintenance time range")
+ }
+
+ // Otherwise: requeue for processing when the maintenance window opens up.
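+ // For example (times hypothetical): if the next window opens at 02:00 and it is now 23:30,
+ // the reconcile is requeued with a RequeueAfter of 2h30m so the restart lands inside the window.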
+ restartWaitTime := nextStart.Sub(now)
+ log.Info("sanityCheckTimeRange: wait time before restart", "restartWaitTime", restartWaitTime.Seconds())
+ return ctrl.Result{RequeueAfter: restartWaitTime}, errors.New("current time is not within the maintenance time range")
+}
+
+func mapsToStringArray(parameterMap map[string]string) []string {
+ var parameters []string
+ for k, v := range parameterMap {
+ parameters = append(parameters, fmt.Sprintf("%s=%s", k, v))
+ }
+ return parameters
+}
diff --git a/oracle/controllers/instancecontroller/instance_controller_standby.go b/oracle/controllers/instancecontroller/instance_controller_standby.go
new file mode 100644
index 0000000..13e876b
--- /dev/null
+++ b/oracle/controllers/instancecontroller/instance_controller_standby.go
@@ -0,0 +1,73 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instancecontroller
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-logr/logr"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1"
+ v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers"
+ capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos"
+)
+
+func (r *InstanceReconciler) bootstrapStandby(ctx context.Context, inst *v1alpha1.Instance, caClient capb.ConfigAgentClient, log logr.Logger) error {
+ bootstrapResp, err := caClient.BootstrapStandby(ctx, &capb.BootstrapStandbyRequest{
+ CdbName: inst.Spec.CDBName,
+ Version: inst.Spec.Version,
+ Dbdomain: controllers.GetDBDomain(inst),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to bootstrap the standby instance: %v", err)
+ }
+
+ // Create missing resources for the migrated database.
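+ // Each PDB reported by BootstrapStandby is materialized below as a Database custom resource,
+ // together with its users and their privileges, so the operator can manage the migrated PDBs
+ // like ones it provisioned itself.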
+ for _, pdb := range bootstrapResp.GetPdbs() { + var users []v1alpha1.UserSpec + for _, u := range pdb.Users { + var privs []v1alpha1.PrivilegeSpec + for _, p := range u.Privs { + privs = append(privs, v1alpha1.PrivilegeSpec(p)) + } + users = append(users, v1alpha1.UserSpec{ + UserSpec: commonv1alpha1.UserSpec{ + Name: u.UserName, + }, + Privileges: privs, + }) + } + database := &v1alpha1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: inst.GetNamespace(), + Name: pdb.GetPdbName(), + }, + Spec: v1alpha1.DatabaseSpec{ + DatabaseSpec: commonv1alpha1.DatabaseSpec{ + Name: pdb.GetPdbName(), + Instance: inst.GetName(), + }, + Users: users, + }, + } + if err := r.Client.Create(ctx, database); err != nil { + return fmt.Errorf("failed to create database resource: %v", err) + } + } + return nil +} diff --git a/oracle/controllers/instancecontroller/instance_controller_test.go b/oracle/controllers/instancecontroller/instance_controller_test.go new file mode 100644 index 0000000..b20a6f5 --- /dev/null +++ b/oracle/controllers/instancecontroller/instance_controller_test.go @@ -0,0 +1,720 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instancecontroller + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager + images = map[string]string{ + "dbinit": "dbInitImage", + "service": "serviceImage", + "config": "configAgentImage", + "logging_sidecar": "loggingSidecarImage", + } + reconciler *InstanceReconciler + fakeClientFactory *testhelpers.FakeClientFactory +) + +func TestInstanceController(t *testing.T) { + + // Mock functions + CheckStatusInstanceFunc = func(ctx context.Context, instName, cdbName, clusterIP, DBDomain string, log logr.Logger) (string, error) { + return "Ready", nil + } + + fakeClientFactory = &testhelpers.FakeClientFactory{} + + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Instance controller", func() []testhelpers.Reconciler { + reconciler = &InstanceReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Instance"), + Scheme: k8sManager.GetScheme(), + Images: images, + ClientFactory: fakeClientFactory, + Recorder: k8sManager.GetEventRecorderFor("instance-controller"), + } + + return []testhelpers.Reconciler{reconciler} + }) +} + +var _ = Describe("Instance controller", func() { + + // Define utility constants for object names and testing timeouts and intervals. 
+ const (
+ Namespace = "default"
+ InstanceName = "test-instance"
+
+ timeout = time.Second * 15
+ interval = time.Millisecond * 15
+ )
+
+ Context("New instance", func() {
+ It("Should create statefulset/deployment/svc", func() {
+ By("creating a new Instance")
+ ctx := context.Background()
+ instance := &v1alpha1.Instance{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: InstanceName,
+ Namespace: Namespace,
+ },
+ Spec: v1alpha1.InstanceSpec{
+ CDBName: "GCLOUD",
+ GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+ Images: images,
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, instance)).Should(Succeed())
+
+ objKey := client.ObjectKey{Namespace: Namespace, Name: InstanceName}
+
+ By("checking that statefulset/deployment/svc are created")
+ Eventually(
+ func() error {
+ var createdInst v1alpha1.Instance
+ if err := k8sClient.Get(ctx, objKey, &createdInst); err != nil {
+ return err
+ }
+ if cond := k8s.FindCondition(createdInst.Status.Conditions, k8s.Ready); !k8s.ConditionReasonEquals(cond, k8s.CreateInProgress) {
+ return errors.New("expected update has not happened yet")
+ }
+ return nil
+ }, timeout, interval).Should(Succeed())
+
+ var sts appsv1.StatefulSetList
+ Expect(k8sClient.List(ctx, &sts, client.InNamespace(Namespace))).Should(Succeed())
+ Expect(len(sts.Items)).Should(Equal(1))
+
+ var deployment appsv1.DeploymentList
+ Expect(k8sClient.List(ctx, &deployment, client.InNamespace(Namespace))).Should(Succeed())
+ Expect(len(deployment.Items)).Should(Equal(1))
+
+ var svc corev1.ServiceList
+ Expect(k8sClient.List(ctx, &svc, client.InNamespace(Namespace))).Should(Succeed())
+ Expect(len(svc.Items)).Should(Equal(4))
+
+ Expect(k8sClient.Delete(ctx, instance)).Should(Succeed())
+ })
+ })
+
+ Context("instance status observedGeneration and isChangeApplied fields", func() {
+
+ It("should update observedGeneration", func() {
+ fakeClientFactory.Reset()
+ objKey := client.ObjectKey{Namespace: "default", Name: "generation-test-inst"}
+ By("creating a new Instance")
+ ctx := context.Background()
+ instance := v1alpha1.Instance{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: objKey.Name,
+ Namespace: objKey.Namespace,
+ },
+ Spec: v1alpha1.InstanceSpec{
+ CDBName: "GCLOUD",
+ GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+ Images: images,
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, &instance)).Should(Succeed())
+
+ By("checking observed generation matches generation in meta data")
+ Eventually(func() bool {
+ if k8sClient.Get(ctx, objKey, &instance) != nil {
+ return false
+ }
+ return instance.ObjectMeta.Generation == instance.Status.ObservedGeneration
+ }, timeout, interval).Should(Equal(true))
+
+ By("updating instance parameters in spec")
+ oldObservedGeneration := instance.Status.ObservedGeneration
+ parameterMap := map[string]string{"parallel_servers_target": "15"}
+ Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ if err := k8sClient.Get(ctx, objKey, &instance); err != nil {
+ return err
+ }
+ instance.Spec.Parameters = parameterMap
+ oneHourAfter := metav1.NewTime(time.Now().Add(1 * time.Hour))
+ oneHour := metav1.Duration{Duration: time.Hour}
+ instance.Spec.MaintenanceWindow = &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourAfter, Duration: &oneHour}}}
+ return k8sClient.Update(ctx, &instance)
+ })).Should(Succeed())
+
+ By("checking generation in meta data is increased after spec changes")
+ Eventually(func() bool {
+ if k8sClient.Get(ctx, objKey, &instance) != nil {
+ return false
+ }
+ return instance.ObjectMeta.Generation >
oldObservedGeneration + }, timeout, interval).Should(Equal(true)) + + By("checking observed generation matches generation in meta data after reconciliation") + Eventually(func() bool { + if k8sClient.Get(ctx, objKey, &instance) != nil { + return false + } + return instance.ObjectMeta.Generation == instance.Status.ObservedGeneration + }, timeout, interval).Should(Equal(true)) + + By("checking isChangeApplied is false before parameterUpdates is completed") + Expect(instance.Status.IsChangeApplied).Should(Equal(metav1.ConditionFalse)) + + By("updating currentParameters in status to match the parameterMap in spec") + Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := k8sClient.Get(ctx, objKey, &instance); err != nil { + return err + } + instance.Status.CurrentParameters = parameterMap + return k8sClient.Status().Update(ctx, &instance) + })).Should(Succeed()) + + By("checking isChangeApplied is true after parameterUpdates is completed") + Eventually(func() bool { + if k8sClient.Get(ctx, objKey, &instance) != nil { + return false + } + return instance.Status.IsChangeApplied == metav1.ConditionTrue + }, timeout*2, interval).Should(Equal(true)) + + Expect(k8sClient.Delete(ctx, &instance)).Should(Succeed()) + }) + }) + + Context("Existing instance restore from RMAN backup", func() { + var fakeConfigAgentClient *testhelpers.FakeConfigAgentClient + oldPreflightFunc := restorePhysicalPreflightCheck + + BeforeEach(func() { + fakeClientFactory.Reset() + fakeConfigAgentClient = fakeClientFactory.Caclient + + fakeConfigAgentClient.AsyncPhysicalRestore = true + restorePhysicalPreflightCheck = func(ctx context.Context, r *InstanceReconciler, namespace, instName string) error { + return nil + } + }) + + AfterEach(func() { + restorePhysicalPreflightCheck = oldPreflightFunc + }) + + backupName := "test-backup" + backupID := "test-backup-id" + objKey := client.ObjectKey{Namespace: Namespace, Name: InstanceName} + ctx := context.Background() + restoreRequestTime := metav1.Now() + + createInstanceAndStartRestore := func(mode testhelpers.FakeOperationStatus) (*v1alpha1.Instance, *v1alpha1.Backup) { + By("creating a new Instance") + instance := &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: InstanceName, + Namespace: Namespace, + }, + Spec: v1alpha1.InstanceSpec{ + CDBName: "GCLOUD", + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Images: images, + }, + }, + } + + Expect(k8sClient.Create(ctx, instance)).Should(Succeed()) + + By("checking that statefulset/deployment/svc are created") + + Eventually( + func() error { + var createdInst v1alpha1.Instance + if err := k8sClient.Get(ctx, objKey, &createdInst); err != nil { + return err + } + if cond := k8s.FindCondition(createdInst.Status.Conditions, k8s.Ready); !k8s.ConditionReasonEquals(cond, k8s.CreateInProgress) { + return errors.New("expected update has not happened yet") + } + return nil + }, timeout, interval).Should(Succeed()) + + By("setting Instance as Ready") + Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + instance.Status = v1alpha1.InstanceStatus{ + GenericInstanceStatus: commonv1alpha1.GenericInstanceStatus{ + Conditions: []metav1.Condition{ + { + Type: k8s.Ready, + Status: metav1.ConditionTrue, + Reason: k8s.CreateComplete, + LastTransitionTime: metav1.Now().Rfc3339Copy(), + }, + { + Type: k8s.DatabaseInstanceReady, + Status: metav1.ConditionTrue, + Reason: k8s.CreateComplete, + LastTransitionTime: 
metav1.Now().Rfc3339Copy(),
+ },
+ },
+ },
+ }
+ return k8sClient.Status().Update(ctx, instance)
+ })).Should(Succeed())
+
+ trueVar := true
+ By("creating a new RMAN backup")
+ backup := &v1alpha1.Backup{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: Namespace,
+ Name: backupName,
+ },
+ Spec: v1alpha1.BackupSpec{
+ BackupSpec: commonv1alpha1.BackupSpec{
+ Instance: InstanceName,
+ Type: commonv1alpha1.BackupTypePhysical,
+ },
+ Subtype: "Instance",
+ Backupset: &trueVar,
+ },
+ }
+
+ backupObjKey := client.ObjectKey{Namespace: Namespace, Name: backupName}
+ Expect(k8sClient.Create(ctx, backup)).Should(Succeed())
+ Eventually(
+ func() error {
+ return k8sClient.Get(ctx, backupObjKey, backup)
+ }, timeout, interval).Should(Succeed())
+
+ backup.Status = v1alpha1.BackupStatus{
+ BackupStatus: commonv1alpha1.BackupStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: k8s.Ready,
+ Status: metav1.ConditionTrue,
+ Reason: k8s.BackupReady,
+ LastTransitionTime: metav1.Now().Rfc3339Copy(),
+ },
+ },
+ Phase: commonv1alpha1.BackupSucceeded,
+ },
+ BackupID: backupID,
+ }
+ Expect(k8sClient.Status().Update(ctx, backup)).Should(Succeed())
+
+ By("invoking RMAN restore for the Instance")
+
+ // Configure the fake ConfigAgent to be in the requested mode.
+ fakeConfigAgentClient.NextGetOperationStatus = mode
+ Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ if err := k8sClient.Get(ctx, objKey, instance); err != nil {
+ return err
+ }
+ instance.Spec.Restore = &v1alpha1.RestoreSpec{
+ BackupID: backupID,
+ BackupType: "Physical",
+ Force: true,
+ RequestTime: restoreRequestTime,
+ }
+ return k8sClient.Update(ctx, instance)
+ })).Should(Succeed())
+
+ return instance, backup
+ }
+
+ // Extract the happy-path restore test into a function for reuse
+ // in multiple tests.
+ testCaseHappyPathLRORestore := func() (*v1alpha1.Instance, *v1alpha1.Backup) {
+ instance, backup := createInstanceAndStartRestore(testhelpers.StatusRunning)
+
+ By("verifying restore LRO was started")
+ Eventually(func() (string, error) {
+ return getConditionReason(ctx, objKey, k8s.Ready)
+ }, timeout, interval).Should(Equal(k8s.RestoreInProgress))
+
+ Expect(k8sClient.Get(ctx, objKey, instance)).Should(Succeed())
+ Expect(instance.Status.LastRestoreTime).ShouldNot(BeNil())
+ Expect(instance.Status.LastRestoreTime.UnixNano()).Should(Equal(restoreRequestTime.Rfc3339Copy().UnixNano()))
+
+ Eventually(func() int {
+ return fakeConfigAgentClient.PhysicalRestoreCalledCnt
+ }, timeout, interval).Should(Equal(1))
+
+ By("checking that instance is Ready on restore LRO completion")
+ fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDone
+ Expect(triggerReconcile(ctx, objKey)).Should(Succeed())
+
+ Eventually(func() (metav1.ConditionStatus, error) {
+ return getConditionStatus(ctx, objKey, k8s.Ready)
+ }, timeout, interval).Should(Equal(metav1.ConditionTrue))
+ Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt).Should(Equal(1))
+
+ By("checking that instance Restore section is deleted")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, objKey, instance); err != nil {
+ return err
+ }
+ if instance.Spec.Restore != nil {
+ return fmt.Errorf("expected update has not yet happened")
+ }
+ return nil
+ }, timeout, interval).Should(Succeed())
+
+ By("checking that instance Status.Description is updated")
+ Expect(instance.Status.Description).Should(HavePrefix("Restored on"))
+ Expect(instance.Status.Description).Should(ContainSubstring(backupID))
+
+ return instance, backup
+ }
+
+ It("it should restore
successfully in LRO mode", func() { + instance, backup := testCaseHappyPathLRORestore() + + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, backup)).Should(Succeed()) + }) + + It("it should NOT attempt to restore with the same RequestTime", func() { + instance, backup := testCaseHappyPathLRORestore() + + oldPhysicalRestoreCalledCnt := fakeConfigAgentClient.PhysicalRestoreCalledCnt + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusRunning + + By("restoring from same backup with same RequestTime") + Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + instance.Spec.Restore = &v1alpha1.RestoreSpec{ + BackupID: backupID, + BackupType: "Physical", + Force: true, + RequestTime: restoreRequestTime, + } + return k8sClient.Update(ctx, instance) + })).Should(Succeed()) + + By("verifying restore was not run") + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + Expect(fakeConfigAgentClient.PhysicalRestoreCalledCnt).Should(Equal(oldPhysicalRestoreCalledCnt)) + + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, backup)).Should(Succeed()) + }) + + It("it should run new restore with a later RequestTime", func() { + + instance, backup := testCaseHappyPathLRORestore() + + // reset method call counters used later + fakeConfigAgentClient.Reset() + fakeConfigAgentClient.AsyncPhysicalRestore = true + + By("restoring from same backup with later RequestTime") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusRunning + secondRestoreRequestTime := metav1.NewTime(restoreRequestTime.Rfc3339Copy().Add(time.Second)) + + Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + instance.Spec.Restore = &v1alpha1.RestoreSpec{ + BackupID: backupID, + BackupType: "Physical", + Force: true, + RequestTime: secondRestoreRequestTime, + } + return k8sClient.Update(ctx, instance) + })).Should(Succeed()) + + By("verifying restore was started") + Eventually(func() (string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.RestoreInProgress)) + + By("checking that instance is Ready on restore LRO completion") + fakeConfigAgentClient.NextGetOperationStatus = testhelpers.StatusDone + Expect(triggerReconcile(ctx, objKey)).Should(Succeed()) + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt).Should(Equal(1)) + Expect(fakeConfigAgentClient.PhysicalRestoreCalledCnt).Should(Equal(1)) + + By("checking Status.LastRestoreTime was updated") + Expect(k8sClient.Get(ctx, objKey, instance)).Should(Succeed()) + Expect(instance.Status.LastRestoreTime.UnixNano()).Should(Equal(secondRestoreRequestTime.UnixNano())) + + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, backup)).Should(Succeed()) + }) + + It("it should handle failure in LRO operation", func() { + + instance, backup := createInstanceAndStartRestore(testhelpers.StatusDoneWithError) + + By("checking that instance has RestoreFailed status") + Expect(triggerReconcile(ctx, objKey)).Should(Succeed()) + Eventually(func() 
(string, error) { + return getConditionReason(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(k8s.RestoreFailed)) + Eventually(fakeConfigAgentClient.DeleteOperationCalledCnt, timeout, interval).Should(Equal(1)) + + By("checking that instance Restore section is deleted") + Eventually(func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + if instance.Spec.Restore != nil { + return fmt.Errorf("expected update has not yet happened") + } + return nil + }, timeout, interval).Should(Succeed()) + + By("checking that instance Status.Description is updated") + cond := k8s.FindCondition(instance.Status.Conditions, k8s.Ready) + Expect(cond.Message).Should(HavePrefix("Failed to restore on")) + Expect(cond.Message).Should(ContainSubstring(backupID)) + + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, backup)).Should(Succeed()) + }) + + It("it should restore successfully in sync mode", func() { + + fakeConfigAgentClient.AsyncPhysicalRestore = false + instance, backup := createInstanceAndStartRestore(testhelpers.StatusNotFound) + + By("checking that instance status is Ready") + Eventually(func() (metav1.ConditionStatus, error) { + return getConditionStatus(ctx, objKey, k8s.Ready) + }, timeout, interval).Should(Equal(metav1.ConditionTrue)) + + Expect(fakeConfigAgentClient.DeleteOperationCalledCnt()).Should(Equal(0)) + + By("checking that instance Restore section is deleted") + Eventually(func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + if instance.Spec.Restore != nil { + return fmt.Errorf("expected update has not yet happened") + } + return nil + }, timeout, interval).Should(Succeed()) + + By("checking that instance Status.Description is updated") + Eventually(func() error { + if err := k8sClient.Get(ctx, objKey, instance); err != nil { + return err + } + if !strings.HasPrefix(instance.Status.Description, "Restored on") { + return fmt.Errorf("%q does not have expected prefix", instance.Status.Description) + } + if !strings.Contains(instance.Status.Description, backupID) { + return fmt.Errorf("%q does not contain %q", instance.Status.Description, backupID) + } + return nil + }, timeout, interval).Should(Succeed()) + + Expect(k8sClient.Delete(ctx, instance)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, backup)).Should(Succeed()) + }) + }) +}) + +func TestSanityCheckForReservedParameters(t *testing.T) { + twoHourBefore := metav1.NewTime(time.Now().Add(-2 * time.Hour)) + oneHourBefore := metav1.NewTime(time.Now().Add(-time.Hour)) + oneHourAfter := metav1.NewTime(time.Now().Add(time.Hour)) + oneHour := metav1.Duration{Duration: time.Hour} + twoHours := metav1.Duration{Duration: 2 * time.Hour} + + tests := []struct { + name string + parameterKey string + parameterVal string + expectedError bool + maintenanceWindow commonv1alpha1.MaintenanceWindowSpec + }{ + { + name: "should return error for reserved parameters", + parameterKey: "audit_trail", + parameterVal: "/some/directory", + expectedError: true, + maintenanceWindow: commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourBefore, Duration: &twoHours}}}, + }, + { + name: "should not return error for valid parameters", + parameterKey: "parallel_servers_target", + parameterVal: "15", + expectedError: false, + maintenanceWindow: commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourBefore, Duration: &twoHours}}}, + }, + { + name: "should return 
error for elapsed past time range",
+ parameterKey: "parallel_servers_target",
+ parameterVal: "15",
+ expectedError: true,
+ maintenanceWindow: commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &twoHourBefore, Duration: &oneHour}}},
+ },
+ {
+ name: "should return error for current time not within time range",
+ parameterKey: "parallel_servers_target",
+ parameterVal: "15",
+ expectedError: true,
+ maintenanceWindow: commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourAfter, Duration: &oneHour}}},
+ },
+ }
+ ctx := context.Background()
+ fakeClientFactory := &testhelpers.FakeClientFactory{}
+ fakeClientFactory.Reset()
+ fakeConfigAgentClient := fakeClientFactory.Caclient
+
+ for _, tc := range tests {
+ instanceSpec := v1alpha1.InstanceSpec{
+ GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+ Parameters: map[string]string{tc.parameterKey: tc.parameterVal},
+ MaintenanceWindow: &tc.maintenanceWindow,
+ },
+ }
+ _, _, err := fetchCurrentParameterState(ctx, fakeConfigAgentClient, instanceSpec)
+ if tc.expectedError && err == nil {
+ t.Fatalf("TestSanityCheckForReservedParameters expected an error for test case: %v", tc.name)
+ } else if !tc.expectedError && err != nil {
+ t.Fatalf("TestSanityCheckForReservedParameters didn't expect an error for test case: %v", tc.name)
+ }
+ }
+}
+
+func getConditionReason(ctx context.Context, objKey client.ObjectKey, cType string) (string, error) {
+ var instance v1alpha1.Instance
+ if err := k8sClient.Get(ctx, objKey, &instance); err != nil {
+ return "", err
+ }
+
+ if cond := k8s.FindCondition(instance.Status.Conditions, cType); cond != nil {
+ return cond.Reason, nil
+ }
+ return "", nil
+}
+
+func getConditionStatus(ctx context.Context, objKey client.ObjectKey, cType string) (metav1.ConditionStatus, error) {
+ var instance v1alpha1.Instance
+ if err := k8sClient.Get(ctx, objKey, &instance); err != nil {
+ return "", err
+ }
+ if cond := k8s.FindCondition(instance.Status.Conditions, cType); cond != nil {
+ return cond.Status, nil
+ }
+ return metav1.ConditionUnknown, nil
+}
+
+// triggerReconcile invokes a k8s reconcile action by updating
+// an irrelevant field.
+func triggerReconcile(ctx context.Context, objKey client.ObjectKey) error {
+ var instance v1alpha1.Instance
+ if err := k8sClient.Get(ctx, objKey, &instance); err != nil {
+ return err
+ }
+ instance.Spec.MemoryPercent = (instance.Spec.MemoryPercent + 1) % 100
+
+ err := k8sClient.Update(ctx, &instance)
+ if k8serrors.IsConflict(err) {
+ return nil
+ }
+ return err
+}
+
+func createSimpleInstance(ctx context.Context, instanceName string, namespace string, timeout time.Duration, interval time.Duration) *v1alpha1.Instance {
+ By("Creating a new Instance")
+ images := map[string]string{"service": "image"}
+ instance := &v1alpha1.Instance{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: instanceName,
+ Namespace: namespace,
+ },
+ Spec: v1alpha1.InstanceSpec{
+ CDBName: "GCLOUD",
+ GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+ Images: images,
+ },
+ },
+ }
+
+ Expect(k8sClient.Create(ctx, instance)).Should(Succeed())
+ createdInstance := &v1alpha1.Instance{}
+ // We'll need to retry getting this newly created Instance, given that creation may not immediately happen.
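+ // Eventually polls the Get below until it succeeds or times out, absorbing the delay between
+ // the Create call above and the object becoming readable through the client.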
+ Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instance.Name}, createdInstance) + }, timeout, interval).Should(Succeed()) + + Expect(retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instance.Name}, instance); client.IgnoreNotFound(err) != nil { + return err + } + instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, "") + instance.Status.Conditions = k8s.Upsert(instance.Status.Conditions, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, "") + return k8sClient.Status().Update(ctx, instance) + })).Should(Succeed()) + + By("By creating an agent service") + agentSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-instance-agent-svc", + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 9999}}}, + } + // Might fail if the resource already exists + k8sClient.Create(ctx, agentSvc) + + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: "test-instance-agent-svc", Namespace: namespace}, agentSvc) + }, timeout, interval).Should(Succeed()) + + fakeClientFactory.Reset() + + return instance +} diff --git a/oracle/controllers/inttest/.bazelrc.tmpl b/oracle/controllers/inttest/.bazelrc.tmpl new file mode 100644 index 0000000..fd1a0a8 --- /dev/null +++ b/oracle/controllers/inttest/.bazelrc.tmpl @@ -0,0 +1,5 @@ +test --test_env=TEST_IMAGE_ORACLE_18_XE_SEEDED=$TEST_IMAGE_ORACLE_18_XE_SEEDED +test --test_env=TEST_IMAGE_ORACLE_19_3_EE_SEEDED=$TEST_IMAGE_ORACLE_19_3_EE_SEEDED +test --test_env=TEST_IMAGE_ORACLE_12_2_EE_UNSEEDED_31741641=$TEST_IMAGE_ORACLE_12_2_EE_UNSEEDED_31741641 +test --test_env=TEST_IMAGE_ORACLE_12_2_EE_SEEDED_BUGGY=$TEST_IMAGE_ORACLE_12_2_EE_SEEDED_BUGGY +test --test_env=TEST_IMAGE_ORACLE_12_2_EE_SEEDED=$TEST_IMAGE_ORACLE_12_2_EE_SEEDED diff --git a/oracle/controllers/inttest/BUILD.bazel b/oracle/controllers/inttest/BUILD.bazel new file mode 100644 index 0000000..e69de29 diff --git a/oracle/controllers/inttest/datapumptest/BUILD.bazel b/oracle/controllers/inttest/datapumptest/BUILD.bazel new file mode 100644 index 0000000..cf974bb --- /dev/null +++ b/oracle/controllers/inttest/datapumptest/BUILD.bazel @@ -0,0 +1,22 @@ +load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test") + +# gazelle:map_kind go_test ginkgo_test //oracle:scripts/ginkgo_test.bzl + +ginkgo_test( + name = "datapumptest_test", + timeout = "eternal", # 60m + srcs = ["datapump_test.go"], + nodes = 3, + tags = ["integration"], + deps = [ + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/envtest/printer", + ], +) diff --git a/oracle/controllers/inttest/datapumptest/datapump_test.go b/oracle/controllers/inttest/datapumptest/datapump_test.go new file mode 100644 index 0000000..7b26a98 --- /dev/null +++ b/oracle/controllers/inttest/datapumptest/datapump_test.go @@ -0,0 +1,208 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datapumptest + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + // Enable GCP auth for k8s client + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +// Global variable, to be accessible by AfterSuite +var k8sEnv = testhelpers.K8sOperatorEnvironment{} + +// In case of Ctrl-C clean up the last valid k8sEnv. +var _ = AfterSuite(func() { + k8sEnv.Close() +}) + +var _ = Describe("Datapump", func() { + var namespace string + var instanceName = "mydb" + var pod = instanceName + "-sts-0" + + BeforeEach(func() { + namespace = testhelpers.RandName("datapump-test") + k8sEnv.Init(namespace) + + // Allow the k8s [namespace/default] service account access to GCS buckets + testhelpers.SetupServiceAccountBindingBetweenGcpAndK8s(k8sEnv) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, + []string{"manager", "dbdaemon", "oracledb"}, []string{instanceName}) + testhelpers.PrintClusterObjects() + } + k8sEnv.Close() + }) + + testDataPump := func(version string, edition string) { + It("Should create instance and export data", func() { + testhelpers.CreateSimpleInstance(k8sEnv, instanceName, version, edition) + + instKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: instanceName} + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 15*time.Minute) + + testhelpers.CreateSimplePDB(k8sEnv, instanceName) + testhelpers.InsertSimpleData(k8sEnv) + + By("Creating a new Schema export") + schemaExport := &v1alpha1.Export{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "export-schemas", + }, + Spec: v1alpha1.ExportSpec{ + Instance: instanceName, + DatabaseName: "pdb1", + Type: "DataPump", + ExportObjectType: "Schemas", + ExportObjects: []string{"scott"}, + FlashbackTime: &metav1.Time{Time: time.Now()}, + GcsPath: fmt.Sprintf("gs://%s/%s/%s/exportSchema.dmp", + os.Getenv("PROW_PROJECT"), os.Getenv("PROW_CLUSTER"), namespace), + }, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, schemaExport) + + By("Waiting for export to complete") + { + createdExport := &v1alpha1.Export{} + objKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: schemaExport.Name} + testhelpers.WaitForObjectConditionState(k8sEnv, + objKey, createdExport, k8s.Ready, metav1.ConditionTrue, k8s.ExportComplete, 2*time.Minute) + } + + By("Creating a new Table export") + tableExport := &v1alpha1.Export{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "export-tables", + }, + Spec: v1alpha1.ExportSpec{ + Instance: instanceName, + DatabaseName: "pdb1", + Type: 
"DataPump", + ExportObjectType: "Tables", + ExportObjects: []string{"scott.test_table"}, + FlashbackTime: &metav1.Time{Time: time.Now()}, + GcsPath: fmt.Sprintf("gs://%s/%s/%s/exportTables.dmp", + os.Getenv("PROW_PROJECT"), os.Getenv("PROW_CLUSTER"), namespace), + }, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, tableExport) + + By("Waiting for export to complete") + { + createdExport := &v1alpha1.Export{} + objKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: tableExport.Name} + testhelpers.WaitForObjectConditionState(k8sEnv, + objKey, createdExport, k8s.Ready, metav1.ConditionTrue, k8s.ExportComplete, 2*time.Minute) + } + + By("Erasing scott user") + sql := `alter session set container=pdb1; +drop user scott cascade;` + testhelpers.K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, sql) + + By("Importing Schemas") + schemaImport := &v1alpha1.Import{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "import-schemas", + }, + Spec: v1alpha1.ImportSpec{ + Instance: instanceName, + DatabaseName: "pdb1", + GcsPath: schemaExport.Spec.GcsPath, + }, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, schemaImport) + + By("Waiting for schema import to complete") + { + createdImport := &v1alpha1.Import{} + objKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: schemaImport.Name} + testhelpers.WaitForObjectConditionState(k8sEnv, + objKey, createdImport, k8s.Ready, metav1.ConditionTrue, k8s.ImportComplete, 2*time.Minute) + } + + By("Granting unlimited tablespace to scott") + sql = `alter session set container=pdb1; +grant unlimited tablespace to scott; +alter session set current_schema=scott; +drop table test_table;` + testhelpers.K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, sql) + + By("Importing Tables") + tableImport := &v1alpha1.Import{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "import-tables", + }, + Spec: v1alpha1.ImportSpec{ + Instance: instanceName, + DatabaseName: "pdb1", + GcsPath: tableExport.Spec.GcsPath, + }, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, tableImport) + By("Waiting for table import to complete") + { + createdImport := &v1alpha1.Import{} + objKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: tableImport.Name} + testhelpers.WaitForObjectConditionState(k8sEnv, + objKey, createdImport, k8s.Ready, metav1.ConditionTrue, k8s.ImportComplete, 2*time.Minute) + } + + testhelpers.VerifySimpleData(k8sEnv) + }) + } + + Context("Oracle 12c", func() { + testDataPump("12.2", "EE") + }) + + Context("Oracle 19c", func() { + testDataPump("19.3", "EE") + }) + + Context("Oracle 18c XE", func() { + testDataPump("18c", "XE") + }) +}) + +func TestDataPump(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, + t.Name(), + []Reporter{printer.NewlineReporter{}}) +} diff --git a/oracle/controllers/inttest/instancetest/BUILD.bazel b/oracle/controllers/inttest/instancetest/BUILD.bazel new file mode 100644 index 0000000..64bf56a --- /dev/null +++ b/oracle/controllers/inttest/instancetest/BUILD.bazel @@ -0,0 +1,26 @@ +load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test") + +# gazelle:map_kind go_test ginkgo_test //oracle:scripts/ginkgo_test.bzl + +ginkgo_test( + name = "instancetest_test", + timeout = "eternal", # 60m + srcs = ["instance_test.go"], + nodes = 3, + tags = ["integration"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + 
"@com_github_onsi_gomega//:gomega", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/envtest/printer", + ], +) diff --git a/oracle/controllers/inttest/instancetest/instance_test.go b/oracle/controllers/inttest/instancetest/instance_test.go new file mode 100644 index 0000000..a2f33f0 --- /dev/null +++ b/oracle/controllers/inttest/instancetest/instance_test.go @@ -0,0 +1,160 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instancetest + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + // Enable GCP auth for k8s client + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +// Made global to be accessible by AfterSuite +var k8sEnv = testhelpers.K8sOperatorEnvironment{} + +// In case of Ctrl-C clean up the last valid k8sEnv. +var _ = AfterSuite(func() { + k8sEnv.Close() +}) + +var _ = Describe("Instance and Database provisioning", func() { + var namespace string + var instanceName string + + BeforeEach(func() { + namespace = testhelpers.RandName("instance-crd-test") + instanceName = "mydb" + k8sEnv.Init(namespace) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, []string{"manager", "dbdaemon", "oracledb"}, []string{instanceName}) + testhelpers.PrintClusterObjects() + } + k8sEnv.Close() + }) + + TestInstanceCreationAndDatabaseProvisioning := func(version string, edition string) { + It("Should create instance and provision database", func() { + ctx := context.Background() + k8sClient, err := client.New(k8sEnv.Env.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + By("By creating a new Instance") + instance := &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: instanceName, + Namespace: namespace, + }, + Spec: v1alpha1.InstanceSpec{ + // Keep the CDBName in the spec different from the CDB name in the image (GCLOUD). + // Doing this implicitly test the CDB renaming feature. 
+ CDBName: "mydb", + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Version: version, + Disks: []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, + }, + MinMemoryForDBContainer: "7.0Gi", + Images: map[string]string{ + "service": testhelpers.TestImageForVersion(version, edition, ""), + }, + }, + }, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, instance) + + createdInstance := &v1alpha1.Instance{} + instKey := client.ObjectKey{Namespace: namespace, Name: instanceName} + + By("By checking that Instance is created") + // Wait until the instance is "Ready" (requires 5+ minutes to download image) + Eventually(func() metav1.ConditionStatus { + Expect(k8sClient.Get(ctx, instKey, createdInstance)).Should(Succeed()) + cond := k8s.FindCondition(createdInstance.Status.Conditions, k8s.Ready) + if cond != nil { + return cond.Status + } + return metav1.ConditionUnknown + }, 10*time.Minute, 5*time.Second).Should(Equal(metav1.ConditionTrue)) + + By("By checking that Database is provisioned") + Eventually(func() metav1.ConditionStatus { + Expect(k8sClient.Get(ctx, instKey, createdInstance)).Should(Succeed()) + cond := k8s.FindCondition(createdInstance.Status.Conditions, k8s.DatabaseInstanceReady) + if cond != nil { + return cond.Status + } + return metav1.ConditionUnknown + }, 20*time.Minute, 5*time.Second).Should(Equal(metav1.ConditionTrue)) + + By("By checking that statefulset/deployment/svc are created") + var sts appsv1.StatefulSetList + Expect(k8sClient.List(ctx, &sts, client.InNamespace(namespace))).Should(Succeed()) + Expect(len(sts.Items)).Should(Equal(1)) + + var deployment appsv1.DeploymentList + Expect(k8sClient.List(ctx, &deployment, client.InNamespace(namespace))).Should(Succeed()) + Expect(len(deployment.Items)).Should(Equal(2)) + + var svc corev1.ServiceList + Expect(k8sClient.List(ctx, &svc, client.InNamespace(namespace))).Should(Succeed()) + Expect(len(svc.Items)).Should(Equal(4)) + }) + } + + Context("Oracle 12.2 EE", func() { + TestInstanceCreationAndDatabaseProvisioning("12.2", "EE") + }) + + Context("Oracle 19.3 EE", func() { + TestInstanceCreationAndDatabaseProvisioning("19.3", "EE") + }) + + Context("Oracle 18c XE", func() { + TestInstanceCreationAndDatabaseProvisioning("18c", "XE") + }) +}) + +func TestInstance(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, + t.Name(), + []Reporter{printer.NewlineReporter{}}) +} diff --git a/oracle/controllers/inttest/parameterupdatetest/BUILD.bazel b/oracle/controllers/inttest/parameterupdatetest/BUILD.bazel new file mode 100644 index 0000000..43e2f22 --- /dev/null +++ b/oracle/controllers/inttest/parameterupdatetest/BUILD.bazel @@ -0,0 +1,25 @@ +load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test") + +# gazelle:map_kind go_test ginkgo_test //oracle:scripts/ginkgo_test.bzl + +ginkgo_test( + name = "parameterupdatetest_test", + timeout = "eternal", # 60m + srcs = ["parameter_update_test.go"], + nodes = 6, + tags = ["integration"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/log", + 
"@io_k8s_sigs_controller_runtime//pkg/log/zap", + ], +) diff --git a/oracle/controllers/inttest/parameterupdatetest/parameter_update_test.go b/oracle/controllers/inttest/parameterupdatetest/parameter_update_test.go new file mode 100644 index 0000000..6203b34 --- /dev/null +++ b/oracle/controllers/inttest/parameterupdatetest/parameter_update_test.go @@ -0,0 +1,196 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parameterupdatetest + +import ( + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + // Enable GCP auth for k8s client + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +func TestParameterUpdate(t *testing.T) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + RegisterFailHandler(Fail) + RunSpecs(t, "ParameterUpdate") +} + +// Global variable, to be accessible by AfterSuite +var k8sEnv = testhelpers.K8sOperatorEnvironment{} + +// In case of Ctrl-C clean up the last valid k8sEnv. 
+var _ = AfterSuite(func() {
+	k8sEnv.Close()
+})
+
+var _ = Describe("ParameterUpdate", func() {
+	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+	log := logf.Log
+	pod := "mydb-sts-0"
+	instanceName := "mydb"
+
+	BeforeEach(func() {
+		nameSpace := testhelpers.RandName("parameter-update-test")
+		k8sEnv.Init(nameSpace)
+	})
+
+	AfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, []string{"manager", "dbdaemon", "oracledb"}, []string{instanceName})
+			testhelpers.PrintClusterObjects()
+		}
+		k8sEnv.Close()
+	})
+
+	TestParameterUpdateForCorrectParameters := func(version string, edition string) {
+		It("Test successful parameter update", func() {
+			testhelpers.CreateSimpleInstance(k8sEnv, instanceName, version, edition)
+
+			// Wait until DatabaseInstanceReady = True
+			instKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: instanceName}
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 15*time.Minute)
+
+			// Create PDB
+			testhelpers.CreateSimplePDB(k8sEnv, instanceName)
+
+			By("DB is ready, initiating parameter update")
+
+			// Generate a random value for the parameter, whose max is 100.
+			randVal := strconv.Itoa(1 + (rand.New(rand.NewSource(time.Now().UnixNano() / 1000)).Intn(100)))
+
+			createdInstance := &v1alpha1.Instance{}
+			testhelpers.K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx,
+				instKey,
+				createdInstance,
+				func(obj *runtime.Object) {
+					instanceToUpdate := (*obj).(*v1alpha1.Instance)
+					// Add the required parameters to the instance spec.
+					oneHourBefore := metav1.NewTime(time.Now().Add(-1 * time.Hour))
+					twoHours := metav1.Duration{Duration: 2 * time.Hour}
+					instanceToUpdate.Spec.MaintenanceWindow = &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourBefore, Duration: &twoHours}}}
+
+					instanceToUpdate.Spec.Parameters = map[string]string{
+						"parallel_threads_per_cpu": randVal, // dynamic parameter
+						"disk_asynch_io":           "true",  // static parameter
+					}
+				})
+
+			// Verify the controller enters the ParameterUpdateInProgress state.
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionFalse, k8s.ParameterUpdateInProgress, 60*time.Second)
+
+			// Wait until the instance settles into "Ready" again
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, 20*time.Minute)
+
+			Expect(fetchParameterValue(pod, "parallel_threads_per_cpu")).To(Equal(randVal))
+			Expect(fetchParameterValue(pod, "disk_asynch_io")).To(Equal("TRUE"))
+		})
+	}
+
+	TestParameterUpdateFailureAndRollback := func(version string, edition string) {
+		It("Test parameter update failure and rollback", func() {
+			testhelpers.CreateSimpleInstance(k8sEnv, instanceName, version, edition)
+
+			// Wait until DatabaseInstanceReady = True
+			instKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: instanceName}
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 15*time.Minute)
+
+			// Create PDB
+			testhelpers.CreateSimplePDB(k8sEnv, instanceName)
+			By("DB is ready, initiating parameter update")
+
+			createdInstance := &v1alpha1.Instance{}
+			parallelThreadCountPreUpdate := fetchParameterValue(pod, "parallel_threads_per_cpu")
+			diskAsyncValuePreUpdate := fetchParameterValue(pod, "disk_asynch_io")
+			memMaxTargetPreUpdate := fetchParameterValue(pod, "memory_max_target")
+
+			testhelpers.K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx,
+				instKey,
+				createdInstance,
+				func(obj *runtime.Object) {
+					instanceToUpdate := (*obj).(*v1alpha1.Instance)
+					// Add the required parameters to the instance spec.
+					oneHourBefore := metav1.NewTime(time.Now().Add(-1 * time.Hour))
+					twoHours := metav1.Duration{Duration: 2 * time.Hour}
+					instanceToUpdate.Spec.MaintenanceWindow = &commonv1alpha1.MaintenanceWindowSpec{TimeRanges: []commonv1alpha1.TimeRange{{Start: &oneHourBefore, Duration: &twoHours}}}
+					// Generate a random value for the parameter, whose max is 100.
+					randVal := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano() / 1000)).Intn(100))
+					log.Info("The generated random value is ", "rand val", randVal)
+
+					instanceToUpdate.Spec.Parameters = map[string]string{
+						"parallel_threads_per_cpu": randVal, // dynamic parameter
+						"disk_asynch_io":           "true",  // static parameter
+						"memory_max_target":        "1",     // bad static parameter value.
+					}
+				})
+
+			// Verify the controller enters the ParameterUpdateInProgress state.
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionFalse, k8s.ParameterUpdateInProgress, 10*time.Second)
+
+			// Verify the instance transitions to the ParameterUpdateRollback state because the database is unable to restart.
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionFalse, k8s.ParameterUpdateRollback, 5*time.Minute)
+			// Wait until the instance settles into "Ready" again
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, 20*time.Minute)
+
+			// Verify the rollback by checking that each current parameter value matches its pre-update value.
+			Expect(fetchParameterValue(pod, "parallel_threads_per_cpu")).To(Equal(parallelThreadCountPreUpdate))
+			Expect(fetchParameterValue(pod, "disk_asynch_io")).To(Equal(diskAsyncValuePreUpdate))
+			Expect(fetchParameterValue(pod, "memory_max_target")).To(Equal(memMaxTargetPreUpdate))
+		})
+	}
+
+	Context("Oracle 12.2 EE", func() {
+		TestParameterUpdateForCorrectParameters("12.2", "EE")
+		TestParameterUpdateFailureAndRollback("12.2", "EE")
+	})
+
+	Context("Oracle 19.3 EE", func() {
+		TestParameterUpdateForCorrectParameters("19.3", "EE")
+		TestParameterUpdateFailureAndRollback("19.3", "EE")
+	})
+
+	Context("Oracle 18c XE", func() {
+		TestParameterUpdateForCorrectParameters("18c", "XE")
+		TestParameterUpdateFailureAndRollback("18c", "XE")
+	})
+})
+
+func fetchParameterValue(pod string, parameter string) string {
+	out := testhelpers.K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, fmt.Sprintf("SHOW PARAMETERS %s;", parameter))
+	// The output of the above query looks like
+	//   parallel_threads_per_cpu\t integer\t 1000
+	// The following command extracts the required last column.
+	// The alternate approach of querying the system parameter (shown below)
+	// doesn't seem to work with bash due to the dollar sign:
+	//   SELECT value from v$parameter where name='parallel_servers_target'
+	shellCmd := "echo '%s' | sed 's/ //g' | tr -s '\\t' | tr '\\t' '|' | cut -d '|' -f3"
+	out, _ = testhelpers.K8sExec(pod, k8sEnv.Namespace, "oracledb", fmt.Sprintf(shellCmd, out))
+	return out
+}
diff --git a/oracle/controllers/inttest/physbackuptest/BUILD.bazel b/oracle/controllers/inttest/physbackuptest/BUILD.bazel
new file mode 100644
index 0000000..9b05243
--- /dev/null
+++ b/oracle/controllers/inttest/physbackuptest/BUILD.bazel
@@ -0,0 +1,26 @@
+load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test")
+
+# gazelle:map_kind go_test ginkgo_test 
//oracle:scripts/ginkgo_test.bzl + +ginkgo_test( + name = "physbackuptest_test", + timeout = "eternal", # 60m + srcs = ["physical_backup_test.go"], + nodes = 9, + tags = ["integration"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/envtest/printer", + "@io_k8s_sigs_controller_runtime//pkg/log", + ], +) diff --git a/oracle/controllers/inttest/physbackuptest/physical_backup_test.go b/oracle/controllers/inttest/physbackuptest/physical_backup_test.go new file mode 100644 index 0000000..ce800ef --- /dev/null +++ b/oracle/controllers/inttest/physbackuptest/physical_backup_test.go @@ -0,0 +1,306 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package physbackuptest + +import ( + "fmt" + "os" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +type backupTestCase struct { + name string + contextTitle string + instanceName string + backupName string + instanceSpec v1alpha1.InstanceSpec + backupSpec v1alpha1.BackupSpec +} + +// Made global to be accessible by AfterSuite +var k8sEnv = testhelpers.K8sOperatorEnvironment{} + +// In case of Ctrl-C clean up the last valid k8sEnv. 
+var _ = AfterSuite(func() { + k8sEnv.Close() +}) + +var _ = Describe("Instance and Database provisioning", func() { + var namespace string + + BeforeEach(func() { + namespace = testhelpers.RandName("physical-backup-test") + k8sEnv.Init(namespace) + + // Allow the k8s [namespace/default] service account access to GCS buckets + testhelpers.SetupServiceAccountBindingBetweenGcpAndK8s(k8sEnv) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, + []string{"manager", "dbdaemon", "oracledb"}, + []string{"mydb"}) + testhelpers.PrintClusterObjects() + } + k8sEnv.Close() + }) + + BackupTest := func(tc backupTestCase) { + Context(tc.contextTitle, func() { + It("Should create rman based backup successfully", func() { + log := logf.Log + + By("By creating an instance") + instance := &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: tc.instanceName, + Namespace: namespace, + }, + Spec: tc.instanceSpec, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, instance) + instKey := client.ObjectKey{Namespace: namespace, Name: tc.instanceName} + + // Wait until the instance is "Ready" (requires 5+ minutes to download image) + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, 10*time.Minute) + + By("By letting instance DB initialize") + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 10*time.Minute) + + testhelpers.CreateSimplePDB(k8sEnv, tc.instanceName) + testhelpers.InsertSimpleData(k8sEnv) + + By("By creating a physical based backup") + backupName := tc.backupName + backup := &v1alpha1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: backupName, + }, + Spec: tc.backupSpec, + } + testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, backup) + + By("By checking backup object is created and ready") + backupKey := client.ObjectKey{Namespace: namespace, Name: backupName} + + var createdBackup v1alpha1.Backup + var cond *metav1.Condition + // Wait until the backup is ready or failed. 
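+			// Treating BackupFailed as a terminal state lets the poll below exit
+			// early on a failed backup instead of waiting out the full timeout;
+			// the BackupReady assertion that follows then surfaces the failure.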
+ Eventually(func() bool { + Expect(k8sEnv.K8sClient.Get(k8sEnv.Ctx, backupKey, &createdBackup)).Should(Succeed()) + cond = k8s.FindCondition(createdBackup.Status.Conditions, k8s.Ready) + return k8s.ConditionStatusEquals(cond, metav1.ConditionTrue) || + k8s.ConditionReasonEquals(cond, k8s.BackupFailed) + }, 7*time.Minute, 5*time.Second).Should(Equal(true)) + log.Info("Backup status", "status", cond.Status, "message", cond.Message) + Expect(cond.Reason).Should(Equal(k8s.BackupReady)) + + By("By restoring an instance from backup") + instKey = client.ObjectKey{Namespace: namespace, Name: tc.instanceName} + testhelpers.K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, + instKey, + instance, + func(obj *runtime.Object) { + instanceToUpdate := (*obj).(*v1alpha1.Instance) + instanceToUpdate.Spec.Restore = &v1alpha1.RestoreSpec{ + BackupType: createdBackup.Spec.Type, + BackupID: createdBackup.Status.BackupID, + Force: true, + RequestTime: metav1.NewTime(time.Now()), + } + }, + ) + + // Wait until the instance is "Ready" + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.RestoreComplete, 10*time.Minute) + + // Check databases are "Ready" + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 30*time.Second) + + testhelpers.VerifySimpleData(k8sEnv) + }) + }) + } + + Context("New backup through physical", func() { + testCase := backupTestCase{ + name: "default rman backup", + instanceName: "mydb", + backupName: "phys", + instanceSpec: v1alpha1.InstanceSpec{ + CDBName: "GCLOUD", + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Disks: []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, + }, + Images: map[string]string{}, + MinMemoryForDBContainer: "7.0Gi", + }, + }, + backupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb", + Type: commonv1alpha1.BackupTypePhysical, + }, + }, + } + + Context("Oracle 12.2 EE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "12.2" + testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("12.2", "EE", ""), + } + BackupTest(testCase) + }) + Context("Oracle 18c XE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "18c" + testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("18c", "XE", ""), + } + BackupTest(testCase) + }) + }) + + Context("RMAN backup on datadisk", func() { + testCase := backupTestCase{ + name: "rman backup on datadisk", + contextTitle: "rman backup on datadisk", + instanceName: "mydb", + backupName: "phys", + instanceSpec: v1alpha1.InstanceSpec{ + CDBName: "GCLOUD", + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Disks: []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, + { + Name: "BackupDisk", + Size: resource.MustParse("100Gi"), + }, + }, + Images: map[string]string{}, + MinMemoryForDBContainer: "7.0Gi", + }, + }, + backupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb", + Type: commonv1alpha1.BackupTypePhysical, + }, + LocalPath: "/u04/app/oracle/rman", + }, + } + Context("Oracle 12.2 EE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "12.2" + 
testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("12.2", "EE", ""), + } + BackupTest(testCase) + }) + Context("Oracle 18c XE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "18c" + testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("18c", "XE", ""), + } + BackupTest(testCase) + }) + }) + + Context("New GCS backup", func() { + testCase := backupTestCase{ + name: "GCS rman backup", + instanceName: "mydb", + backupName: "phys", + instanceSpec: v1alpha1.InstanceSpec{ + CDBName: "GCLOUD", + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Disks: []commonv1alpha1.DiskSpec{ + { + Name: "DataDisk", + Size: resource.MustParse("100Gi"), + }, + { + Name: "LogDisk", + Size: resource.MustParse("150Gi"), + }, + }, + Images: map[string]string{}, + MinMemoryForDBContainer: "7.0Gi", + }, + }, + backupSpec: v1alpha1.BackupSpec{ + BackupSpec: commonv1alpha1.BackupSpec{ + Instance: "mydb", + Type: commonv1alpha1.BackupTypePhysical, + }, + GcsPath: fmt.Sprintf("gs://%s/%s/%s/", + os.Getenv("PROW_PROJECT"), os.Getenv("PROW_CLUSTER"), testhelpers.RandName("backup")), + }, + } + + Context("Oracle 12.2 EE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "12.2" + testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("12.2", "EE", ""), + } + BackupTest(testCase) + }) + Context("Oracle 18c XE", func() { + testCase.instanceSpec.GenericInstanceSpec.Version = "18c" + testCase.instanceSpec.GenericInstanceSpec.Images = map[string]string{ + "service": testhelpers.TestImageForVersion("18c", "XE", ""), + } + BackupTest(testCase) + }) + }) +}) + +func TestPhysicalBackup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, + t.Name(), + []Reporter{printer.NewlineReporter{}}) +} diff --git a/oracle/controllers/inttest/releasetest/BUILD.bazel b/oracle/controllers/inttest/releasetest/BUILD.bazel new file mode 100644 index 0000000..2dd8262 --- /dev/null +++ b/oracle/controllers/inttest/releasetest/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "releasetest_test", + timeout = "eternal", # 60m + srcs = ["release_test.go"], + tags = ["integration"], + deps = [ + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/envtest/printer", + ], +) diff --git a/oracle/controllers/inttest/releasetest/release_test.go b/oracle/controllers/inttest/releasetest/release_test.go new file mode 100644 index 0000000..da7c2a0 --- /dev/null +++ b/oracle/controllers/inttest/releasetest/release_test.go @@ -0,0 +1,73 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package releasetest
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
+	// Enable GCP auth for k8s client
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+	v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers"
+)
+
+// Made global to be accessible by AfterSuite
+var k8sEnv = testhelpers.K8sOperatorEnvironment{}
+
+// In case of Ctrl-C clean up the last valid k8sEnv.
+var _ = AfterSuite(func() {
+	k8sEnv.Close()
+})
+
+var _ = Describe("New deployment", func() {
+	var namespace string
+
+	BeforeEach(func() {
+		namespace = testhelpers.RandName("release-crd-test")
+		k8sEnv.Init(namespace)
+	})
+
+	AfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, []string{}, []string{})
+			testhelpers.PrintClusterObjects()
+		}
+		k8sEnv.Close()
+	})
+
+	It("Should create a release object", func() {
+		Eventually(func() string {
+			rKey := client.ObjectKey{Namespace: namespace, Name: "release"}
+			release := &v1alpha1.Release{}
+			if err := k8sEnv.K8sClient.Get(k8sEnv.Ctx, rKey, release); err != nil {
+				return ""
+			}
+			return release.Spec.Version
+		}, 1*time.Minute, 5*time.Second).Should(Not(BeEmpty()))
+	})
+})
+
+func TestRelease(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecsWithDefaultAndCustomReporters(t,
+		t.Name(),
+		[]Reporter{printer.NewlineReporter{}})
+}
diff --git a/oracle/controllers/inttest/snapbackuptest/BUILD.bazel b/oracle/controllers/inttest/snapbackuptest/BUILD.bazel
new file mode 100644
index 0000000..16b8a5a
--- /dev/null
+++ b/oracle/controllers/inttest/snapbackuptest/BUILD.bazel
@@ -0,0 +1,26 @@
+load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test")
+
+# gazelle:map_kind go_test ginkgo_test //oracle:scripts/ginkgo_test.bzl
+
+ginkgo_test(
+    name = "snapbackuptest_test",
+    timeout = "eternal", # 60m
+    srcs = ["snapshot_backup_test.go"],
+    nodes = 3,
+    tags = ["integration"],
+    deps = [
+        "//common/api/v1alpha1",
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers/testhelpers",
+        "//oracle/pkg/k8s",
+        "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1",
+        "@com_github_onsi_ginkgo//:ginkgo",
+        "@com_github_onsi_gomega//:gomega",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_apimachinery//pkg/runtime",
+        "@io_k8s_client_go//plugin/pkg/client/auth/gcp",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+        "@io_k8s_sigs_controller_runtime//pkg/envtest/printer",
+        "@io_k8s_sigs_controller_runtime//pkg/log",
+    ],
+)
diff --git a/oracle/controllers/inttest/snapbackuptest/snapshot_backup_test.go b/oracle/controllers/inttest/snapbackuptest/snapshot_backup_test.go
new file mode 100644
index 0000000..4c60485
--- /dev/null
+++ b/oracle/controllers/inttest/snapbackuptest/snapshot_backup_test.go
@@ -0,0 +1,171 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snapbackuptest
+
+import (
+	"testing"
+	"time"
+
+	snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+	commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s"
+)
+
+// Made global to be accessible by AfterSuite
+var k8sEnv = testhelpers.K8sOperatorEnvironment{}
+
+// In case of Ctrl-C clean up the last valid k8sEnv.
+var _ = AfterSuite(func() {
+	k8sEnv.Close()
+})
+
+var _ = Describe("Backup through snapshot", func() {
+	var namespace string
+	var instanceName string
+
+	BeforeEach(func() {
+		namespace = testhelpers.RandName("backup-snap-crd-test")
+		instanceName = "mydb"
+		k8sEnv.Init(namespace)
+	})
+
+	AfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, []string{"manager", "dbdaemon", "oracledb"}, []string{instanceName})
+			testhelpers.PrintClusterObjects()
+		}
+		k8sEnv.Close()
+	})
+
+	snapBackupTest := func(version string, edition string) {
+		It("Should create snapshot based backup then restore to instance successfully", func() {
+			log := logf.Log
+
+			By("By creating an instance")
+			testhelpers.CreateSimpleInstance(k8sEnv, instanceName, version, edition)
+
+			createdInstance := &v1alpha1.Instance{}
+			instKey := client.ObjectKey{Namespace: namespace, Name: instanceName}
+
+			// Wait until the instance is "Ready" (requires 5+ minutes to download image)
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, 10*time.Minute)
+
+			// Wait until DatabaseInstanceReady = True
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 7*time.Minute)
+
+			// Add test data
+			time.Sleep(10 * time.Second)
+			testhelpers.CreateSimplePDB(k8sEnv, instanceName)
+			testhelpers.InsertSimpleData(k8sEnv)
+
+			By("By creating a snapshot based backup")
+			backupName := "snap"
+			backup := &v1alpha1.Backup{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: namespace,
+					Name:      backupName,
+				},
+				Spec: v1alpha1.BackupSpec{
+					BackupSpec: commonv1alpha1.BackupSpec{
+						Instance: instanceName,
+						Type:     commonv1alpha1.BackupTypeSnapshot,
+					},
+				},
+			}
+			testhelpers.K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, backup)
+
+			By("By checking volume snapshot is created and ready")
+			backupKey := client.ObjectKey{Namespace: namespace, Name: backupName}
+
+			// Wait for Ready or Error. 
+ var createdBackup v1alpha1.Backup + var cond *metav1.Condition + Eventually(func() bool { + Expect(k8sEnv.K8sClient.Get(k8sEnv.Ctx, backupKey, &createdBackup)).Should(Succeed()) + cond = k8s.FindCondition(createdBackup.Status.Conditions, k8s.Ready) + return k8s.ConditionStatusEquals(cond, metav1.ConditionTrue) || + k8s.ConditionReasonEquals(cond, k8s.BackupFailed) + }, 10*time.Minute, 5*time.Second).Should(Equal(true)) + log.Info("Backup status", "status", cond.Status, "message", cond.Message) + Expect(cond.Reason).Should(Equal(k8s.BackupReady)) + + var snapshots snapv1.VolumeSnapshotList + Expect(k8sEnv.K8sClient.List(k8sEnv.Ctx, &snapshots, client.InNamespace(namespace))).Should(Succeed()) + Expect(len(snapshots.Items)).Should(Equal(2)) + for _, snapshot := range snapshots.Items { + Expect(*snapshot.Status.ReadyToUse).To(BeTrue()) + } + + By("By restoring snapshot backup to instance") + restoreSpec := &v1alpha1.RestoreSpec{ + BackupType: "Snapshot", + BackupID: createdBackup.Status.BackupID, + Dop: 2, + TimeLimitMinutes: 180, + Force: true, + RequestTime: metav1.Time{Time: time.Now()}, + } + + testhelpers.K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, + instKey, + createdInstance, + func(obj *runtime.Object) { + (*obj).(*v1alpha1.Instance).Spec.Restore = restoreSpec + }) + + // Wait until the instance is "Ready" (requires 5+ minutes to download image) + By("By checking instance should be restored successfully") + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.RestoreComplete, 10*time.Minute) + + // Check databases are "Ready" + testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 30*time.Second) + + //Verify if the restored instance contains the pre-backup data + time.Sleep(30 * time.Second) + testhelpers.VerifySimpleData(k8sEnv) + }) + } + + Context("Oracle 12.2 EE", func() { + snapBackupTest("12.2", "EE") + }) + + Context("Oracle 19.3 EE", func() { + snapBackupTest("19.3", "EE") + }) + + Context("Oracle 18c XE", func() { + snapBackupTest("18c", "XE") + }) +}) + +func TestSnapshotBackup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, + t.Name(), + []Reporter{printer.NewlineReporter{}}) +} diff --git a/oracle/controllers/inttest/usertest/BUILD.bazel b/oracle/controllers/inttest/usertest/BUILD.bazel new file mode 100644 index 0000000..10949ae --- /dev/null +++ b/oracle/controllers/inttest/usertest/BUILD.bazel @@ -0,0 +1,26 @@ +load("//oracle:scripts/ginkgo_test.bzl", "ginkgo_test") + +# gazelle:map_kind go_test ginkgo_test //oracle:scripts/ginkgo_test.bzl + +ginkgo_test( + name = "usertest_test", + timeout = "eternal", # 60m + srcs = ["user_test.go"], + nodes = 3, + tags = ["integration"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers/testhelpers", + "//oracle/pkg/k8s", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_gomega//:gomega", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_client_go//util/retry", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/log", + "@io_k8s_sigs_controller_runtime//pkg/log/zap", + ], +) diff --git a/oracle/controllers/inttest/usertest/user_test.go b/oracle/controllers/inttest/usertest/user_test.go new file mode 100644 index 0000000..8a7167c --- /dev/null +++ 
b/oracle/controllers/inttest/usertest/user_test.go @@ -0,0 +1,355 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usertest + +import ( + "io/ioutil" + "os" + "os/exec" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // Enable GCP auth for k8s client + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +func TestUser(t *testing.T) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + RegisterFailHandler(Fail) + RunSpecs(t, "User operations") +} + +var ( + // Global variable, to be accessible by AfterSuite + k8sEnv = testhelpers.K8sOperatorEnvironment{} + instanceName = "mydb" + databaseName = "pdb1" + pod = "mydb-sts-0" + projectId = os.Getenv("PROW_PROJECT") + targetCluster = os.Getenv("PROW_CLUSTER") + targetZone = os.Getenv("PROW_CLUSTER_ZONE") + userPwdBefore = map[string]string{ + "GPDB_ADMIN": "google", + "superuser": "superpassword", + "scott": "tiger", + "proberuser": "proberpassword", + } + userPwdAfter = map[string]string{ + "GPDB_ADMIN": "google1", + "superuser": "superpassword1", + "scott": "tiger1", + "proberuser": "proberpassword1", + } + log = logf.Log +) + +// Initial setup before test suite. +var _ = BeforeSuite(func() { + // Note that these GSM + WI setup steps are re-runnable. + // If the env fulfills, no error should occur. + + // Check if project info is initialized + Expect(projectId).ToNot(BeEmpty()) + Expect(targetCluster).ToNot(BeEmpty()) + Expect(targetZone).NotTo(BeEmpty()) + enableGsmApi() + enableIamApi() + prepareTestUsersAndGrantAccess() + enableWiWithNodePool() +}) + +// In case of Ctrl-C clean up the last valid k8sEnv. 
+var _ = AfterSuite(func() {
+	k8sEnv.Close()
+})
+
+var _ = Describe("User operations", func() {
+	BeforeEach(func() {
+		initEnvBeforeEachTest()
+	})
+
+	AfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			testhelpers.PrintLogs(k8sEnv.Namespace, k8sEnv.Env, []string{"manager", "dbdaemon", "oracledb"}, []string{instanceName})
+			testhelpers.PrintClusterObjects()
+		}
+		k8sEnv.Close()
+	})
+
+	testUpdateUser := func(version string, edition string) {
+		It("Should test user creation with GSM", func() {
+			testhelpers.CreateSimpleInstance(k8sEnv, instanceName, version, edition)
+
+			// Wait until DatabaseInstanceReady = True
+			instKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: instanceName}
+			testhelpers.WaitForInstanceConditionState(k8sEnv, instKey, k8s.DatabaseInstanceReady, metav1.ConditionTrue, k8s.CreateComplete, 15*time.Minute)
+
+			// Create PDB
+			testhelpers.CreateSimplePdbWithDbObj(k8sEnv, &v1alpha1.Database{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: k8sEnv.Namespace,
+					Name:      databaseName,
+				},
+				Spec: v1alpha1.DatabaseSpec{
+					DatabaseSpec: commonv1alpha1.DatabaseSpec{
+						Name:     databaseName,
+						Instance: instanceName,
+					},
+					AdminPasswordGsmSecretRef: &commonv1alpha1.GsmSecretReference{
+						ProjectId: projectId,
+						SecretId:  "GPDB_ADMIN",
+						Version:   "1",
+					},
+					Users: []v1alpha1.UserSpec{
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "superuser",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "superuser",
+										Version:   "1",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "scott",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "scott",
+										Version:   "1",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "proberuser",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "proberuser",
+										Version:   "1",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+					},
+				},
+			})
+
+			// Note that we might not need a separate test for user creation,
+			// as the BeforeEach function has covered this scenario already.
+			By("Verify PDB user connectivity with initial passwords")
+			// Allow for password sync latency between the config server and the
+			// Oracle DB, even after the PDB status reports ready.
+			time.Sleep(5 * time.Second)
+			testhelpers.K8sVerifyUserConnectivity(pod, k8sEnv.Namespace, databaseName, userPwdBefore)
+
+			By("DB is ready, updating user secret version")
+			createdDatabase := &v1alpha1.Database{}
+			objKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: databaseName}
+
+			testhelpers.K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx,
+				objKey,
+				createdDatabase,
+				func(obj *runtime.Object) {
+					databaseToUpdate := (*obj).(*v1alpha1.Database)
+					databaseToUpdate.Spec.AdminPasswordGsmSecretRef = &commonv1alpha1.GsmSecretReference{
+						ProjectId: projectId,
+						SecretId:  "GPDB_ADMIN",
+						Version:   "2",
+					}
+					databaseToUpdate.Spec.Users = []v1alpha1.UserSpec{
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "superuser",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "superuser",
+										Version:   "2",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "scott",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "scott",
+										Version:   "2",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+						{
+							UserSpec: commonv1alpha1.UserSpec{
+								Name: "proberuser",
+								CredentialSpec: commonv1alpha1.CredentialSpec{
+									GsmSecretRef: &commonv1alpha1.GsmSecretReference{
+										ProjectId: projectId,
+										SecretId:  "proberuser",
+										Version:   "2",
+									},
+								},
+							},
+							Privileges: []v1alpha1.PrivilegeSpec{"connect"},
+						},
+					}
+				})
+
+			// Verify that both the PDB ready and user ready statuses are as expected.
+			Eventually(func() metav1.ConditionStatus {
+				Expect(k8sEnv.K8sClient.Get(k8sEnv.Ctx, client.ObjectKey{Namespace: k8sEnv.Namespace, Name: databaseName}, createdDatabase)).Should(Succeed())
+				cond := k8s.FindCondition(createdDatabase.Status.Conditions, k8s.Ready)
+				// Look up the UserReady condition on the database status rather than
+				// constructing a literal condition, so the check reflects the actual
+				// user sync state.
+				userCond := k8s.FindCondition(createdDatabase.Status.Conditions, k8s.UserReady)
+				syncUserCompleted := k8s.ConditionStatusEquals(userCond, metav1.ConditionTrue)
+				if cond != nil && syncUserCompleted {
+					logf.Log.Info("PDB", "state", cond.Reason, "SyncComplete", syncUserCompleted)
+					return cond.Status
+				}
+				return metav1.ConditionUnknown
+			}, 2*time.Minute, 5*time.Second).Should(Equal(metav1.ConditionTrue))
+
+			// Allow for password sync latency between the config server and the
+			// Oracle DB, even after the PDB reports ready and the user sync is complete.
+			time.Sleep(5 * time.Second)
+
+			By("Verify PDB user connectivity with new passwords")
+			testhelpers.K8sVerifyUserConnectivity(pod, k8sEnv.Namespace, databaseName, userPwdAfter)
+		})
+	}
+
+	Context("Oracle 12.2 EE", func() {
+		testUpdateUser("12.2", "EE")
+	})
+	Context("Oracle 19.3 EE", func() {
+		testUpdateUser("19.3", "EE")
+	})
+	Context("Oracle 18c XE", func() {
+		testUpdateUser("18c", "XE")
+	})
+})
+
+func enableGsmApi() {
+	// Enable GSM API.
+	cmd := exec.Command("gcloud", "services", "enable", "secretmanager.googleapis.com")
+	out, err := cmd.CombinedOutput()
+	log.Info("gcloud services enable secretmanager.googleapis.com", "output", string(out))
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func enableIamApi() {
+	// Enable IAM API.
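+	// The IAM Credentials API backs Workload Identity's short-lived service
+	// account tokens, which this suite relies on for GSM access.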
+	cmd := exec.Command("gcloud", "services", "enable", "iamcredentials.googleapis.com")
+	out, err := cmd.CombinedOutput()
+	log.Info("gcloud services enable iamcredentials.googleapis.com", "output", string(out))
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func prepareTestUsersAndGrantAccess() {
+	// Prepare test users and grant them GSM permissions.
+	for k, v := range userPwdBefore {
+		checkUser := exec.Command("gcloud", "secrets", "describe", k)
+		if checkUserOutput, err := checkUser.CombinedOutput(); err != nil {
+			log.Info("gcloud secrets describe "+k, "output", string(checkUserOutput))
+
+			// Prepare two password files for initializing the GSM secret.
+			f1, err := ioutil.TempFile("", "")
+			Expect(err).NotTo(HaveOccurred())
+			defer os.Remove(f1.Name())
+			err = ioutil.WriteFile(f1.Name(), []byte(v), os.FileMode(0600))
+			Expect(err).NotTo(HaveOccurred())
+
+			f2, err := ioutil.TempFile("", "")
+			Expect(err).NotTo(HaveOccurred())
+			defer os.Remove(f2.Name())
+			newPassword, ok := userPwdAfter[k]
+			Expect(ok).Should(Equal(true))
+			err = ioutil.WriteFile(f2.Name(), []byte(newPassword), os.FileMode(0600))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Create the secret with credential file f1.
+			cmd := exec.Command("gcloud", "secrets", "create", k, "--replication-policy=automatic", "--data-file="+f1.Name())
+			out, err := cmd.CombinedOutput()
+			// Password omitted from the log.
+			log.Info("gcloud secrets create "+k, "output", string(out))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Add a second secret version with credential file f2.
+			cmd = exec.Command("gcloud", "secrets", "versions", "add", k, "--data-file="+f2.Name())
+			out, err = cmd.CombinedOutput()
+			// Password omitted from the log.
+			log.Info("gcloud secrets add "+k+" v2", "output", string(out))
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		// Grant the GSM secret accessor role to our test service account.
+		Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+			cmd := exec.Command("gcloud",
+				"secrets", "add-iam-policy-binding", k, "--role=roles/secretmanager.secretAccessor",
+				"--member="+"serviceAccount:"+testhelpers.GCloudServiceAccount())
+			out, err := cmd.CombinedOutput()
+			log.Info("gcloud secrets add-iam-policy-binding", "output", string(out))
+			return err
+		})).To(Succeed())
+	}
+}
+func enableWiWithNodePool() {
+	// Enable Workload Identity on the existing cluster.
+	cmd := exec.Command("gcloud", "container", "clusters", "update", targetCluster, "--workload-pool="+projectId+".svc.id.goog", "--zone="+targetZone)
+	out, err := cmd.CombinedOutput()
+	log.Info("gcloud container clusters update", "output", string(out))
+	Expect(err).NotTo(HaveOccurred())
+	// Migrate applications to Workload Identity with Node pool modification.
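+	// --workload-metadata=GKE_METADATA exposes the GKE metadata server to pods
+	// on the pool, which is what lets them use Workload Identity.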
+ cmd = exec.Command("gcloud", "container", "node-pools", "update", "default-pool", "--cluster="+targetCluster, "--workload-metadata=GKE_METADATA", "--zone="+targetZone) + out, err = cmd.CombinedOutput() + log.Info("gcloud container node-pools update", "output", string(out)) + Expect(err).NotTo(HaveOccurred()) +} + +func initEnvBeforeEachTest() { + k8sEnv.Init(testhelpers.RandName("user-test")) + // Allow the k8s [namespace/default] service account access to GCS buckets + testhelpers.SetupServiceAccountBindingBetweenGcpAndK8s(k8sEnv) +} diff --git a/oracle/controllers/resources.go b/oracle/controllers/resources.go new file mode 100644 index 0000000..af783ef --- /dev/null +++ b/oracle/controllers/resources.go @@ -0,0 +1,852 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/go-logr/logr" + snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + "google.golang.org/grpc" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/common" +) + +const ( + platformGCP = "GCP" + platformBareMetal = "BareMetal" + platformMinikube = "Minikube" + defaultStorageClassNameGCP = "csi-gce-pd" + defaultVolumeSnapshotClassNameGCP = "csi-gce-pd-snapshot-class" + defaultStorageClassNameBM = "csi-trident" + defaultVolumeSnapshotClassNameBM = "csi-trident-snapshot-class" + defaultStorageClassNameMinikube = "csi-hostpath-sc" + defaultVolumeSnapshotClassNameMinikube = "csi-hostpath-snapclass" + + configAgentName = "config-agent" + // OperatorName is the default operator name. 
+ OperatorName = "operator" + scriptDir = "/agents" + defaultUID = int64(54321) + defaultGID = int64(54322) + safeMinMemoryForDBContainer = "4.0Gi" +) + +var ( + sourceCidrRanges = []string{"0.0.0.0/0"} + defaultDiskSize = resource.MustParse("100Gi") + dialTimeout = 3 * time.Minute + configList = []string{configAgentName, OperatorName} +) + +type platformConfig struct { + storageClassName string + volumeSnapshotClassName string +} + +func getPlatformConfig(p string) (*platformConfig, error) { + switch p { + case platformGCP: + return &platformConfig{ + storageClassName: defaultStorageClassNameGCP, + volumeSnapshotClassName: defaultVolumeSnapshotClassNameGCP, + }, nil + case platformBareMetal: + return &platformConfig{ + storageClassName: defaultStorageClassNameBM, + volumeSnapshotClassName: defaultVolumeSnapshotClassNameBM, + }, nil + case platformMinikube: + return &platformConfig{ + storageClassName: defaultStorageClassNameMinikube, + volumeSnapshotClassName: defaultVolumeSnapshotClassNameMinikube, + }, nil + default: + return nil, fmt.Errorf("the current release doesn't support deployment platform %q", p) + } +} + +func (pc *platformConfig) finalStorageClassName(config *v1alpha1.Config) string { + storageClassName := pc.storageClassName + + // Override if explicitly requested by the Custom/Global Config. + // If it's not requested in the Global Config, return "", + // which at this point would constitute an error. + // (no platform specific default and no override). + if config != nil { + storageClassName = config.Spec.StorageClass + } + + return storageClassName +} + +func (pc *platformConfig) finalVolumeSnapshotClassName(config *v1alpha1.Config) string { + volumeSnapshotClassName := pc.volumeSnapshotClassName + + // Override if explicitly requested by the Custom/Global Config. + // If it's not requested in the Global Config, return "", + // which at this point would constitute an error. + // (no platform specific default and no override). + if config != nil { + volumeSnapshotClassName = config.Spec.VolumeSnapshotClass + } + + return volumeSnapshotClassName +} + +// NewSvc returns the service for the database. 
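+// When lb is "node" a NodePort service is created; otherwise a LoadBalancer
+// service is used, annotated as internal when the instance's
+// DBNetworkServiceOptions request an internal load balancer on GCP.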
+func NewSvc(inst *v1alpha1.Instance, scheme *runtime.Scheme, lb string) (*corev1.Service, error) { + if len(inst.Spec.SourceCidrRanges) > 0 { + sourceCidrRanges = inst.Spec.SourceCidrRanges + } + var svcAnnotations map[string]string + + lbType := corev1.ServiceTypeLoadBalancer + svcNameFull := fmt.Sprintf(SvcName, inst.Name) + if lb == "node" { + lbType = corev1.ServiceTypeNodePort + svcNameFull = svcNameFull + "-" + lb + } else { + networkOpts := inst.Spec.DBNetworkServiceOptions + if networkOpts != nil && networkOpts.GCP.LoadBalancerType == "Internal" { + svcAnnotations = map[string]string{ + "cloud.google.com/load-balancer-type": "Internal", + } + } + } + + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Service"}, + ObjectMeta: metav1.ObjectMeta{Name: svcNameFull, Namespace: inst.Namespace, Annotations: svcAnnotations}, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"instance": inst.Name}, + Ports: []corev1.ServicePort{ + { + Name: "secure-listener", + Protocol: "TCP", + Port: consts.SecureListenerPort, + TargetPort: intstr.FromInt(consts.SecureListenerPort), + }, + { + Name: "ssl-listener", + Protocol: "TCP", + Port: consts.SSLListenerPort, + TargetPort: intstr.FromInt(consts.SSLListenerPort), + }, + }, + Type: lbType, + // LoadBalancerSourceRanges: sourceCidrRanges, + }, + } + + // Set the Instance resource to own the Service resource. + if err := ctrl.SetControllerReference(inst, svc, scheme); err != nil { + return svc, err + } + + return svc, nil +} + +// NewDBDaemonSvc returns the service for the database daemon server. +func NewDBDaemonSvc(inst *v1alpha1.Instance, scheme *runtime.Scheme) (*corev1.Service, error) { + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Service"}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf(DbdaemonSvcName, inst.Name), Namespace: inst.Namespace}, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"instance": inst.Name}, + Ports: []corev1.ServicePort{ + { + Name: "dbdaemon", + Protocol: "TCP", + Port: consts.DefaultDBDaemonPort, + TargetPort: intstr.FromInt(consts.DefaultDBDaemonPort), + }, + }, + Type: corev1.ServiceTypeClusterIP, + }, + } + + // Set the Instance resource to own the Service resource. + if err := ctrl.SetControllerReference(inst, svc, scheme); err != nil { + return svc, err + } + + return svc, nil +} + +// NewAgentSvc returns the service for the agent. 
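+// The service always exposes the config agent port, and additionally exposes
+// the monitoring agent port when the Monitoring service is enabled on the
+// instance.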
+func NewAgentSvc(inst *v1alpha1.Instance, scheme *runtime.Scheme) (*corev1.Service, error) { + ports := []corev1.ServicePort{ + { + Name: configAgentName, + Protocol: "TCP", + Port: consts.DefaultConfigAgentPort, + TargetPort: intstr.FromInt(consts.DefaultConfigAgentPort), + }, + } + for service, enabled := range inst.Spec.Services { + switch service { + case commonv1alpha1.Monitoring: + if enabled { + ports = append(ports, corev1.ServicePort{ + Name: consts.MonitoringAgentName, + Protocol: "TCP", + Port: consts.DefaultMonitoringAgentPort, + }) + } + } + } + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Service"}, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(AgentSvcName, inst.Name), + Namespace: inst.Namespace, + Labels: map[string]string{"app": "agent-svc"}, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"instance-agent": fmt.Sprintf("%s-agent", inst.Name)}, + Ports: ports, + Type: corev1.ServiceTypeClusterIP, + }, + } + + // Set the Instance resource to own the Service resource. + if err := ctrl.SetControllerReference(inst, svc, scheme); err != nil { + return svc, err + } + + return svc, nil +} + +// SvcURL returns the URL for the database service. +func SvcURL(svc *corev1.Service, port int32) string { + // Unset if not present: state to reflect what's observed. + if len(svc.Status.LoadBalancer.Ingress) == 0 { + return "" + } + + hostName := svc.Status.LoadBalancer.Ingress[0].Hostname + if hostName == "" { + hostName = svc.Status.LoadBalancer.Ingress[0].IP + } + + return net.JoinHostPort(hostName, fmt.Sprintf("%d", port)) +} + +// NewConfigMap returns the config map for database env variables. +func NewConfigMap(inst *v1alpha1.Instance, scheme *runtime.Scheme, cmName string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ConfigMap"}, + ObjectMeta: metav1.ObjectMeta{Name: cmName, Namespace: inst.Namespace}, + Data: map[string]string{ + "SCRIPTS_DIR": scriptDir, + "INSTALL_DIR": "/stage", + "HEALTHCHECK_DB_SCRIPT": "health-check-db.sh", + "ORACLE_BASE": common.GetSourceOracleBase(inst.Spec.Version), + "ORACLE_INV": common.GetSourceOracleInventory(inst.Spec.Version), + "ORACLE_HOME": common.GetSourceOracleHome(inst.Spec.Version), + "LD_LIBRARY_PATH": fmt.Sprintf("export LD_LIBRARY_PATH=%s/lib:/usr/lib\n", common.GetSourceOracleHome(inst.Spec.Version)), + }, + } + + // Set the Instance resource to own the ConfigMap resource. + if err := ctrl.SetControllerReference(inst, cm, scheme); err != nil { + return cm, err + } + + return cm, nil +} + +// NewSts returns the statefulset for the database pod. +func NewSts(sp StsParams, pvcs []corev1.PersistentVolumeClaim, podTemplate corev1.PodTemplateSpec) (*appsv1.StatefulSet, error) { + var replicas int32 = 1 + sts := &appsv1.StatefulSet{ + // It looks like the version needs to be explicitly set to avoid the + // "incorrect version specified in apply patch" error. 
+ TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "StatefulSet"}, + ObjectMeta: metav1.ObjectMeta{Name: sp.StsName, Namespace: sp.Inst.Namespace}, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + // UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: appsv1.RollingUpdateStatefulSetStrategyType}, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"instance": sp.Inst.Name, "statefulset": sp.StsName}, + }, + Template: podTemplate, + // Do we need a pointer to a service in a StatefulSet? + // ServiceName: sp.svcName, + VolumeClaimTemplates: pvcs, + }, + } + + // Set the Instance resource to own the StatefulSet resource. + if err := ctrl.SetControllerReference(sp.Inst, sts, sp.Scheme); err != nil { + return sts, err + } + + return sts, nil +} + +// GetLogLevelArgs returns agent args for log level. +func GetLogLevelArgs(config *v1alpha1.Config) map[string][]string { + agentArgs := make(map[string][]string) + if config == nil { + return agentArgs + } + + for _, name := range configList { + args := []string{} + if len(config.Spec.LogLevel[name]) > 0 { + args = append(args, fmt.Sprintf("--v=%s", config.Spec.LogLevel[name])) + } + agentArgs[name] = args + } + + return agentArgs +} + +// NewAgentDeployment returns the agent deployment. +func NewAgentDeployment(agentDeployment AgentDeploymentParams) (*appsv1.Deployment, error) { + var replicas int32 = 1 + instlabels := map[string]string{"instance": agentDeployment.Inst.Name} + labels := map[string]string{"instance-agent": fmt.Sprintf("%s-agent", agentDeployment.Inst.Name), "deployment": agentDeployment.Name} + + configAgentArgs := []string{ + fmt.Sprintf("--port=%d", consts.DefaultConfigAgentPort), + fmt.Sprintf("--dbservice=%s", fmt.Sprintf(DbdaemonSvcName, agentDeployment.Inst.Name)), + fmt.Sprintf("--dbport=%d", consts.DefaultDBDaemonPort), + } + + monitoringAgentArgs := []string{ + fmt.Sprintf("--dbservice=%s", fmt.Sprintf(DbdaemonSvcName, agentDeployment.Inst.Name)), + fmt.Sprintf("--dbport=%d", consts.DefaultDBDaemonPort), + } + + if len(agentDeployment.Args[configAgentName]) > 0 { + for _, arg := range agentDeployment.Args[configAgentName] { + configAgentArgs = append(configAgentArgs, arg) + } + } + + containers := []corev1.Container{ + { + Name: configAgentName, + Image: agentDeployment.Images["config"], + Command: []string{"/configagent"}, + Args: configAgentArgs, + Ports: []corev1.ContainerPort{ + {Name: "ca-port", Protocol: "TCP", ContainerPort: consts.DefaultConfigAgentPort}, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &agentDeployment.PrivEscalation, + }, + ImagePullPolicy: corev1.PullAlways, + }, + } + agentDeployment.Log.V(2).Info("enabling services: ", "services", agentDeployment.Services) + for _, s := range agentDeployment.Services { + switch s { + case commonv1alpha1.Monitoring: + containers = append(containers, corev1.Container{ + Name: consts.MonitoringAgentName, + Image: agentDeployment.Images["monitoring"], + Command: []string{"/monitoring_agent"}, + Args: monitoringAgentArgs, + Ports: []corev1.ContainerPort{ + { + Name: "oe-port", + Protocol: "TCP", + ContainerPort: consts.DefaultMonitoringAgentPort, + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &agentDeployment.PrivEscalation, + }, + ImagePullPolicy: corev1.PullAlways, + }) + default: + agentDeployment.Log.V(2).Info("unsupported service: ", "service", s) + } + } + + podSpec := corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{}, + Containers: containers, + // Add 
pod affinity for the agent pod, so that k8s will try to schedule the agent pod
+		// to the same node where the paired DB pod is located. In this way, we can avoid
+		// unnecessary cross node communication.
+		Affinity: &corev1.Affinity{
+			PodAffinity: &corev1.PodAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: instlabels,
+						},
+						Namespaces:  []string{agentDeployment.Inst.Namespace},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			},
+		},
+	}
+
+	template := corev1.PodTemplateSpec{
+		ObjectMeta: metav1.ObjectMeta{
+			Labels:    labels,
+			Namespace: agentDeployment.Inst.Namespace,
+		},
+		Spec: podSpec,
+	}
+
+	deployment := &appsv1.Deployment{
+		// It looks like the version needs to be explicitly set to avoid the
+		// "incorrect version specified in apply patch" error.
+		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
+		ObjectMeta: metav1.ObjectMeta{Name: agentDeployment.Name, Namespace: agentDeployment.Inst.Namespace},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: template,
+		},
+	}
+
+	if err := ctrl.SetControllerReference(agentDeployment.Inst, deployment, agentDeployment.Scheme); err != nil {
+		return deployment, err
+	}
+	return deployment, nil
+}
+
+func findDiskSize(diskName string, sp StsParams) resource.Quantity {
+	spec, exists := defaultDiskSpecs[diskName]
+	if !exists {
+		sp.Log.Info("no default disk spec bound to this disk name, using the default disk size", "diskName", diskName, "defaultDiskSize", defaultDiskSize)
+		return defaultDiskSize
+	}
+
+	if sp.Disks != nil {
+		for _, d := range sp.Disks {
+			if d.Name == diskName && !d.Size.IsZero() {
+				sp.Log.Info("using the instance-level requested disk size", "diskName", diskName, "mount", defaultDiskMountLocations[spec.Name], "requestedDiskSize", d.Size)
+				return d.Size
+			}
+		}
+	}
+
+	if sp.Config != nil {
+		for _, d := range sp.Config.Spec.Disks {
+			if d.Name == diskName && !d.Size.IsZero() {
+				sp.Log.Info("using the customer-provided (global preference) disk size", "mount", defaultDiskMountLocations[spec.Name], "diskName", diskName, "diskSize", d.Size)
+				return d.Size
+			}
+		}
+	}
+	sp.Log.Info("using the default disk size", "diskName", diskName, "mount", defaultDiskMountLocations[spec.Name], "diskSize", spec.Size)
+	return spec.Size
+}
+
+// NewPVCs returns PVCs.
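
findDiskSize above resolves a disk's size with a strict precedence: an instance-level request wins over the global Config, which wins over the compiled-in defaults. A sketch of a same-package test pinning that order down (wiring the logger through ctrl.Log is an assumption; the sizes and the "ScratchDisk" name are illustrative):

func TestFindDiskSizePrecedence(t *testing.T) {
	sp := StsParams{
		Log: ctrl.Log.WithName("test"),
		Disks: []commonv1alpha1.DiskSpec{
			{Name: "DataDisk", Size: resource.MustParse("250Gi")},
		},
	}

	// The instance-level request beats the compiled-in default for a known disk.
	if got := findDiskSize("DataDisk", sp); got.Cmp(resource.MustParse("250Gi")) != 0 {
		t.Errorf("findDiskSize(DataDisk) = %v, want 250Gi", got)
	}

	// Unknown disk names fall back to defaultDiskSize (100Gi).
	if got := findDiskSize("ScratchDisk", sp); got.Cmp(defaultDiskSize) != 0 {
		t.Errorf("findDiskSize(ScratchDisk) = %v, want %v", got, defaultDiskSize)
	}
}
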
+func NewPVCs(sp StsParams) ([]corev1.PersistentVolumeClaim, error) { + var pvcs []corev1.PersistentVolumeClaim + + for _, diskSpec := range sp.Disks { + rl := corev1.ResourceList{corev1.ResourceStorage: findDiskSize(diskSpec.Name, sp)} + pvcName, mount := GetPVCNameAndMount(sp.Inst.Name, diskSpec.Name) + var pvc corev1.PersistentVolumeClaim + + // Determine storage class (from disk spec or config) + storageClass, err := ConfigAttribute("StorageClass", diskSpec.StorageClass, sp.Config) + if err != nil || storageClass == "" { + return nil, fmt.Errorf("failed to identify a storageClassName for disk %q", diskSpec.Name) + } + sp.Log.Info("storage class identified", "disk", diskSpec.Name, "StorageClass", storageClass) + + pvc = corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "PersistentVolumeClaim"}, + ObjectMeta: metav1.ObjectMeta{Name: pvcName, Namespace: sp.Inst.Namespace}, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{Requests: rl}, + StorageClassName: func() *string { s := storageClass; return &s }(), + }, + } + + if sp.Restore != nil && sp.Restore.BackupID != "" { + sp.Log.Info("starting a restore process for disk", "mount", mount) + pvc.Spec.DataSource = &corev1.TypedLocalObjectReference{ + APIGroup: func() *string { s := string("snapshot.storage.k8s.io"); return &s }(), + Kind: "VolumeSnapshot", + Name: fmt.Sprintf("%s-%s", sp.Restore.BackupID, mount), + } + } else { + sp.Log.Info("starting a provisioning process for disk", "mount", mount) + } + + pvcs = append(pvcs, pvc) + } + + return pvcs, nil +} + +func buildPVCMounts(sp StsParams) []corev1.VolumeMount { + var diskMounts []corev1.VolumeMount + + for _, diskSpec := range sp.Disks { + pvcName, mount := GetPVCNameAndMount(sp.Inst.Name, diskSpec.Name) + diskMounts = append(diskMounts, corev1.VolumeMount{ + Name: pvcName, + MountPath: fmt.Sprintf("/%s", mount), + }) + } + + return diskMounts +} + +// NewPodTemplate returns the pod template for the database statefulset. 
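
When StsParams.Restore carries a BackupID, NewPVCs above stamps each claim with a VolumeSnapshot data source instead of provisioning empty storage, so the PVC hydrates from the snapshot named "&lt;BackupID&gt;-&lt;mount&gt;". A sketch of a same-package test for the data disk (the *v1alpha1.RestoreSpec type for the Restore field and the "standard-rwo" class are assumptions):

func TestNewPVCsRestoreDataSource(t *testing.T) {
	sp := StsParams{
		Log:     ctrl.Log.WithName("test"),
		Inst:    &v1alpha1.Instance{ObjectMeta: metav1.ObjectMeta{Name: "myinst", Namespace: "db"}},
		Disks:   []commonv1alpha1.DiskSpec{{Name: "DataDisk", StorageClass: "standard-rwo"}},
		Restore: &v1alpha1.RestoreSpec{BackupID: "bkp-20210401"},
	}

	pvcs, err := NewPVCs(sp)
	if err != nil {
		t.Fatalf("NewPVCs: %v", err)
	}

	// DataDisk mounts at u02 (see TestBuildPVCMounts below), so the claim
	// should hydrate from the snapshot "<BackupID>-u02".
	ds := pvcs[0].Spec.DataSource
	if ds == nil || ds.Kind != "VolumeSnapshot" || ds.Name != "bkp-20210401-u02" {
		t.Errorf("unexpected DataSource: %+v", ds)
	}
}
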
+func NewPodTemplate(sp StsParams, cdbName, DBDomain string) corev1.PodTemplateSpec { + labels := map[string]string{ + "instance": sp.Inst.Name, + "statefulset": sp.StsName, + "app": DatabasePodAppLabel, + } + + minMemoryForDBContainer := safeMinMemoryForDBContainer + if sp.Inst.Spec.MinMemoryForDBContainer != "" { + minMemoryForDBContainer = sp.Inst.Spec.MinMemoryForDBContainer + sp.Log.Info("NewPodTemplate: replacing", "SafeMinMemoryForDBContainer", safeMinMemoryForDBContainer, "sp.Inst.Spec.MinMemoryForDBContainer", sp.Inst.Spec.MinMemoryForDBContainer) + } + + sp.Log.Info("NewPodTemplate: creating new template with service image", "image", sp.Images["service"]) + dataDiskPVC, dataDiskMountName := GetPVCNameAndMount(sp.Inst.Name, "DataDisk") + containers := []corev1.Container{ + { + Name: "oracledb", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse(minMemoryForDBContainer), + }, + }, + Image: sp.Images["service"], + Command: []string{fmt.Sprintf("%s/init_oracle.sh", scriptDir)}, + Args: []string{cdbName, DBDomain}, + Ports: []corev1.ContainerPort{ + {Name: "secure-listener", Protocol: "TCP", ContainerPort: consts.SecureListenerPort}, + {Name: "ssl-listener", Protocol: "TCP", ContainerPort: consts.SSLListenerPort}, + }, + VolumeMounts: append([]corev1.VolumeMount{ + {Name: "var-tmp", MountPath: "/var/tmp"}, + {Name: "agent-repo", MountPath: "/agents"}, + }, + buildPVCMounts(sp)...), + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: sp.ConfigMap.ObjectMeta.Name}}, + }, + }, + ImagePullPolicy: corev1.PullAlways, + }, + { + Name: "dbdaemon", + Image: sp.Images["service"], + Command: []string{"/agents/dbdaemon"}, + Args: []string{fmt.Sprintf("--cdb_name=%s", cdbName)}, + Ports: []corev1.ContainerPort{ + {Name: "dbdaemon", Protocol: "TCP", ContainerPort: consts.DefaultDBDaemonPort}, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + VolumeMounts: append([]corev1.VolumeMount{ + {Name: "var-tmp", MountPath: "/var/tmp"}, + {Name: "agent-repo", MountPath: "/agents"}, + }, + buildPVCMounts(sp)...), + ImagePullPolicy: corev1.PullAlways, + }, + { + Name: "alert-log-sidecar", + Image: sp.Images["logging_sidecar"], + Command: []string{"/logging_main"}, + Args: []string{"--logType=ALERT"}, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: dataDiskPVC, MountPath: fmt.Sprintf("/%s", dataDiskMountName)}, + }, + ImagePullPolicy: corev1.PullAlways, + }, + { + Name: "listener-log-sidecar", + Image: sp.Images["logging_sidecar"], + Command: []string{"/logging_main"}, + Args: []string{"--logType=LISTENER"}, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: dataDiskPVC, MountPath: fmt.Sprintf("/%s", dataDiskMountName)}, + }, + ImagePullPolicy: corev1.PullAlways, + }, + } + initContainers := []corev1.Container{ + { + Name: "dbinit", + Image: sp.Images["dbinit"], + Command: []string{"sh", "-c", "cp -r agent_repo/. 
/agents/ && chmod -R 750 /agents/*"}, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "agent-repo", MountPath: "/agents"}, + }, + ImagePullPolicy: corev1.PullAlways, + }, + } + + volumes := []corev1.Volume{ + { + Name: "var-tmp", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + { + Name: "agent-repo", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + } + + var antiAffinityNamespaces []string + if sp.Config != nil && len(sp.Config.Spec.HostAntiAffinityNamespaces) != 0 { + antiAffinityNamespaces = sp.Config.Spec.HostAntiAffinityNamespaces + } + + uid := sp.Inst.Spec.DatabaseUID + if uid == nil { + sp.Log.Info("set pod user ID to default value", "UID", defaultUID) + // consts are not addressable + uid = func(i int64) *int64 { return &i }(defaultUID) + } + + gid := sp.Inst.Spec.DatabaseGID + if gid == nil { + sp.Log.Info("set pod group ID to default value", "GID", defaultGID) + // consts are not addressable + gid = func(i int64) *int64 { return &i }(defaultGID) + } + + // for minikube, the default csi-hostpath-driver mounts persistent volumes writable by root only, so explicitly + // change owner and permissions of mounted pvs with an init container. + if sp.Config != nil && sp.Config.Spec.Platform == platformMinikube { + initContainers = addMinikubeInitContainer(sp, initContainers, *uid, *gid) + } + + podSpec := corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: uid, + RunAsGroup: gid, + FSGroup: gid, + RunAsNonRoot: func(b bool) *bool { return &b }(true), + }, + // ImagePullSecrets: []corev1.LocalObjectReference {{Name: GcrSecretName }}, + // InitContainers: initContainers, + Containers: containers, + InitContainers: initContainers, + ShareProcessNamespace: func(b bool) *bool { return &b }(true), + // ServiceAccountName: + // TerminationGracePeriodSeconds: + // Tolerations: + Volumes: volumes, + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": DatabasePodAppLabel}, + }, + Namespaces: antiAffinityNamespaces, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + } + + // TODO(bdali): consider adding pod affinity, priority class name, secret mount. + + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Namespace: sp.Namespace, + // Annotations: annotations, + }, + Spec: podSpec, + } +} + +// NewSnapshot returns the snapshot for the given pv. +func NewSnapshot(backup *v1alpha1.Backup, scheme *runtime.Scheme, pvcName, snapName, volumeSnapshotClassName string) (*snapv1.VolumeSnapshot, error) { + snap := &snapv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{APIVersion: snapv1.SchemeGroupVersion.String(), Kind: "VolumeSnapshot"}, + ObjectMeta: metav1.ObjectMeta{Name: snapName, Namespace: backup.Namespace, Labels: map[string]string{"snap": snapName}}, + Spec: snapv1.VolumeSnapshotSpec{ + Source: snapv1.VolumeSnapshotSource{PersistentVolumeClaimName: &pvcName}, + VolumeSnapshotClassName: func() *string { s := string(volumeSnapshotClassName); return &s }(), + }, + } + + // Set the Instance resource to own the VolumeSnapshot resource. 
+	if err := ctrl.SetControllerReference(backup, snap, scheme); err != nil {
+		return snap, err
+	}
+
+	return snap, nil
+}
+
+// NewSnapshotInst returns the snapshot for the given instance and pv.
+func NewSnapshotInst(inst *v1alpha1.Instance, scheme *runtime.Scheme, pvcName, snapName, volumeSnapshotClassName string) (*snapv1.VolumeSnapshot, error) {
+	snap := &snapv1.VolumeSnapshot{
+		TypeMeta:   metav1.TypeMeta{APIVersion: snapv1.SchemeGroupVersion.String(), Kind: "VolumeSnapshot"},
+		ObjectMeta: metav1.ObjectMeta{Name: snapName, Namespace: inst.Namespace, Labels: map[string]string{"snap": snapName}},
+		Spec: snapv1.VolumeSnapshotSpec{
+			Source:                  snapv1.VolumeSnapshotSource{PersistentVolumeClaimName: &pvcName},
+			VolumeSnapshotClassName: func() *string { s := volumeSnapshotClassName; return &s }(),
+		},
+	}
+
+	// Set the Instance resource to own the VolumeSnapshot resource.
+	if err := ctrl.SetControllerReference(inst, snap, scheme); err != nil {
+		return snap, err
+	}
+
+	return snap, nil
+}
+
+// CheckStatusInstanceFunc attempts to determine the state of a database instance.
+// In particular:
+// - has provisioning finished?
+// - is the Instance up and accepting connection requests?
+var CheckStatusInstanceFunc = func(ctx context.Context, instName, cdbName, clusterIP, DBDomain string, log logr.Logger) (string, error) {
+	log.Info("resources/checkStatusInstance", "inst name", instName, "clusterIP", clusterIP)
+
+	// Establish a connection to a Config Agent.
+	ctx, cancel := context.WithTimeout(ctx, dialTimeout)
+	defer cancel()
+
+	conn, err := grpc.Dial(fmt.Sprintf("%s:%d", clusterIP, consts.DefaultConfigAgentPort), grpc.WithInsecure())
+	if err != nil {
+		log.Error(err, "resources/checkStatusInstance: failed to create a conn via gRPC.Dial")
+		return "", err
+	}
+	defer conn.Close()
+
+	caClient := capb.NewConfigAgentClient(conn)
+	cdOut, err := caClient.CheckStatus(ctx, &capb.CheckStatusRequest{
+		Name:            instName,
+		CdbName:         cdbName,
+		CheckStatusType: capb.CheckStatusRequest_INSTANCE,
+		DbDomain:        DBDomain,
+	})
+	if err != nil {
+		return "", fmt.Errorf("resource/checkStatusInstance: failed on CheckStatus gRPC call: %v", err)
+	}
+	log.Info("resource/CheckStatusInstance: DONE with this output", "out", cdOut)
+
+	return string(cdOut.Status), nil
+}
+
+// GetDBDomain figures out DBDomain from DBUniqueName and DBDomain.
+func GetDBDomain(inst *v1alpha1.Instance) string {
+	// Does DBUniqueName contain a DB Domain suffix?
+	if strings.Contains(inst.Spec.DBUniqueName, ".") {
+		domainFromName := strings.SplitN(inst.Spec.DBUniqueName, ".", 2)[1]
+		return domainFromName
+	}
+
+	return inst.Spec.DBDomain
+}
+
+// ConfigAttribute attempts to detect what value to use for a requested
+// attribute. If an explicit value is requested via the Spec,
+// it's immediately returned "as is". If not, a customer global Config
+// is checked and returned if set. Failing all that, a platform default
+// value is used for the requested attribute.
+func ConfigAttribute(name, explicitRequest string, config *v1alpha1.Config) (string, error) {
+	if explicitRequest != "" {
+		return explicitRequest, nil
+	}
+
+	// Assume the default platform is GCP. This can be overridden via a Config.
+ platform := platformGCP + if config != nil && config.Spec.Platform != "" { + platform = config.Spec.Platform + } + + gc, err := getPlatformConfig(platform) + if err != nil { + return "", err + } + + switch name { + case "StorageClass": + return gc.finalStorageClassName(config), nil + case "VolumeSnapshotClass": + return gc.finalVolumeSnapshotClassName(config), nil + default: + return "", fmt.Errorf("unknown attribute requested (presently supported: StorageClass, VolumeSnapshotClass): %q", name) + } +} + +func addMinikubeInitContainer(sp StsParams, containers []corev1.Container, uid, gid int64) []corev1.Container { + volumeMounts := buildPVCMounts(sp) + cmd := "" + for _, mount := range volumeMounts { + if cmd != "" { + cmd += " && " + } + cmd += fmt.Sprintf("chown %d:%d %s ", uid, gid, mount.MountPath) + } + sp.Log.Info("add an init container for minikube", "cmd", cmd) + return append(containers, corev1.Container{ + Name: "prepare-pv-container", + Image: "busybox:latest", + Command: []string{"sh", "-c", cmd}, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: func(i int64) *int64 { return &i }(0), + RunAsGroup: func(i int64) *int64 { return &i }(0), + RunAsNonRoot: func(b bool) *bool { return &b }(false), + AllowPrivilegeEscalation: &sp.PrivEscalation, + }, + VolumeMounts: volumeMounts, + }) +} diff --git a/oracle/controllers/resources_test.go b/oracle/controllers/resources_test.go new file mode 100644 index 0000000..14de6e7 --- /dev/null +++ b/oracle/controllers/resources_test.go @@ -0,0 +1,108 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controllers + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" +) + +func TestBuildPVCMounts(t *testing.T) { + + testCases := []struct { + Name string + InstanceName string + DiskSpec []commonv1alpha1.DiskSpec + wantMounts []corev1.VolumeMount + }{ + { + Name: "default - data and log disk only", + InstanceName: "myinst", + DiskSpec: []commonv1alpha1.DiskSpec{ + {Name: "DataDisk"}, + {Name: "LogDisk"}, + }, + wantMounts: []corev1.VolumeMount{ + { + Name: "myinst-pvc-u02", + MountPath: "/u02", + }, + { + Name: "myinst-pvc-u03", + MountPath: "/u03", + }, + }, + }, + { + Name: "default - data, log and backup", + InstanceName: "myinst", + DiskSpec: []commonv1alpha1.DiskSpec{ + {Name: "DataDisk"}, + {Name: "LogDisk"}, + {Name: "BackupDisk"}, + }, + wantMounts: []corev1.VolumeMount{ + { + Name: "myinst-pvc-u02", + MountPath: "/u02", + }, + { + Name: "myinst-pvc-u03", + MountPath: "/u03", + }, + { + Name: "myinst-pvc-u04", + MountPath: "/u04", + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + sp := StsParams{ + Disks: tc.DiskSpec, + Inst: &v1alpha1.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Name: tc.InstanceName, + }, + }, + } + + gotVolumeMounts := buildPVCMounts(sp) + + if len(gotVolumeMounts) != len(tc.wantMounts) { + t.Errorf("got len(volumeMounts)=%d, want %d", len(gotVolumeMounts), len(tc.wantMounts)) + } + + for _, wantMount := range tc.wantMounts { + var gotMount corev1.VolumeMount + for _, mount := range gotVolumeMounts { + if mount.Name == wantMount.Name { + gotMount = mount + break + } + } + if gotMount.MountPath != wantMount.MountPath { + t.Errorf("got mountPath=%s, want %s", gotMount.MountPath, wantMount.MountPath) + } + } + }) + } +} diff --git a/oracle/controllers/testhelpers/BUILD.bazel b/oracle/controllers/testhelpers/BUILD.bazel new file mode 100644 index 0000000..ff98562 --- /dev/null +++ b/oracle/controllers/testhelpers/BUILD.bazel @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "testhelpers", + srcs = [ + "envtest.go", + "grpcmocks.go", + ], + data = ["//oracle:configs"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers", + visibility = ["//visibility:public"], + deps = [ + "//common/api/v1alpha1", + "//oracle/api/v1alpha1", + "//oracle/controllers", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/k8s", + "@com_github_kubernetes_csi_external_snapshotter_v2//pkg/apis/volumesnapshot/v1beta1", + "@com_github_onsi_ginkgo//:ginkgo", + "@com_github_onsi_ginkgo//config", + "@com_github_onsi_gomega//:gomega", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@go_googleapis//google/rpc:status_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_k8s_api//apps/v1:apps", + "@io_k8s_api//core/v1:core", + "@io_k8s_api//rbac/v1:rbac", + "@io_k8s_apimachinery//pkg/api/resource", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_apimachinery//pkg/runtime", + "@io_k8s_apimachinery//pkg/runtime/serializer", + "@io_k8s_apimachinery//pkg/util/wait", + "@io_k8s_client_go//kubernetes", + "@io_k8s_client_go//kubernetes/scheme", + "@io_k8s_client_go//plugin/pkg/client/auth/gcp", + "@io_k8s_client_go//rest", + "@io_k8s_client_go//util/retry", + 
"@io_k8s_sigs_controller_runtime//:controller-runtime", + "@io_k8s_sigs_controller_runtime//pkg/client", + "@io_k8s_sigs_controller_runtime//pkg/client/config", + "@io_k8s_sigs_controller_runtime//pkg/envtest", + "@io_k8s_sigs_controller_runtime//pkg/envtest/printer", + "@io_k8s_sigs_controller_runtime//pkg/log", + "@io_k8s_sigs_controller_runtime//pkg/log/zap", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + ], +) diff --git a/oracle/controllers/testhelpers/envtest.go b/oracle/controllers/testhelpers/envtest.go new file mode 100644 index 0000000..fe05b92 --- /dev/null +++ b/oracle/controllers/testhelpers/envtest.go @@ -0,0 +1,1046 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testhelpers + +import ( + "bytes" + "context" + "encoding/base32" + "errors" + "fmt" + "io" + "io/ioutil" + logg "log" + "math/rand" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + . "github.com/onsi/ginkgo" + ginkgoconfig "github.com/onsi/ginkgo/config" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s" +) + +// Reconciler is the interface to setup a reconciler for testing. +type Reconciler interface { + SetupWithManager(manager ctrl.Manager) error +} + +// cdToRoot change to the repo root directory. +func cdToRoot(t *testing.T) { + for { + if _, err := os.Stat("config/crd/bases/oracle.db.anthosapis.com_instances.yaml"); err == nil { + break + } + if err := os.Chdir(".."); err != nil { + t.Fatalf("Failed to cd: %v", err) + } + if cwd, err := os.Getwd(); err != nil || cwd == "/" { + t.Fatalf("Failed to find config dir") + } + } +} + +// RandName generates a name suitable for use as a namespace with a given prefix. 
+func RandName(base string) string { + seed := rand.NewSource(time.Now().UnixNano() + int64(1000000*ginkgoconfig.GinkgoConfig.ParallelNode)) + testrand := rand.New(seed) + buf := make([]byte, 4) + testrand.Read(buf) + str := strings.ToLower(base32.StdEncoding.EncodeToString(buf)) + return base + "-" + str[:4] +} + +// RunReconcilerTestSuite runs all specs in the current package against a +// specialized testing environment. Before running the suite, this function +// configures the test environment by taking the following actions: +// +// * Starting a control plane consisting of an etcd process and a Kubernetes API +// server process. +// * Installing CRDs into the control plane +// * Starting an in-process manager in a dedicated goroutine with the given +// reconcilers installed in it. +// +// These components will be torn down after the suite runs. +func RunReconcilerTestSuite(t *testing.T, k8sClient *client.Client, k8sManager *ctrl.Manager, description string, controllers func() []Reconciler) { + cdToRoot(t) + + // Define the test environment. + testEnv := envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("config", "crd", "bases"), + filepath.Join("config", "crd", "testing"), + }, + ControlPlaneStartTimeout: 60 * time.Second, // Default 20s may not be enough for test pods. + } + + BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + var err error + cfg, err := testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = v1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = snapv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + MetricsBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + + *k8sManager = mgr + *k8sClient = mgr.GetClient() + + // Install controllers into the manager. + for _, c := range controllers() { + Expect(c.SetupWithManager(mgr)).To(Succeed()) + } + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred()) + }() + + close(done) + }, 300) + + AfterSuite(func() { + By("Stopping control plane") + Expect(testEnv.Stop()).To(Succeed()) + }) + + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, + description, + []Reporter{printer.NewlineReporter{}}) +} + +var ( + // Base image names, to be combined with PROW_IMAGE_{TAG,REPO}. + dbInitImage = "oracle.db.anthosapis.com/dbinit" + configAgentImage = "oracle.db.anthosapis.com/configagent" + loggingSidecarImage = "oracle.db.anthosapis.com/loggingsidecar" + monitoringAgentImage = "oracle.db.anthosapis.com/monitoring" + operatorImage = "oracle.db.anthosapis.com/operator" +) + +// Set up kubectl config targeting PROW_PROJECT / PROW_CLUSTER / PROW_CLUSTER_ZONE +// Set envtest environment pointing to that cluster +// Create k8s client +// Install CRDs +// Create a new 'namespace' +func initK8sCluster(namespace *string) (envtest.Environment, context.Context, client.Client) { + cdToRoot(nil) + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + log := logf.Log + // Generate credentials for our test cluster. + Expect(os.Setenv("KUBECONFIG", fmt.Sprintf("/tmp/.kubectl/config-%v", *namespace))).Should(Succeed()) + + // Allow local runs to target their own GKE cluster to prevent collisions with Prow. 
+	var targetProject, targetCluster, targetZone string
+	if targetProject = os.Getenv("PROW_PROJECT"); targetProject == "" {
+		Expect(errors.New("PROW_PROJECT envvar was not set. Did you try to test without make?")).NotTo(HaveOccurred())
+	}
+	if targetCluster = os.Getenv("PROW_CLUSTER"); targetCluster == "" {
+		Expect(errors.New("PROW_CLUSTER envvar was not set. Did you try to test without make?")).NotTo(HaveOccurred())
+	}
+	if targetZone = os.Getenv("PROW_CLUSTER_ZONE"); targetZone == "" {
+		Expect(errors.New("PROW_CLUSTER_ZONE envvar was not set. Did you try to test without make?")).NotTo(HaveOccurred())
+	}
+	cmdGetCreds := exec.Command("gcloud", "container", "clusters", "get-credentials", targetCluster, "--project="+targetProject, "--zone="+targetZone)
+	out, err := cmdGetCreds.CombinedOutput()
+	log.Info("gcloud get-credentials", "output", string(out))
+	Expect(err).NotTo(HaveOccurred())
+
+	// Load the test gcp project config.
+	cfg, err := config.GetConfig()
+	log.Info("Load kubectl config")
+	Expect(err).NotTo(HaveOccurred())
+
+	trueValue := true
+	env := envtest.Environment{
+		UseExistingCluster: &trueValue,
+		Config:             cfg,
+		CRDDirectoryPaths: []string{
+			filepath.Join("config", "crd", "bases"),
+		},
+		CRDInstallOptions: envtest.CRDInstallOptions{CleanUpAfterUse: false},
+	}
+
+	var CRDBackoff = wait.Backoff{
+		Steps:    6,
+		Duration: 100 * time.Millisecond,
+		Factor:   5.0,
+		Jitter:   0.1,
+	}
+
+	// env.Start() may fail on the same set of CRDs during parallel execution;
+	// retry in that case.
+	Expect(retry.OnError(CRDBackoff, func(error) bool { return true }, func() error {
+		_, err = env.Start()
+		if err != nil {
+			logf.Log.Error(err, "Envtest startup failed: CRD conflict, retrying")
+		}
+		return err
+	})).Should(Succeed())
+
+	err = v1alpha1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = snapv1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	k8sClient, err := client.New(cfg, client.Options{})
+	Expect(err).NotTo(HaveOccurred())
+
+	nsObj := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: *namespace,
+			Labels: map[string]string{
+				"control-plane": "controller-manager",
+			},
+		},
+	}
+	ctx := context.Background()
+	Expect(k8sClient.Create(ctx, nsObj)).Should(Succeed())
+	return env, ctx, k8sClient
+}
+
+// cleanupK8Cluster removes the namespace (and all corresponding objects)
+// and the kubectl config.
+func cleanupK8Cluster(namespace string, k8sClient client.Client) {
+	nsObj := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: namespace,
+			Labels: map[string]string{
+				"control-plane": "controller-manager",
+			},
+		},
+	}
+	if k8sClient != nil {
+		k8sClient.Delete(context.Background(), nsObj)
+	}
+	os.Remove(fmt.Sprintf("/tmp/.kubectl/config-%v", namespace))
+}
+
+// PrintEvents prints events for all namespaces in the cluster.
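
For reference, CRDBackoff above (Steps=6, Duration=100ms, Factor=5, Jitter=0.1) gives env.Start() up to six attempts with delays of roughly 0.1s, 0.5s, 2.5s, 12.5s and 62.5s between them, each inflated by up to 10% jitter. A standalone sketch that prints the schedule (jitter omitted):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors wait.Backoff{Steps: 6, Duration: 100ms, Factor: 5.0}:
	// the first delay equals Duration and each subsequent delay is
	// multiplied by Factor.
	d := 100 * time.Millisecond
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("retry %d after ~%v\n", attempt, d)
		d = time.Duration(float64(d) * 5.0)
	}
}
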
+func PrintEvents() {
+	cmd := exec.Command("kubectl", "get", "events", "-A", "-o", "custom-columns=LastSeen:.lastTimestamp,From:.source.component,Type:.type,Reason:.reason,Message:.message", "--sort-by=.lastTimestamp")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logf.Log.Error(err, "Failed to get events")
+		return
+	}
+	log := logg.New(GinkgoWriter, "", 0)
+	log.Println("=============================")
+	log.Printf("Last events:\n %s\n", out)
+}
+
+// PrintPods prints pods for all namespaces in the cluster.
+func PrintPods() {
+	cmd := exec.Command("kubectl", "get", "pods", "-A", "-o", "wide")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logf.Log.Error(err, "Failed to get pods")
+		return
+	}
+	log := logg.New(GinkgoWriter, "", 0)
+	log.Println("=============================")
+	log.Printf("Pods:\n %s\n", out)
+}
+
+// PrintSVCs prints services for all namespaces in the cluster.
+func PrintSVCs() {
+	cmd := exec.Command("kubectl", "get", "svc", "-A", "-o", "wide")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logf.Log.Error(err, "Failed to get svcs")
+		return
+	}
+	log := logg.New(GinkgoWriter, "", 0)
+	log.Println("=============================")
+	log.Printf("SVCs:\n %s\n", out)
+}
+
+// PrintPVCs prints PVCs for all namespaces in the cluster.
+func PrintPVCs() {
+	cmd := exec.Command("kubectl", "get", "pvc", "-A", "-o", "wide")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logf.Log.Error(err, "Failed to get pvcs")
+		return
+	}
+	log := logg.New(GinkgoWriter, "", 0)
+	log.Println("=============================")
+	log.Printf("PVCs:\n %s\n", out)
+}
+
+// PrintENV prints the environment variables.
+func PrintENV() {
+	log := logg.New(GinkgoWriter, "", 0)
+	log.Println("=============================")
+	log.Println("ENV:")
+	for _, e := range os.Environ() {
+		log.Println(e)
+	}
+}
+
+// PrintClusterObjects prints cluster objects - events, pods, PVCs and
+// services for all namespaces in the cluster.
+func PrintClusterObjects() {
+	PrintENV()
+	PrintEvents()
+	PrintPods()
+	PrintPVCs()
+	PrintSVCs()
+}
+
+// PrintLogs prints logs from the requested containers.
+func PrintLogs(namespace string, env envtest.Environment, dumpLogsFor []string, instances []string) {
+	log := logg.New(GinkgoWriter, "", 0)
+	for _, c := range dumpLogsFor {
+		var logs string
+		var err error
+
+		// Make the log start a bit easier to distinguish.
+		log.Println("=============================")
+		if c == "manager" {
+			logs, err = getOperatorLogs(context.Background(), env.Config, namespace)
+			if err != nil {
+				log.Printf("Failed to get %s logs: %s\n", c, err)
+			} else {
+				log.Printf("%s logs:\n %s\n", c, logs)
+			}
+		} else {
+			for _, inst := range instances {
+				logs, err = getAgentLogs(context.Background(), env.Config, namespace, inst, c)
+				if err != nil {
+					log.Printf("Failed to get %s %s logs: %s\n", inst, c, err)
+				} else {
+					log.Printf("%s %s logs:\n %s\n", inst, c, logs)
+				}
+			}
+		}
+	}
+}
+
+// DeployOperator deploys an operator and returns a cleanup function to delete
+// all cluster level objects created outside of the namespace.
+func DeployOperator(ctx context.Context, k8sClient client.Client, namespace string) (func() error, error) {
+	var agentImageTag, agentImageRepo string
+	if agentImageTag = os.Getenv("PROW_IMAGE_TAG"); agentImageTag == "" {
+		return nil, errors.New("PROW_IMAGE_TAG envvar was not set. Did you try to test without make?")
+	}
+	if agentImageRepo = os.Getenv("PROW_IMAGE_REPO"); agentImageRepo == "" {
+		return nil, errors.New("PROW_IMAGE_REPO envvar was not set. 
Did you try to test without make?") + } + + dbInitImage := fmt.Sprintf("%s/%s:%s", agentImageRepo, dbInitImage, agentImageTag) + configAgentImage := fmt.Sprintf("%s/%s:%s", agentImageRepo, configAgentImage, agentImageTag) + loggingSidecarImage := fmt.Sprintf("%s/%s:%s", agentImageRepo, loggingSidecarImage, agentImageTag) + monitoringAgentImage := fmt.Sprintf("%s/%s:%s", agentImageRepo, monitoringAgentImage, agentImageTag) + operatorImage := fmt.Sprintf("%s/%s:%s", agentImageRepo, operatorImage, agentImageTag) + + objs, err := readYamls([]string{ + "config/manager/manager.yaml", + "config/rbac/role.yaml", + "config/rbac/role_binding.yaml", + }) + if err != nil { + return nil, err + } + + // minimal set of operator.yaml we need to deploy. + var d *appsv1.Deployment + var cr *rbacv1.ClusterRole + var crb *rbacv1.ClusterRoleBinding + for _, obj := range objs { + if _, ok := obj.(*appsv1.Deployment); ok { + d = obj.(*appsv1.Deployment) + } + if _, ok := obj.(*rbacv1.ClusterRole); ok { + if cr != nil { + return nil, fmt.Errorf("test needs to be updated to handle multiple ClusterRoles") + } + cr = obj.(*rbacv1.ClusterRole) + } + if _, ok := obj.(*rbacv1.ClusterRoleBinding); ok { + if crb != nil { + return nil, fmt.Errorf("test needs to be updated to handle multiple ClusterRoleBindings") + } + crb = obj.(*rbacv1.ClusterRoleBinding) + } + } + + // Add in our overrides. + cr.ObjectMeta.Name = "manager-role-" + namespace + crb.ObjectMeta.Name = "manager-rolebinding-" + namespace + crb.RoleRef.Name = cr.ObjectMeta.Name + crb.Subjects[0].Namespace = namespace + d.Namespace = namespace + d.Spec.Template.Spec.Containers[0].Image = operatorImage + d.Spec.Template.Spec.Containers[0].ImagePullPolicy = corev1.PullAlways + d.Spec.Template.Spec.Containers[0].Args = []string{ + "--logtostderr=true", + "--enable-leader-election=false", + "--namespace=" + namespace, + "--db_init_image_uri=" + dbInitImage, + "--config_image_uri=" + configAgentImage, + "--logging_sidecar_image_uri=" + loggingSidecarImage, + "--monitoring_agent_image_uri=" + monitoringAgentImage, + } + + // Ensure account has cluster admin to create ClusterRole/Binding. You + // can figure out the k8s account name from a GCE service account name + // using the `uniqueId` property from `gcloud iam service-accounts + // describe some@service.account`. + if err := k8sClient.Create(ctx, cr); err != nil { + return nil, err + } + if err := k8sClient.Create(ctx, crb); err != nil { + k8sClient.Delete(ctx, cr) + return nil, err + } + if err := k8sClient.Create(ctx, d); err != nil { + k8sClient.Delete(ctx, cr) + k8sClient.Delete(ctx, crb) + return nil, err + } + + // Ensure deployment succeeds. 
+	instKey := client.ObjectKey{Namespace: namespace, Name: d.Name}
+	Eventually(func() int {
+		err := k8sClient.Get(ctx, instKey, d)
+		if err != nil {
+			return 0
+		}
+		return int(d.Status.ReadyReplicas)
+	}, 30*time.Second, 1*time.Second).Should(Equal(1))
+
+	return func() error {
+		if err := k8sClient.Delete(ctx, cr); err != nil {
+			return err
+		}
+		if err := k8sClient.Delete(ctx, crb); err != nil {
+			return err
+		}
+		return nil
+	}, nil
+}
+
+func readYamls(files []string) ([]runtime.Object, error) {
+	var objs []runtime.Object
+
+	decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDeserializer()
+	for _, f := range files {
+		data, err := ioutil.ReadFile(f)
+		if err != nil {
+			return nil, fmt.Errorf("error reading '%s': %v", f, err)
+		}
+		parts := bytes.Split(data, []byte("\n---"))
+
+		// role.yaml is generated by kubebuilder with an empty yaml
+		// doc; this won't decode, so we need to filter it out first.
+		for _, part := range parts {
+			if cleaned := bytes.TrimSpace(part); len(cleaned) > 0 {
+				obj, err := runtime.Decode(decoder, cleaned)
+				if err != nil {
+					return nil, fmt.Errorf("error decoding '%s': %v", f, err)
+				}
+
+				objs = append(objs, obj)
+			}
+		}
+	}
+
+	return objs, nil
+}
+
+func getOperatorLogs(ctx context.Context, config *rest.Config, namespace string) (string, error) {
+	clientSet, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return "", err
+	}
+
+	pod, err := findPodFor(ctx, clientSet, namespace, "control-plane=controller-manager")
+	if err != nil {
+		return "", err
+	}
+	return getContainerLogs(ctx, clientSet, namespace, pod.Name, "manager")
+}
+
+func getAgentLogs(ctx context.Context, config *rest.Config, namespace, instance, agent string) (string, error) {
+	// The label selector to find the target agent container. Different
+	// labels are used for the CSA/NCSA agents to associate the deployments
+	// with the instance.
+	agentToQuery := map[string]string{
+		// NCSA Agents
+		"config-agent":      "deployment=" + instance + "-agent-deployment",
+		"oracle-monitoring": "deployment=" + instance + "-agent-deployment",
+		// CSA Agents
+		"oracledb":             "instance=" + instance,
+		"dbdaemon":             "instance=" + instance,
+		"alert-log-sidecar":    "instance=" + instance,
+		"listener-log-sidecar": "instance=" + instance,
+	}
+
+	clientSet, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return "", err
+	}
+
+	pod, err := findPodFor(ctx, clientSet, namespace, agentToQuery[agent])
+	if err != nil {
+		return "", err
+	}
+	return getContainerLogs(ctx, clientSet, namespace, pod.Name, agent)
+}
+
+func getContainerLogs(ctx context.Context, clientSet *kubernetes.Clientset, ns, p, c string) (string, error) {
+	logOpts := corev1.PodLogOptions{
+		Container: c,
+	}
+	req := clientSet.CoreV1().Pods(ns).GetLogs(p, &logOpts)
+	podLogs, err := req.Stream(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	sb := strings.Builder{}
+	_, err = io.Copy(&sb, podLogs)
+	if err != nil {
+		return "", err
+	}
+	return sb.String(), nil
+}
+
+func findPodFor(ctx context.Context, clientSet *kubernetes.Clientset, ns, filter string) (*corev1.Pod, error) {
+	listOpts := metav1.ListOptions{
+		LabelSelector: filter,
+	}
+	pods, err := clientSet.CoreV1().Pods(ns).List(ctx, listOpts)
+	if err != nil {
+		return nil, err
+	}
+	if len(pods.Items) < 1 {
+		return nil, fmt.Errorf("couldn't find Pod in %q matching %q", ns, filter)
+	}
+	if len(pods.Items) > 1 {
+		return nil, fmt.Errorf("found multiple Pods in %q matching %q:\n%+v", ns, filter, pods.Items)
+	}
+	return &pods.Items[0], nil
+}
+
+// GCloudServiceAccount returns the GCloud service account name.
+func GCloudServiceAccount() string {
+	return fmt.Sprintf(
+		"%s@%s.iam.gserviceaccount.com",
+		os.Getenv("PROW_INT_TEST_SA"),
+		os.Getenv("PROW_PROJECT"))
+}
+
+/*
+K8sOperatorEnvironment is a helper for integration testing.
+
+It encapsulates all necessary variables to work with the test cluster.
+It can be created/destroyed multiple times within one test suite.
+Depends on the Ginkgo asserts.
+Example usage:
+
+// Global variable, to be accessible by AfterSuite.
+var k8sEnv = testhelpers.K8sEnvironment{}
+// In case of Ctrl-C, clean up the last valid k8sEnv.
+AfterSuite(func() {
+	k8sEnv.Close()
+})
+...
+BeforeEach(func() {
+	k8sEnv.Init(testhelpers.RandName("k8s-env-stress-test"))
+})
+AfterEach(func() {
+	k8sEnv.Close()
+})
+*/
+type K8sOperatorEnvironment struct {
+	Env               envtest.Environment
+	Namespace         string
+	Ctx               context.Context
+	K8sClient         client.Client
+	OperCleanup       func() error // Operator deployment cleanup callback.
+	TestFailed        bool         // If true then dump container logs.
+	K8sServiceAccount string
+}
+
+// Init initializes the environment: it installs CRDs, deploys the operator and creates the namespace.
+func (k8sEnv *K8sOperatorEnvironment) Init(namespace string) {
+	// K8S Service account
+	k8sEnv.K8sServiceAccount = os.Getenv("PROW_PROJECT") + ".svc.id.goog[" + namespace + "/default]"
+
+	By("Starting control plane " + namespace)
+	// Init cluster
+	k8sEnv.Namespace = namespace
+	k8sEnv.Env, k8sEnv.Ctx, k8sEnv.K8sClient = initK8sCluster(&k8sEnv.Namespace)
+	// Deploy operator
+	By("Deploying operator " + namespace)
+	// Deploy Operator, retry if necessary
+	Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+		var err error
+		k8sEnv.OperCleanup, err = DeployOperator(k8sEnv.Ctx, k8sEnv.K8sClient, k8sEnv.Namespace)
+		if err != nil {
+			logf.Log.Error(err, "DeployOperator failed, retrying")
+		}
+		return err
+	})).Should(Succeed())
+}
+
+// Close cleans up cluster objects and uninstalls the operator.
+func (k8sEnv *K8sOperatorEnvironment) Close() {
+	if k8sEnv.Namespace == "" {
+		return
+	}
+	By("Stopping control plane " + k8sEnv.Namespace)
+	Expect(k8sEnv.Env.Stop()).To(Succeed())
+
+	if k8sEnv.OperCleanup != nil {
+		By("Uninstalling operator " + k8sEnv.Namespace)
+		k8sEnv.OperCleanup()
+	}
+	if k8sEnv.K8sClient == nil {
+		return
+	}
+
+	cleanupK8Cluster(k8sEnv.Namespace, k8sEnv.K8sClient)
+	k8sEnv.Namespace = ""
+}
+
+// Instance-specific helper functions.
+
+// TestImageForVersion returns service image for integration tests.
+// Image paths are predefined in the env variables TEST_IMAGE_ORACLE_*.
+func TestImageForVersion(version string, edition string, extra string) string {
+	switch edition {
+	case "XE":
+		{
+			switch version {
+			case "18c":
+				{
+					switch extra {
+					default:
+						{
+							return os.Getenv("TEST_IMAGE_ORACLE_18_XE_SEEDED")
+						}
+					}
+				}
+			}
+		}
+	case "EE":
+		{
+			switch version {
+			case "19.3":
+				{
+					switch extra {
+					default:
+						{
+							return os.Getenv("TEST_IMAGE_ORACLE_19_3_EE_SEEDED")
+						}
+					}
+				}
+			case "12.2":
+				{
+					switch extra {
+					case "31741641-unseeded":
+						{
+							return os.Getenv("TEST_IMAGE_ORACLE_12_2_EE_UNSEEDED_31741641")
+						}
+					case "seeded-gcloud-buggy":
+						{
+							return os.Getenv("TEST_IMAGE_ORACLE_12_2_EE_SEEDED_BUGGY")
+						}
+					default:
+						{
+							return os.Getenv("TEST_IMAGE_ORACLE_12_2_EE_SEEDED")
+						}
+					}
+				}
+			}
+		}
+	}
+	return "INVALID_VERSION"
+}
+
+// CreateSimpleInstance creates a basic v1alpha1.Instance object named 'instanceName'.
+// 'version' and 'edition' should match rules of TestImageForVersion().
+// Depends on the Ginkgo asserts.
+func CreateSimpleInstance(k8sEnv K8sOperatorEnvironment, instanceName string, version string, edition string) {
+	instance := &v1alpha1.Instance{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      instanceName,
+			Namespace: k8sEnv.Namespace,
+		},
+		Spec: v1alpha1.InstanceSpec{
+			CDBName: "GCLOUD",
+			GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{
+				Version: version,
+				Disks: []commonv1alpha1.DiskSpec{
+					{
+						Name: "DataDisk",
+						Size: resource.MustParse("100Gi"),
+					},
+					{
+						Name: "LogDisk",
+						Size: resource.MustParse("150Gi"),
+					},
+				},
+				MinMemoryForDBContainer: "7.0Gi",
+				Images: map[string]string{
+					"service": TestImageForVersion(version, edition, ""),
+				},
+			},
+		},
+	}
+
+	K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, instance)
+	instKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: instanceName}
+
+	// Wait until the instance is "Ready" (requires 5+ minutes to download image).
+	WaitForInstanceConditionState(k8sEnv, instKey, k8s.Ready, metav1.ConditionTrue, k8s.CreateComplete, 10*time.Minute)
+}
+
+// CreateSimplePdbWithDbObj creates a simple PDB from the given database object.
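
TestImageForVersion above grows a new switch level for every axis; an equivalent flat lookup keyed by edition/version/extra is one possible alternative (a sketch only, not part of this patch; behavior for the combinations listed above is unchanged):

// testImageEnvVars maps "edition/version/extra" to the env var naming the image.
var testImageEnvVars = map[string]string{
	"XE/18c/":                     "TEST_IMAGE_ORACLE_18_XE_SEEDED",
	"EE/19.3/":                    "TEST_IMAGE_ORACLE_19_3_EE_SEEDED",
	"EE/12.2/":                    "TEST_IMAGE_ORACLE_12_2_EE_SEEDED",
	"EE/12.2/31741641-unseeded":   "TEST_IMAGE_ORACLE_12_2_EE_UNSEEDED_31741641",
	"EE/12.2/seeded-gcloud-buggy": "TEST_IMAGE_ORACLE_12_2_EE_SEEDED_BUGGY",
}

func testImageForVersionFlat(version, edition, extra string) string {
	if env, ok := testImageEnvVars[edition+"/"+version+"/"+extra]; ok {
		return os.Getenv(env)
	}
	// Unrecognized extras fall back to the seeded default for that
	// edition/version pair, matching the switch's default branches.
	if env, ok := testImageEnvVars[edition+"/"+version+"/"]; ok {
		return os.Getenv(env)
	}
	return "INVALID_VERSION"
}
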
+func CreateSimplePdbWithDbObj(k8sEnv K8sOperatorEnvironment, database *v1alpha1.Database) {
+	pod := database.Spec.Instance + "-sts-0"
+	K8sCreateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, database)
+	// Wait for the PDB to come online (UserReady = "SyncComplete").
+	emptyObj := &v1alpha1.Database{}
+	objectKey := client.ObjectKey{Namespace: k8sEnv.Namespace, Name: database.Name}
+	WaitForObjectConditionState(k8sEnv, objectKey, emptyObj, k8s.UserReady, metav1.ConditionTrue, k8s.SyncComplete, 7*time.Minute)
+
+	// Open PDBs.
+	out := K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, "alter pluggable database all open;")
+	Expect(out).To(Equal(""))
+}
+
+// CreateSimplePDB creates a simple PDB 'pdb1' inside 'instanceName' Instance.
+// Depends on the Ginkgo asserts.
+func CreateSimplePDB(k8sEnv K8sOperatorEnvironment, instanceName string) {
+	CreateSimplePdbWithDbObj(k8sEnv, &v1alpha1.Database{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: k8sEnv.Namespace,
+			Name:      "pdb1",
+		},
+		Spec: v1alpha1.DatabaseSpec{
+			DatabaseSpec: commonv1alpha1.DatabaseSpec{
+				Name:     "pdb1",
+				Instance: instanceName,
+			},
+			AdminPassword: "123456",
+			Users: []v1alpha1.UserSpec{
+				{
+					UserSpec: commonv1alpha1.UserSpec{
+						Name: "scott",
+						CredentialSpec: commonv1alpha1.CredentialSpec{
+							Password: "tiger",
+						},
+					},
+					Privileges: []v1alpha1.PrivilegeSpec{"connect", "resource", "unlimited tablespace"},
+				},
+			},
+		},
+	})
+}
+
+// InsertSimpleData creates 'test_table' in pdb1 and inserts a test row.
+func InsertSimpleData(k8sEnv K8sOperatorEnvironment) {
+	pod := "mydb-sts-0"
+	// Insert test data.
+	sql := `alter session set container=pdb1;
+alter session set current_schema=scott;
+create table test_table (name varchar(100));
+insert into test_table values ('Hello World');
+commit;`
+	out := K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, sql)
+	Expect(out).To(Equal(""))
+}
+
+// VerifySimpleData checks that the test row in 'pdb1' exists.
+func VerifySimpleData(k8sEnv K8sOperatorEnvironment) {
+	pod := "mydb-sts-0"
+	sql := `alter session set container=pdb1;
+alter session set current_schema=scott;
+select name from test_table;`
+	Expect(K8sExecuteSqlOrFail(pod, k8sEnv.Namespace, sql)).To(Equal("Hello World"))
+}
+
+// WaitForObjectConditionState waits until the k8s object's condition reaches
+// status = targetStatus and reason = targetReason.
+// Objects supported: v1alpha1.{Instance, Import, Export, Database}
+// Depends on the Ginkgo asserts.
+func WaitForObjectConditionState(k8sEnv K8sOperatorEnvironment,
+	key client.ObjectKey,
+	emptyObj runtime.Object,
+	condition string,
+	targetStatus metav1.ConditionStatus,
+	targetReason string,
+	timeout time.Duration) {
+	Eventually(func() bool {
+		K8sGetWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx, key, emptyObj)
+		cond := &metav1.Condition{}
+		switch emptyObj.(type) {
+		case *v1alpha1.Instance:
+			cond = k8s.FindCondition(emptyObj.(*v1alpha1.Instance).Status.Conditions, condition)
+		case *v1alpha1.Import:
+			cond = k8s.FindCondition(emptyObj.(*v1alpha1.Import).Status.Conditions, condition)
+		case *v1alpha1.Export:
+			cond = k8s.FindCondition(emptyObj.(*v1alpha1.Export).Status.Conditions, condition)
+		case *v1alpha1.Database:
+			cond = k8s.FindCondition(emptyObj.(*v1alpha1.Database).Status.Conditions, condition)
+		}
+		if cond != nil {
+			logf.Log.Info(fmt.Sprintf("Waiting %v, status=%v:%v, expecting=%v:%v", condition, cond.Status, cond.Reason, targetStatus, targetReason))
+			return cond.Status == targetStatus && cond.Reason == targetReason
+		}
+		return false
+	}, timeout, 5*time.Second).Should(Equal(true))
+}
+
+// WaitForInstanceConditionState waits until the Instance condition object status = targetStatus and reason = targetReason.
+// Depends on the Ginkgo asserts.
+func WaitForInstanceConditionState(k8sEnv K8sOperatorEnvironment, key client.ObjectKey, condition string, targetStatus metav1.ConditionStatus, targetReason string, timeout time.Duration) {
+	instance := &v1alpha1.Instance{}
+	WaitForObjectConditionState(k8sEnv, key, instance, condition, targetStatus, targetReason, timeout)
+}
+
+// K8sExec execs a command in a pod and returns a string result.
+// Depends on the Ginkgo asserts.
+// kubectl exec -n <ns> <pod> -c <container>
+func K8sExec(pod string, ns string, container string, cmd string) (string, error) {
+	cfg, err := ctrl.GetConfig()
+	Expect(err).NotTo(HaveOccurred())
+	clientset, err := kubernetes.NewForConfig(cfg)
+	Expect(err).NotTo(HaveOccurred())
+	var p = controllers.ExecCmdParams{
+		Pod: pod,
+		Ns:  ns,
+		Con: &corev1.Container{
+			Name: container,
+		},
+		Sch:        runtime.NewScheme(),
+		RestConfig: cfg,
+		Client:     clientset,
+	}
+	// Execute the command via sh -c.
+	out, err := controllers.ExecCmdFunc(p, cmd)
+	// Trim the output.
+	out = strings.TrimSpace(out)
+	logf.Log.Info("Pod exec result", "output", out, "err", err)
+	return out, err
+}
+
+/*
+K8sExecuteSql executes multiple sql statements in an Oracle pod
+e.g.
+sql := `alter session set container=pdb1;
+create table test_table (name varchar(100));
+insert into test_table values ('Hello World');
+commit;`
+out, err = testhelpers.K8sExecuteSql("mydb-sts-0", "db", sql)
+Depends on the Ginkgo asserts.
+Please escape any bash special characters.
+*/
+func K8sExecuteSql(pod string, ns string, sql string) (string, error) {
+	cmd := fmt.Sprintf(`source ~/GCLOUD.env && sqlplus -S / as sysdba <<EOF
+%s
+EOF`, sql)
+	return K8sExec(pod, ns, "oracledb", cmd)
+}
+
+// K8sExecuteSqlOrFail is K8sExecuteSql that fails the test on error.
+func K8sExecuteSqlOrFail(pod string, ns string, sql string) string {
+	out, err := K8sExecuteSql(pod, ns, sql)
+	Expect(err).NotTo(HaveOccurred())
+	return out
+}
+
+// K8sCreateWithRetry calls Create() and retries on transient api errors.
+func K8sCreateWithRetry(k8sClient client.Client, ctx context.Context, obj runtime.Object) {
+	Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+		return k8sClient.Create(ctx, obj)
+	})).Should(Succeed())
+}
+
+// K8sGetWithRetry calls Get() and retries on transient api errors.
+func K8sGetWithRetry(k8sClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) {
+	Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+		return k8sClient.Get(ctx, key, obj)
+	})).Should(Succeed())
+}
+
+// K8sGetAndUpdateWithRetry gets the latest version of the object, applies
+// modify() to it and calls Update(), retrying the sequence on conflicts.
+func K8sGetAndUpdateWithRetry(k8sClient client.Client, ctx context.Context,
+	key client.ObjectKey, obj runtime.Object, modify func(*runtime.Object)) {
+	Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+		if err := k8sClient.Get(ctx, key, obj); err != nil {
+			return err
+		}
+		modify(&obj)
+		return k8sClient.Update(ctx, obj)
+	})).Should(Succeed())
+}
+
+// SetupServiceAccountBindingBetweenGcpAndK8s creates the binding between
+// the k8s service account <PROW_PROJECT>.svc.id.goog[<namespace>/default]
+// and google service account.
+func SetupServiceAccountBindingBetweenGcpAndK8s(k8sEnv K8sOperatorEnvironment) {
+	Expect(retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error {
+		cmd := exec.Command("gcloud", "iam",
+			"service-accounts", "add-iam-policy-binding",
+			"--role=roles/iam.workloadIdentityUser",
+			"--member="+"serviceAccount:"+k8sEnv.K8sServiceAccount,
+			GCloudServiceAccount())
+		out, err := cmd.CombinedOutput()
+		logf.Log.Info("gcloud iam service-accounts add-iam-policy-binding", "output", string(out))
+		return err
+	})).To(Succeed())
+	saObj := &corev1.ServiceAccount{}
+	K8sGetAndUpdateWithRetry(k8sEnv.K8sClient, k8sEnv.Ctx,
+		client.ObjectKey{Namespace: k8sEnv.Namespace, Name: "default"},
+		saObj,
+		func(obj *runtime.Object) {
+			// Add service account annotation.
+			(*obj).(*corev1.ServiceAccount).ObjectMeta.Annotations = map[string]string{
+				"iam.gke.io/gcp-service-account": GCloudServiceAccount(),
+			}
+		})
+}
diff --git a/oracle/controllers/testhelpers/grpcmocks.go b/oracle/controllers/testhelpers/grpcmocks.go
new file mode 100644
index 0000000..d771003
--- /dev/null
+++ b/oracle/controllers/testhelpers/grpcmocks.go
@@ -0,0 +1,273 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testhelpers
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/golang/protobuf/ptypes/empty"
+	"google.golang.org/genproto/googleapis/longrunning"
+	"google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpcstatus "google.golang.org/grpc/status"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers"
+	capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos"
+)
+
+// FakeOperationStatus is an enum type for LRO statuses managed by FakeConfigAgentClient.
+type FakeOperationStatus int
+
+const (
+	// StatusUndefined undefined.
+	StatusUndefined FakeOperationStatus = iota
+	// StatusRunning running.
+	StatusRunning
+	// StatusDone done.
+	StatusDone
+	// StatusDoneWithError done with error.
+	StatusDoneWithError
+	// StatusNotFound not found.
+	StatusNotFound
+)
+
+// FakeConfigAgentClient is a fake client that captures calls to the various ConfigAgent APIs.
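
The fake plugs into controller tests through FakeClientFactory below, which hands the same FakeConfigAgentClient to every caller so a test can assert on the call counters afterwards. A sketch, assuming an external test package (only the factory, the counter field and the capb request type come from this file):

package testhelpers_test

import (
	"context"
	"testing"

	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers"
	capb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos"
)

func TestFakeCountsCheckStatus(t *testing.T) {
	ctx := context.Background()
	f := &testhelpers.FakeClientFactory{}

	// New lazily populates f.Caclient on first use and always hands back
	// the same fake, so counters accumulate across calls.
	caClient, closeConn, err := f.New(ctx, nil, "ns", "inst")
	if err != nil {
		t.Fatal(err)
	}
	defer closeConn()

	if _, err := caClient.CheckStatus(ctx, &capb.CheckStatusRequest{}); err != nil {
		t.Fatal(err)
	}
	if got := f.Caclient.CheckStatusCalledCnt; got != 1 {
		t.Errorf("CheckStatusCalledCnt = %d, want 1", got)
	}
}
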
+type FakeConfigAgentClient struct { + PhysicalBackupCalledCnt int + PhysicalRestoreCalledCnt int + CreateDatabaseCalledCnt int + CreateUsersCalledCnt int + UsersChangedCalledCnt int + UpdateUsersCalledCnt int + CheckStatusCalledCnt int + CreateCDBCalledCnt int + BootstrapCDBCalledCnt int + BootstrapDatabaseCalledCnt int + BootstrapStandbyCalledCnt int + BounceDatabaseCalledCnt int + CreateListenerCalledCnt int + ListOperationsCalledCnt int + GetOperationCalledCnt int + deleteOperationCalledCnt int + dataPumpImportCalledCnt int + dataPumpExportCalledCnt int + SetParameterCalledCnt int + GetParameterTypeValueCalledCnt int + RecoverConfigFileCalledCnt int + AsyncPhysicalBackup bool + AsyncPhysicalRestore bool + FetchServiceImageMetaDataCnt int + NextGetOperationStatus FakeOperationStatus +} + +var ( + emptyConnCloseFunc = func() {} +) + +// FakeClientFactory is a simple factory to create our FakeConfigAgentClient. +type FakeClientFactory struct { + Caclient *FakeConfigAgentClient +} + +// New returns a new fake ConfigAgent. +func (g *FakeClientFactory) New(context.Context, client.Reader, string, string) (capb.ConfigAgentClient, controllers.ConnCloseFunc, error) { + if g.Caclient == nil { + g.Reset() + } + return g.Caclient, emptyConnCloseFunc, nil +} + +// Reset clears the inner ConfigAgent. +func (g *FakeClientFactory) Reset() { + g.Caclient = &FakeConfigAgentClient{} +} + +// Reset reset's the config agent's counters. +func (cli *FakeConfigAgentClient) Reset() { + *cli = FakeConfigAgentClient{} +} + +// CreateDatabase wrapper. +func (cli *FakeConfigAgentClient) CreateDatabase(context.Context, *capb.CreateDatabaseRequest, ...grpc.CallOption) (*capb.CreateDatabaseResponse, error) { + cli.CreateCDBCalledCnt++ + return nil, nil +} + +// CreateUsers wrapper. +func (cli *FakeConfigAgentClient) CreateUsers(context.Context, *capb.CreateUsersRequest, ...grpc.CallOption) (*capb.CreateUsersResponse, error) { + cli.CreateUsersCalledCnt++ + return nil, nil +} + +// UsersChanged wrapper. +func (cli *FakeConfigAgentClient) UsersChanged(context.Context, *capb.UsersChangedRequest, ...grpc.CallOption) (*capb.UsersChangedResponse, error) { + cli.UsersChangedCalledCnt++ + return nil, nil +} + +// UpdateUsers wrapper. +func (cli *FakeConfigAgentClient) UpdateUsers(context.Context, *capb.UpdateUsersRequest, ...grpc.CallOption) (*capb.UpdateUsersResponse, error) { + cli.UpdateUsersCalledCnt++ + return nil, nil +} + +// PhysicalBackup wrapper. +func (cli *FakeConfigAgentClient) PhysicalBackup(context.Context, *capb.PhysicalBackupRequest, ...grpc.CallOption) (*longrunning.Operation, error) { + cli.PhysicalBackupCalledCnt++ + return &longrunning.Operation{Done: !cli.AsyncPhysicalBackup}, nil +} + +// PhysicalRestore wrapper. +func (cli *FakeConfigAgentClient) PhysicalRestore(context.Context, *capb.PhysicalRestoreRequest, ...grpc.CallOption) (*longrunning.Operation, error) { + cli.PhysicalRestoreCalledCnt++ + return &longrunning.Operation{Done: !cli.AsyncPhysicalRestore}, nil +} + +// CheckStatus wrapper. +func (cli *FakeConfigAgentClient) CheckStatus(context.Context, *capb.CheckStatusRequest, ...grpc.CallOption) (*capb.CheckStatusResponse, error) { + cli.CheckStatusCalledCnt++ + return nil, nil +} + +// CreateCDB wrapper. +func (cli *FakeConfigAgentClient) CreateCDB(context.Context, *capb.CreateCDBRequest, ...grpc.CallOption) (*longrunning.Operation, error) { + cli.CreateCDBCalledCnt++ + return nil, nil +} + +// CreateListener wrapper. 
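+
+// A minimal sketch of how a reconciler test can plug in the fake factory in
+// place of controllers.GrpcConfigAgentClientFactory (wiring and the reconcile
+// trigger are elided; the asserted counter is illustrative):
+//
+//	fakeFactory := &testhelpers.FakeClientFactory{}
+//	fakeFactory.Reset()
+//	reconciler.ClientFactory = fakeFactory
+//	// ... drive a reconcile ...
+//	Expect(fakeFactory.Caclient.CreateCDBCalledCnt).To(Equal(1))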
+
+// CreateDatabase wrapper.
+func (cli *FakeConfigAgentClient) CreateDatabase(context.Context, *capb.CreateDatabaseRequest, ...grpc.CallOption) (*capb.CreateDatabaseResponse, error) {
+	cli.CreateDatabaseCalledCnt++
+	return nil, nil
+}
+
+// CreateUsers wrapper.
+func (cli *FakeConfigAgentClient) CreateUsers(context.Context, *capb.CreateUsersRequest, ...grpc.CallOption) (*capb.CreateUsersResponse, error) {
+	cli.CreateUsersCalledCnt++
+	return nil, nil
+}
+
+// UsersChanged wrapper.
+func (cli *FakeConfigAgentClient) UsersChanged(context.Context, *capb.UsersChangedRequest, ...grpc.CallOption) (*capb.UsersChangedResponse, error) {
+	cli.UsersChangedCalledCnt++
+	return nil, nil
+}
+
+// UpdateUsers wrapper.
+func (cli *FakeConfigAgentClient) UpdateUsers(context.Context, *capb.UpdateUsersRequest, ...grpc.CallOption) (*capb.UpdateUsersResponse, error) {
+	cli.UpdateUsersCalledCnt++
+	return nil, nil
+}
+
+// PhysicalBackup wrapper.
+func (cli *FakeConfigAgentClient) PhysicalBackup(context.Context, *capb.PhysicalBackupRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.PhysicalBackupCalledCnt++
+	return &longrunning.Operation{Done: !cli.AsyncPhysicalBackup}, nil
+}
+
+// PhysicalRestore wrapper.
+func (cli *FakeConfigAgentClient) PhysicalRestore(context.Context, *capb.PhysicalRestoreRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.PhysicalRestoreCalledCnt++
+	return &longrunning.Operation{Done: !cli.AsyncPhysicalRestore}, nil
+}
+
+// CheckStatus wrapper.
+func (cli *FakeConfigAgentClient) CheckStatus(context.Context, *capb.CheckStatusRequest, ...grpc.CallOption) (*capb.CheckStatusResponse, error) {
+	cli.CheckStatusCalledCnt++
+	return nil, nil
+}
+
+// CreateCDB wrapper.
+func (cli *FakeConfigAgentClient) CreateCDB(context.Context, *capb.CreateCDBRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.CreateCDBCalledCnt++
+	return nil, nil
+}
+
+// CreateListener wrapper.
+func (cli *FakeConfigAgentClient) CreateListener(context.Context, *capb.CreateListenerRequest, ...grpc.CallOption) (*capb.CreateListenerResponse, error) {
+	cli.CreateListenerCalledCnt++
+	return nil, nil
+}
+
+// ListOperations wrapper.
+func (cli *FakeConfigAgentClient) ListOperations(context.Context, *longrunning.ListOperationsRequest, ...grpc.CallOption) (*longrunning.ListOperationsResponse, error) {
+	cli.ListOperationsCalledCnt++
+	return nil, nil
+}
+
+// GetOperation wrapper. Returns an LRO in the state staged via NextGetOperationStatus.
+func (cli *FakeConfigAgentClient) GetOperation(context.Context, *longrunning.GetOperationRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.GetOperationCalledCnt++
+
+	switch cli.NextGetOperationStatus {
+	case StatusDone:
+		return &longrunning.Operation{Done: true}, nil
+
+	case StatusDoneWithError:
+		return &longrunning.Operation{
+			Done: true,
+			Result: &longrunning.Operation_Error{
+				Error: &status.Status{Code: int32(codes.Unknown), Message: "Test Error"},
+			},
+		}, nil
+
+	case StatusRunning:
+		return &longrunning.Operation{}, nil
+
+	case StatusNotFound:
+		return nil, grpcstatus.Errorf(codes.NotFound, "")
+
+	case StatusUndefined:
+		panic("Misconfigured test, set up expected operation status")
+
+	default:
+		panic(fmt.Sprintf("unknown status: %v", cli.NextGetOperationStatus))
+	}
+}
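+
+// For controllers that poll LROs, a test can stage the next reply before each
+// reconcile pass, e.g. (sketch; the surrounding reconcile calls are elided):
+//
+//	fakeFactory.Caclient.NextGetOperationStatus = testhelpers.StatusRunning
+//	// ... reconcile; expect a requeue while the operation is running ...
+//	fakeFactory.Caclient.NextGetOperationStatus = testhelpers.StatusDone
+//	// ... reconcile; expect the operation result to be consumed ...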
+
+// DeleteOperation wrapper.
+func (cli *FakeConfigAgentClient) DeleteOperation(context.Context, *longrunning.DeleteOperationRequest, ...grpc.CallOption) (*empty.Empty, error) {
+	cli.deleteOperationCalledCnt++
+	return nil, nil
+}
+
+// CreateCDBUser wrapper.
+func (cli *FakeConfigAgentClient) CreateCDBUser(context.Context, *capb.CreateCDBUserRequest, ...grpc.CallOption) (*capb.CreateCDBUserResponse, error) {
+	cli.CreateCDBUserCalledCnt++
+	return nil, nil
+}
+
+// BootstrapDatabase wrapper.
+func (cli *FakeConfigAgentClient) BootstrapDatabase(context.Context, *capb.BootstrapDatabaseRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.BootstrapDatabaseCalledCnt++
+	return nil, nil
+}
+
+// BootstrapStandby wrapper.
+func (cli *FakeConfigAgentClient) BootstrapStandby(context.Context, *capb.BootstrapStandbyRequest, ...grpc.CallOption) (*capb.BootstrapStandbyResponse, error) {
+	cli.BootstrapStandbyCalledCnt++
+	return nil, nil
+}
+
+// DataPumpImport wrapper.
+func (cli *FakeConfigAgentClient) DataPumpImport(context.Context, *capb.DataPumpImportRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.dataPumpImportCalledCnt++
+	return &longrunning.Operation{Done: false}, nil
+}
+
+// DataPumpExport wrapper.
+func (cli *FakeConfigAgentClient) DataPumpExport(context.Context, *capb.DataPumpExportRequest, ...grpc.CallOption) (*longrunning.Operation, error) {
+	cli.dataPumpExportCalledCnt++
+	return nil, nil
+}
+
+// BounceDatabase wrapper.
+func (cli *FakeConfigAgentClient) BounceDatabase(context.Context, *capb.BounceDatabaseRequest, ...grpc.CallOption) (*capb.BounceDatabaseResponse, error) {
+	cli.BounceDatabaseCalledCnt++
+	return nil, nil
+}
+
+// DataPumpImportCalledCnt returns the call count.
+func (cli *FakeConfigAgentClient) DataPumpImportCalledCnt() int {
+	return cli.dataPumpImportCalledCnt
+}
+
+// DataPumpExportCalledCnt returns the call count.
+func (cli *FakeConfigAgentClient) DataPumpExportCalledCnt() int {
+	return cli.dataPumpExportCalledCnt
+}
+
+// DeleteOperationCalledCnt returns the call count.
+func (cli *FakeConfigAgentClient) DeleteOperationCalledCnt() int {
+	return cli.deleteOperationCalledCnt
+}
+
+// SetParameter wrapper.
+func (cli *FakeConfigAgentClient) SetParameter(context.Context, *capb.SetParameterRequest, ...grpc.CallOption) (*capb.SetParameterResponse, error) {
+	cli.SetParameterCalledCnt++
+	return nil, nil
+}
+
+// GetParameterTypeValue wrapper.
+func (cli *FakeConfigAgentClient) GetParameterTypeValue(context.Context, *capb.GetParameterTypeValueRequest, ...grpc.CallOption) (*capb.GetParameterTypeValueResponse, error) {
+	cli.GetParameterTypeValueCalledCnt++
+	return nil, nil
+}
+
+// RecoverConfigFile wrapper.
+func (cli *FakeConfigAgentClient) RecoverConfigFile(ctx context.Context, in *capb.RecoverConfigFileRequest, opts ...grpc.CallOption) (*capb.RecoverConfigFileResponse, error) {
+	cli.RecoverConfigFileCalledCnt++
+	return nil, nil
+}
+
+// FetchServiceImageMetaData wrapper.
+func (cli *FakeConfigAgentClient) FetchServiceImageMetaData(ctx context.Context, in *capb.FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*capb.FetchServiceImageMetaDataResponse, error) {
+	cli.FetchServiceImageMetaDataCnt++
+	return nil, nil
+}
diff --git a/oracle/controllers/validationstest/BUILD.bazel b/oracle/controllers/validationstest/BUILD.bazel
new file mode 100644
index 0000000..73a4a50
--- /dev/null
+++ b/oracle/controllers/validationstest/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+
+go_test(
+    name = "validationstest_test",
+    srcs = ["crd_validation_test.go"],
+    deps = [
+        "//common/api/v1alpha1",
+        "//oracle/api/v1alpha1",
+        "//oracle/controllers/testhelpers",
+        "@com_github_onsi_ginkgo//:ginkgo",
+        "@com_github_onsi_gomega//:gomega",
+        "@com_github_onsi_gomega//format",
+        "@com_github_onsi_gomega//types",
+        "@io_k8s_apimachinery//pkg/api/errors",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_sigs_controller_runtime//:controller-runtime",
+        "@io_k8s_sigs_controller_runtime//pkg/client",
+    ],
+)
diff --git a/oracle/controllers/validationstest/crd_validation_test.go b/oracle/controllers/validationstest/crd_validation_test.go
new file mode 100644
index 0000000..c339102
--- /dev/null
+++ b/oracle/controllers/validationstest/crd_validation_test.go
@@ -0,0 +1,347 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validationstest
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1" + v1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/testhelpers" +) + +var ( + k8sClient client.Client + k8sManager ctrl.Manager +) + +func TestValidations(t *testing.T) { + testhelpers.RunReconcilerTestSuite(t, &k8sClient, &k8sManager, "Validations test", func() []testhelpers.Reconciler { + return []testhelpers.Reconciler{} + }) +} + +var _ = Describe("Instance CRD Validation rules", func() { + instanceMeta := metav1.ObjectMeta{ + Name: "test-instance", + Namespace: "default", + } + ctx := context.Background() + + Context("Memory percent attribute", func() { + It("Is validated", func() { + tests := []struct { + memPercent int + valid bool + }{ + {-1, false}, + {0, true}, + {42, true}, + {100, true}, + {101, false}, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating an Instance with MemoryPercent=%d", tc.memPercent)) + + instance := &v1alpha1.Instance{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.InstanceSpec{ + MemoryPercent: tc.memPercent, + }, + } + + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, instance) + _ = k8sClient.Delete(ctx, instance) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) + + Context("Database engine attribute", func() { + It("Is validated", func() { + tests := []struct { + dbType string + valid bool + }{ + {"Oracle", true}, + {"MySQL", false}, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating an Instance with Type=%s", tc.dbType)) + + instance := &v1alpha1.Instance{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.InstanceSpec{ + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Type: tc.dbType, + }, + }, + } + + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, instance) + _ = k8sClient.Delete(ctx, instance) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) + + Context("Restore.DOP attribute", func() { + It("Is validated", func() { + tests := []struct { + dop int32 + valid bool + }{ + {-1, false}, + {0, true}, + {1, true}, + {100, true}, + {101, false}, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating an Instance with Restore.DOP=%d", tc.dop)) + + instance := &v1alpha1.Instance{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.InstanceSpec{ + Restore: &v1alpha1.RestoreSpec{ + Dop: tc.dop, + RequestTime: metav1.Now(), + }, + }, + } + + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, instance) + _ = k8sClient.Delete(ctx, instance) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) + + Context("Restore.TimeLimitMinutes attribute", func() { + It("Is validated", func() { + tests := []struct { + timeLimitMinutes int32 + valid bool + }{ + {-1, false}, + {0, true}, + {1, true}, + {101, true}, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating an Instance with Restore.TimeLimitMinutes=%d", tc.timeLimitMinutes)) + + instance := 
&v1alpha1.Instance{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.InstanceSpec{ + Restore: &v1alpha1.RestoreSpec{ + TimeLimitMinutes: tc.timeLimitMinutes, + RequestTime: metav1.Now(), + }, + }, + } + + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, instance) + _ = k8sClient.Delete(ctx, instance) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) + + Context("Disk.Name attribute", func() { + It("Is validated", func() { + tests := []struct { + disks []commonv1alpha1.DiskSpec + valid bool + }{ + { + disks: []commonv1alpha1.DiskSpec{ + {Name: "DataDisk"}, + {Name: "LogDisk"}, + }, + valid: true, + }, + { + disks: []commonv1alpha1.DiskSpec{ + {Name: "DataDisk"}, + }, + valid: true, + }, + { + disks: []commonv1alpha1.DiskSpec{ + {Name: "FrisbeeDisk"}, + }, + valid: false, + }, + { + disks: []commonv1alpha1.DiskSpec{ + {Name: "SystemDisk"}, + {Name: "DataDisk"}, + }, + valid: false, + }, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating an Instance with Disks=%v", tc.disks)) + + instance := &v1alpha1.Instance{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.InstanceSpec{ + GenericInstanceSpec: commonv1alpha1.GenericInstanceSpec{ + Disks: tc.disks, + }, + }, + } + + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, instance) + _ = k8sClient.Delete(ctx, instance) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) +}) + +var _ = Describe("Database CRD Validation rules", func() { + instanceMeta := metav1.ObjectMeta{ + Name: "test-database", + Namespace: "default", + } + ctx := context.Background() + Context("User name attribute", func() { + It("Is validated", func() { + tests := []struct { + user string + valid bool + }{ + {user: "scott", valid: true}, + {user: "superuser", valid: true}, + } + + for _, tc := range tests { + By(fmt.Sprintf("Creating Database User with name =%v", tc.user)) + + database := &v1alpha1.Database{ + ObjectMeta: instanceMeta, + Spec: v1alpha1.DatabaseSpec{ + DatabaseSpec: commonv1alpha1.DatabaseSpec{ + Name: "pdb1", + Instance: "mydb", + }, + AdminPassword: "google", + + Users: []v1alpha1.UserSpec{ + { + UserSpec: commonv1alpha1.UserSpec{ + Name: tc.user, + CredentialSpec: commonv1alpha1.CredentialSpec{ + Password: "123456", + }, + }, + Privileges: []v1alpha1.PrivilegeSpec{}, + }, + }, + }, + } + haveExpectedOutcome := Succeed() + if !tc.valid { + haveExpectedOutcome = validationErrorOccurred() + } + + createRequest := k8sClient.Create(ctx, database) + _ = k8sClient.Delete(ctx, database) + + Expect(createRequest).To(haveExpectedOutcome) + } + }) + }) +}) + +// validationErrorMatcher is a matcher for CRD validation errors. 
+type validationErrorMatcher struct{} + +func validationErrorOccurred() types.GomegaMatcher { + return &validationErrorMatcher{} +} + +func (matcher *validationErrorMatcher) Match(actual interface{}) (bool, error) { + if actual == nil { + return false, fmt.Errorf("expected an error, got nil") + } + + err, ok := actual.(error) + if !ok { + return false, fmt.Errorf("%s is not an error", format.Object(actual, 1)) + } + + if !errors.IsInvalid(err) { + return false, fmt.Errorf("%s is not an error indicating an invalid resource", format.Object(err, 1)) + } + + return true, nil +} + +func (matcher *validationErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be a validation error") +} + +func (matcher *validationErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be a validation error") +} diff --git a/oracle/dashboards/install-dashboards.jsonnet b/oracle/dashboards/install-dashboards.jsonnet new file mode 100644 index 0000000..e964275 --- /dev/null +++ b/oracle/dashboards/install-dashboards.jsonnet @@ -0,0 +1,52 @@ +local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet'; + +local pvc = k.core.v1.persistentVolumeClaim; + +local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + + (import 'kube-prometheus/kube-prometheus-all-namespaces.libsonnet') + { + _config+:: { + namespace: 'monitoring', + prometheus+:: { + namespaces: [], + }, + }, + prometheus+:: { + prometheus+: { + spec+: { + retention: '30d', + storage: { + volumeClaimTemplate: + pvc.new() + + pvc.mixin.spec.withAccessModes('ReadWriteOnce') + + pvc.mixin.spec.resources.withRequests({ storage: '10Gi' }) + + pvc.mixin.spec.withStorageClassName('csi-gce-pd'), + }, // storage + }, // spec + }, // prometheus + }, // prometheus + grafanaDashboards+:: { // monitoring-mixin compatibility + 'db.json': (import 'db-dashboard.json'), + }, + grafana+:: { + dashboards+:: { // use this method to import your dashboards to Grafana + 'db.json': (import 'db-dashboard.json'), + }, + }, +}; + + +{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } + +{ + ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name] + for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator)) +} + +// serviceMonitor is separated so that it can be created after the CRDs are ready +{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } + +{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } + +{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } + +{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } + +{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } + +{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } + +{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } + +{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } + +{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } \ No newline at end of file diff --git a/oracle/main.go b/oracle/main.go new file mode 100644 index 0000000..3a6e657 --- /dev/null +++ b/oracle/main.go @@ -0,0 +1,220 @@ 
+// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "flag" + "os" + + snapv1 "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/klog/v2" + "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/backupcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/backupschedulecontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/configcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/cronanythingcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/databasecontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/exportcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/importcontroller" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/controllers/instancecontroller" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + + dbInitImage = flag.String("db_init_image_uri", "gcr.io/elcarro/oracle.db.anthosapis.com/dbinit:latest", "DB POD init binary image URI") + serviceImage = flag.String("service_image_uri", "", "GCR service URI") + configAgentImage = flag.String("config_image_uri", "gcr.io/elcarro/oracle.db.anthosapis.com/configagent:latest", "Config Agent image URI") + loggingSidecarImage = flag.String("logging_sidecar_image_uri", "gcr.io/elcarro/oracle.db.anthosapis.com/loggingsidecar:latest", "Logging Sidecar image URI") + monitoringAgentImage = flag.String("monitoring_agent_image_uri", "gcr.io/elcarro/oracle.db.anthosapis.com/monitoring:latest", "Monitoring Agent image URI") + + namespace = flag.String("namespace", "", "TESTING ONLY: Limits controller to watching resources in this namespace only") +) + +func init() { + _ = snapv1.AddToScheme(scheme) + + _ = clientgoscheme.AddToScheme(scheme) + + _ = v1alpha1.AddToScheme(scheme) + // +kubebuilder:scaffold:scheme +} + +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=releases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=oracle.db.anthosapis.com,resources=releases/status,verbs=get;update;patch + +func main() { + klog.InitFlags(nil) + + var metricsAddr string + var enableLeaderElection bool + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds 
to.") + flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + flag.Parse() + + ctrl.SetLogger(klogr.New()) + + images := make(map[string]string) + images["dbinit"] = *dbInitImage + images["service"] = *serviceImage + images["config"] = *configAgentImage + images["logging_sidecar"] = *loggingSidecarImage + images["monitoring"] = *monitoringAgentImage + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "controller-leader-election-helper", + Port: 9443, + Namespace: *namespace, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&instancecontroller.InstanceReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Instance"), + Scheme: mgr.GetScheme(), + Images: images, + ClientFactory: &controllers.GrpcConfigAgentClientFactory{}, + Recorder: mgr.GetEventRecorderFor("instance-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Instance") + os.Exit(1) + } + if err = (&databasecontroller.DatabaseReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Database"), + Scheme: mgr.GetScheme(), + ClientFactory: &controllers.GrpcConfigAgentClientFactory{}, + Recorder: mgr.GetEventRecorderFor("database-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Database") + os.Exit(1) + } + if err = (&backupcontroller.BackupReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Backup"), + Scheme: mgr.GetScheme(), + ClientFactory: &controllers.GrpcConfigAgentClientFactory{}, + Recorder: mgr.GetEventRecorderFor("backup-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Backup") + os.Exit(1) + } + if err = (&configcontroller.ConfigReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Config"), + Scheme: mgr.GetScheme(), + Images: images, + Recorder: mgr.GetEventRecorderFor("config-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Config") + os.Exit(1) + } + if err = (&exportcontroller.ExportReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Export"), + Scheme: mgr.GetScheme(), + ClientFactory: &controllers.GrpcConfigAgentClientFactory{}, + Recorder: mgr.GetEventRecorderFor("export-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Export") + os.Exit(1) + } + if err = (&importcontroller.ImportReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Import"), + Scheme: mgr.GetScheme(), + ClientFactory: &controllers.GrpcConfigAgentClientFactory{}, + Recorder: mgr.GetEventRecorderFor("import-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Import") + os.Exit(1) + } + + if err = backupschedulecontroller.NewBackupScheduleReconciler(mgr).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BackupSchedule") + os.Exit(1) + } + + 
cronAnythingReconciler, err := cronanythingcontroller.NewCronAnythingReconciler(mgr)
+	if err != nil {
+		setupLog.Error(err, "unable to build controller", "controller", "CronAnything")
+		os.Exit(1)
+	}
+
+	if err := cronAnythingReconciler.SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to add controller", "controller", "CronAnything")
+		os.Exit(1)
+	}
+	// +kubebuilder:scaffold:builder
+
+	// Use the testing namespace if supplied, otherwise deploy to the same namespace as the operator.
+	operatorNS := "operator-system"
+	if *namespace != "" {
+		operatorNS = *namespace
+	}
+
+	c := mgr.GetClient()
+
+	ctx := context.Background()
+	release := &v1alpha1.Release{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "oracle.db.anthosapis.com/v1alpha1",
+			Kind:       "Release",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "release",
+			Namespace: operatorNS,
+		},
+		Spec: v1alpha1.ReleaseSpec{
+			Version: version,
+		},
+	}
+
+	err = c.Create(ctx, release)
+
+	if apierrors.IsAlreadyExists(err) {
+		if err := c.Patch(ctx, release, client.Apply, client.ForceOwnership, client.FieldOwner("release-controller")); err != nil {
+			setupLog.Error(err, "failed to patch the Release resource")
+		}
+	} else if err != nil {
+		setupLog.Error(err, "failed to create the Release resource")
+	}
+
+	setupLog.Info("starting manager")
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
diff --git a/oracle/operator.yaml b/oracle/operator.yaml
new file mode 100644
index 0000000..533b9bb
--- /dev/null
+++ b/oracle/operator.yaml
@@ -0,0 +1,2663 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: operator-system
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: (unknown)
+  creationTimestamp: null
+  name: backups.oracle.db.anthosapis.com
+spec:
+  additionalPrinterColumns:
+  - JSONPath: .spec.instance
+    name: Instance Name
+    type: string
+  - JSONPath: .spec.type
+    name: Backup Type
+    type: string
+  - JSONPath: .spec.subType
+    name: Backup SubType
+    type: string
+  - JSONPath: .spec.dop
+    name: DOP
+    type: integer
+  - JSONPath: .spec.backupset
+    name: BS/IC
+    type: boolean
+  - JSONPath: .spec.gcsPath
+    name: GCS Path
+    type: string
+  - JSONPath: .status.phase
+    name: Phase
+    type: string
+  - JSONPath: .status.backupid
+    name: Backup ID
+    type: string
+  - JSONPath: .status.backuptime
+    name: Backup Time
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].status
+    name: ReadyStatus
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].reason
+    name: ReadyReason
+    priority: 1
+    type: string
+  - JSONPath: .status.conditions[?(@.type=="Ready")].message
+    name: ReadyMessage
+    priority: 1
+    type: string
+  group: oracle.db.anthosapis.com
+  names:
+    kind: Backup
+    listKind: BackupList
+    plural: backups
+    singular: backup
+  scope: Namespaced
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      description: Backup is the Schema for the backups API.
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the desired state of Backup. + properties: + backupItems: + description: For a Physical backup this slice can be used to indicate + what PDBs, schemas, tablespaces or tables to back up. + items: + type: string + type: array + backupset: + description: For a Physical backup the choices are Backupset and Image + Copies. Backupset is the default, but if Image Copies are required, + flip this flag to false. + type: boolean + checkLogical: + description: For a Physical backup, optionally turn on an additional + "check logical" option. The default is off. + type: boolean + compressed: + description: For a Physical backup, optionally turn on compression, + by flipping this flag to true. The default is false. + type: boolean + dop: + description: For a Physical backup, optionally indicate a degree of + parallelism also known as DOP. + format: int32 + maximum: 100 + minimum: 1 + type: integer + filesperset: + description: For a Physical backup, optionally specify filesperset. + The default depends on a type of backup, generally 64. + format: int32 + type: integer + gcsPath: + description: If set up ahead of time, the backup sets of a physical + backup can be optionally transferred to a GCS bucket. A user is to + ensure proper write access to the bucket from within the Oracle Operator. + type: string + instance: + description: Instance is a name of an instance to take a backup for. + type: string + keepDataOnDeletion: + description: KeepDataOnDeletion defines whether to keep backup data + when backup resource is removed. The default value is false. + type: boolean + level: + description: For a Physical backup, optionally specify an incremental + level. The default is 0 (the whole database). + format: int32 + type: integer + localPath: + description: For a Physical backup, optionally specify a local backup + dir. If omitted, /u03/app/oracle/rman is assumed. + type: string + sectionSize: + description: For a Physical backup, optionally specify a section size + in MB. Don't include the unit (MB), just the integer. + format: int32 + type: integer + subType: + description: 'Backup sub-type, which is only relevant for a Physical + backup type (e.g. RMAN). If omitted, the default of Instance(Level) + is assumed. Supported options at this point are: Instance or Database + level backups.' + enum: + - Instance + - Database + - Tablespace + - Datafile + type: string + timeLimitMinutes: + description: For a Physical backup, optionally specify the time threshold. + If a threshold is reached, the backup request would time out and error + out. The threshold is expressed in minutes. Don't include the unit + (minutes), just the integer. + format: int32 + type: integer + type: + description: "Type describes a type of a backup to take. Immutable. + Available options are: - Snapshot: storage level disk snapshot. - + Physical: database engine specific backup that relies on a redo stream + / continuous archiving (WAL) and may allow a PITR. Examples + include pg_backup, pgBackRest, mysqlbackup. 
A Physical + backup may be file based or database block based \t (e.g. Oracle + RMAN). - Logical: database engine specific backup that relies on running + SQL statements, e.g. mysqldump, pg_dump, expdp. If not + specified, the default of Snapshot is assumed." + enum: + - Snapshot + - Physical + - Logical + type: string + volumeSnapshotClass: + description: VolumeSnapshotClass points to a particular CSI driver and + is used for taking a volume snapshot. If requested here at the Backup + level, this setting overrides the platform default as well as the + default set via the Config (global user preferences). + type: string + type: object + status: + description: BackupStatus defines the observed state of Backup. + properties: + backupid: + type: string + backuptime: + type: string + conditions: + description: Conditions represents the latest available observations + of the backup's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + phase: + description: Phase is a summary of current state of the Backup. + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: backupschedules.oracle.db.anthosapis.com +spec: + group: oracle.db.anthosapis.com + names: + kind: BackupSchedule + listKind: BackupScheduleList + plural: backupschedules + singular: backupschedule + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: BackupSchedule is the Schema for the backupschedules API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupScheduleSpec defines the desired state of BackupSchedule. + properties: + backupRetentionPolicy: + description: BackupRetentionPolicy is the policy used to trigger automatic + deletion of backups produced from this BackupSchedule. + properties: + backupRetention: + description: BackupRetention is the number of successful backups + to keep around. The default is 7. A value of 0 means "do not delete + backups based on count". Max of 512 allows for ~21 days of hourly + backups or ~1.4 years of daily backups. + format: int32 + maximum: 512 + minimum: 0 + type: integer + type: object + backupSpec: + description: BackupSpec defines the Backup that will be created on the + provided schedule. + properties: + backupItems: + description: For a Physical backup this slice can be used to indicate + what PDBs, schemas, tablespaces or tables to back up. + items: + type: string + type: array + backupset: + description: For a Physical backup the choices are Backupset and + Image Copies. Backupset is the default, but if Image Copies are + required, flip this flag to false. + type: boolean + checkLogical: + description: For a Physical backup, optionally turn on an additional + "check logical" option. The default is off. + type: boolean + compressed: + description: For a Physical backup, optionally turn on compression, + by flipping this flag to true. The default is false. + type: boolean + dop: + description: For a Physical backup, optionally indicate a degree + of parallelism also known as DOP. 
+ format: int32 + maximum: 100 + minimum: 1 + type: integer + filesperset: + description: For a Physical backup, optionally specify filesperset. + The default depends on a type of backup, generally 64. + format: int32 + type: integer + gcsPath: + description: If set up ahead of time, the backup sets of a physical + backup can be optionally transferred to a GCS bucket. A user is + to ensure proper write access to the bucket from within the Oracle + Operator. + type: string + instance: + description: Instance is a name of an instance to take a backup + for. + type: string + keepDataOnDeletion: + description: KeepDataOnDeletion defines whether to keep backup data + when backup resource is removed. The default value is false. + type: boolean + level: + description: For a Physical backup, optionally specify an incremental + level. The default is 0 (the whole database). + format: int32 + type: integer + localPath: + description: For a Physical backup, optionally specify a local backup + dir. If omitted, /u03/app/oracle/rman is assumed. + type: string + sectionSize: + description: For a Physical backup, optionally specify a section + size in MB. Don't include the unit (MB), just the integer. + format: int32 + type: integer + subType: + description: 'Backup sub-type, which is only relevant for a Physical + backup type (e.g. RMAN). If omitted, the default of Instance(Level) + is assumed. Supported options at this point are: Instance or Database + level backups.' + enum: + - Instance + - Database + - Tablespace + - Datafile + type: string + timeLimitMinutes: + description: For a Physical backup, optionally specify the time + threshold. If a threshold is reached, the backup request would + time out and error out. The threshold is expressed in minutes. + Don't include the unit (minutes), just the integer. + format: int32 + type: integer + type: + description: "Type describes a type of a backup to take. Immutable. + Available options are: - Snapshot: storage level disk snapshot. + - Physical: database engine specific backup that relies on a redo + stream / continuous archiving (WAL) and may allow + a PITR. Examples include pg_backup, pgBackRest, mysqlbackup. + \ A Physical backup may be file based or database block + based \t (e.g. Oracle RMAN). - Logical: database engine + specific backup that relies on running SQL statements, + e.g. mysqldump, pg_dump, expdp. If not specified, the default + of Snapshot is assumed." + enum: + - Snapshot + - Physical + - Logical + type: string + volumeSnapshotClass: + description: VolumeSnapshotClass points to a particular CSI driver + and is used for taking a volume snapshot. If requested here at + the Backup level, this setting overrides the platform default + as well as the default set via the Config (global user preferences). + type: string + type: object + schedule: + description: Schedule is a cron-style expression of the schedule on + which Backup will be created. For allowed syntax, see en.wikipedia.org/wiki/Cron + and godoc.org/github.com/robfig/cron. + type: string + startingDeadlineSeconds: + description: StartingDeadlineSeconds is an optional deadline in seconds + for starting the backup creation if it misses scheduled time for any + reason. The default is 30 seconds. + format: int64 + type: integer + suspend: + description: Suspend tells the controller to suspend operations - both + creation of new Backup and retention actions. This will not have any + effect on backups currently in progress. Default is false. 
+ type: boolean + required: + - backupSpec + - schedule + type: object + status: + description: BackupScheduleStatus defines the observed state of BackupSchedule. + properties: + backupHistory: + description: BackupHistory stores the records for up to 7 of the latest + backups. + items: + description: BackupHistoryRecord is a historical record of a Backup. + properties: + backupName: + description: BackupName is the name of the Backup that gets created. + nullable: true + type: string + creationTime: + description: CreationTime is the time that the Backup gets created. + format: date-time + nullable: true + type: string + phase: + description: Phase tells the state of the Backup. + type: string + required: + - backupName + - creationTime + type: object + type: array + backupTotal: + description: BackupTotal stores the total number of current existing + backups created by this backupSchedule. + format: int32 + type: integer + conditions: + description: Conditions of the BackupSchedule. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupTime: + description: LastBackupTime is the time the last Backup was created + for this BackupSchedule. + format: date-time + nullable: true + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: configs.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.platform + name: Platform + type: string + - JSONPath: .spec.diskSizes + name: Disk Sizes + type: string + - JSONPath: .spec.storageClass + name: Storage Class + type: string + - JSONPath: .spec.volumeSnapshotClass + name: Volume Snapshot Class + type: string + group: oracle.db.anthosapis.com + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: Config is the Schema for the configs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConfigSpec defines the desired state of Config. + properties: + disks: + description: 'Disks slice describes at minimum two disks: data and log + (archive log), and optionally a backup disk.' + items: + description: DiskSpec defines the desired state of a disk. (the structure + is deliberately designed to be flexible, as a slice, so that if + we change a disk layout for different hosting platforms, the model + can be also adjusted to reflect that). + properties: + name: + description: 'Name of a disk. Allowed values are: DataDisk,LogDisk,BackupDisk' + enum: + - DataDisk + - LogDisk + - BackupDisk + type: string + size: + anyOf: + - type: integer + - type: string + description: 'Disk size. If not specified, the defaults are: DataDisk:"100Gi", + LogDisk:"150Gi",BackupDisk:"100Gi"' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + description: StorageClass points to a particular CSI driver and + is used for disk provisioning. + type: string + type: + description: Disk type. Depending on a deployment platform, DiskType + may take different values. On GCP, support "HDD" and "SSD". + Default to "HDD" if not specified. 
+ type: string + required: + - name + type: object + type: array + hostAntiAffinityNamespaces: + description: HostAntiAffinityNamespaces is an optional list of namespaces + that need to be included in anti-affinity by hostname rule. The effect + of the rule is forbidding scheduling a database pod in the current + namespace on a host that already runs a database pod in any of the + listed namespaces. + items: + type: string + type: array + images: + additionalProperties: + type: string + description: Service agent and other data plane agent images. This is + an optional map that allows a customer to specify agent images different + from those chosen/provided by the Oracle Operator by default. See + an example of how this map can be used in config/samples/v1alpha1_config_gcp1.yaml + type: object + logLevel: + additionalProperties: + type: string + description: Log Levels for the various components. This is an optional + map for component -> log level See an example of how this map can + be used in config/samples/v1alpha1_config_gcp1.yaml + type: object + platform: + description: 'Deployment platform. Presently supported values are: GCP + (default), BareMetal.' + enum: + - GCP + - BareMetal + - Minikube + type: string + storageClass: + description: Storage class to use for dynamic provisioning. This value + varies depending on a platform. For GCP (and the default) it is "csi-gce-pd". + type: string + volumeSnapshotClass: + description: Volume Snapshot class to use for storage snapshots. This + value varies depending on a platform. For GCP (and the default) it + is "csi-gce-pd-snapshot-class". + type: string + type: object + status: + description: ConfigStatus defines the observed state of Config. + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: cronanythings.oracle.db.anthosapis.com +spec: + group: oracle.db.anthosapis.com + names: + kind: CronAnything + listKind: CronAnythingList + plural: cronanythings + singular: cronanything + scope: Namespaced + validation: + openAPIV3Schema: + description: CronAnything is the Schema for the cronanythings API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CronAnythingSpec defines the desired state of CronAnything. + properties: + cascadeDelete: + description: CascadeDelete tells CronAnything to set up owner references + from the created resources to the CronAnything resource. This means + that if the CronAnything resource is deleted, all resources created + by it will also be deleted. This is an optional field that defaults + to false. 
+            type: boolean
+          concurrencyPolicy:
+            description: ConcurrencyPolicy specifies how to treat concurrent resources
+              if the resource provides a status path that exposes completion. The
+              default policy if not provided is to allow a new resource to be created
+              even if an active resource already exists. If the resource doesn’t
+              have an active/completed status, the only supported concurrency policy
+              is to allow creating new resources. This field is mutable. If the
+              policy is changed to a more stringent policy while multiple resources
+              are active, it will not delete any existing resources. The exception
+              is if a creation of a new resource is triggered and the policy has
+              been changed to Replace. If multiple resources are active, they will
+              all be deleted and replaced by a new resource.
+            type: string
+          finishableStrategy:
+            description: FinishableStrategy defines how the CronAnything controller
+              can decide if a resource has completed. Some resources will do some
+              work after they have been created and at some point be finished. Jobs
+              are the most common example. If no strategy is defined, it is assumed
+              that the resources never finish.
+            properties:
+              stringField:
+                description: StringField contains the details for how the CronAnything
+                  controller can find the string field on the resource needed to
+                  decide if the resource has completed. It also lists the values
+                  that mean the resource has completed.
+                properties:
+                  fieldPath:
+                    description: The path to the field on the resource that contains
+                      a string value.
+                    type: string
+                  finishedValues:
+                    description: The values of the field that mean the resource
+                      has completed.
+                    items:
+                      type: string
+                    type: array
+                required:
+                - fieldPath
+                - finishedValues
+                type: object
+              timestampField:
+                description: TimestampField contains the details for how the CronAnything
+                  controller can find the timestamp field on the resource in order
+                  to decide if the resource has completed.
+                properties:
+                  fieldPath:
+                    description: The path to the field on the resource that contains
+                      the timestamp.
+                    type: string
+                required:
+                - fieldPath
+                type: object
+              type:
+                description: Type tells which strategy should be used.
+                type: string
+            required:
+            - type
+            type: object
+          resourceBaseName:
+            description: ResourceBaseName specifies the base name for the resources
+              created by CronAnything, which will be named using the format <ResourceBaseName>-<Timestamp>.
+              This field is optional, and the default is to use the name of the
+              CronAnything resource as the ResourceBaseName.
+            type: string
+          resourceTimestampFormat:
+            description: ResourceTimestampFormat defines the format of the timestamp
+              in the name of Resources created by CronAnything <ResourceBaseName>-<Timestamp>.
+              This field is optional, and the default is to format the timestamp
+              as unix time. If provided, it must be compatible with time.Format
+              in golang.
+            type: string
+          retention:
+            description: Retention defines the retention policy for resources created
+              by CronAnything. If no retention policy is defined, CronAnything will
+              never delete resources, so cleanup must be handled through some other
+              process.
+            properties:
+              historyCountLimit:
+                description: The number of completed resources to keep before deleting
+                  them. This only affects finishable resources and the default value
+                  is 3. This field is mutable and if it is changed to a number lower
+                  than the current number of finished resources, the oldest ones
+                  will eventually be deleted until the number of finished resources
+                  matches the limit. 
+ format: int32 + type: integer + historyTimeLimitSeconds: + description: The time since completion that a resource is kept before + deletion. This only affects finishable resources. This does not + have any default value and if it is not provided, HistoryCountLimit + will be used to prune completed resources. If both HistoryCountLimit + and HistoryTimeLimitSeconds are set, it is treated as an OR operation. + format: int64 + type: integer + resourceTimestampStrategy: + description: ResourceTimestampStrategy specifies how the CronAnything + controller can find the age of a resource. This is needed to support + retention. + properties: + field: + description: FieldResourceTimestampStrategy specifies how the + CronAnything controller can find the timestamp for the resource + from a field. + properties: + fieldPath: + description: The path to the field on the resource that + contains the timestamp. + type: string + required: + - fieldPath + type: object + type: + description: Type tells which strategy should be used. + type: string + required: + - type + type: object + required: + - resourceTimestampStrategy + type: object + schedule: + description: Schedule defines a time-based schedule, e.g., a standard + cron schedule such as “@every 10m”. This field is mandatory and mutable. + If it is changed, resources will simply be created at the new interval + from then on. + type: string + suspend: + description: Suspend tells the controller to suspend creation of additional + resources. The default value is false. This field is mutable. It will + not affect any existing resources, but only affect creation of additional + resources. + type: boolean + template: + description: Template is a template of a resource type for which instances + are to be created on the given schedule. This field is mandatory and + it must contain a valid template for an existing apiVersion and kind + in the cluster. It is immutable, so if the template needs to change, + the whole CronAnything resource should be replaced. + type: object + totalResourceLimit: + description: TotalResourceLimit specifies the total number of children + allowed for a particular CronAnything resource. If this limit is reached, + no additional resources will be created. This limit is mostly meant + to avoid runaway creation of resources that could bring down the cluster. + Both finished and unfinished resources count against this limit. This + field is mutable. If it is changed to a lower value than the existing + number of resources, none of the existing resources will be deleted + as a result, but no additional resources will be created until the + number of child resources goes below the limit. The field is optional + with a default value of 100. + format: int32 + type: integer + triggerDeadlineSeconds: + description: TriggerDeadlineSeconds defines Deadline in seconds for + creating the resource if it missed the scheduled time. If no deadline + is provided, the resource will be created no matter how far after + the scheduled time. If multiple triggers were missed, only the last + will be triggered and only one resource will be created. This field + is mutable and changing it will affect the creation of new resources + from that point in time. + format: int64 + type: integer + required: + - schedule + - template + type: object + status: + description: CronAnythingStatus defines the observed state of CronAnything. 
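+# Illustrative only, hypothetical values: a CronAnything that stamps out a
+# resource from the embedded template every 10 minutes, using the spec fields
+# defined above ("@every 10m" is the schedule example from the field
+# description).
+#
+#   apiVersion: oracle.db.anthosapis.com/v1alpha1
+#   kind: CronAnything
+#   metadata:
+#     name: nightly-task
+#   spec:
+#     schedule: "@every 10m"
+#     suspend: false
+#     totalResourceLimit: 100
+#     template:
+#       apiVersion: batch/v1
+#       kind: Job
+#       # ... any valid resource template for an apiVersion/kind in the cluster ...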
+ properties: + lastScheduleTime: + description: LastScheduleTime keeps track of the scheduled time for + the last successfully completed creation of a resource. This is used + by the controller to determine when the next resource creation should + happen. If creation of a resource is delayed for any reason but eventually + does happen, this value will still be updated to the time when it + was originally scheduled to happen. + format: date-time + type: string + pendingTrigger: + description: PendingTrigger keeps track of any triggers that are past + their trigger time, but for some reason have not been completed yet. + This is typically a result of the create operation failing. + properties: + result: + description: Result tells why this trigger is in the pending state, + i.e. what prevented it from completing successfully. + type: string + scheduleTime: + description: ScheduleTime is the time when this trigger was scheduled + to be executed. + format: date-time + type: string + required: + - result + - scheduleTime + type: object + triggerHistory: + description: TriggerHistory keeps track of the status for the last 10 + triggers. This allows users of CronAnything to see whether any triggers + failed. It is important to know that this only keeps track of whether + a trigger was successfully executed (as in creating the given resource), + not whether the created resource was itself successful. For this information, + any users of CronAnything should observe the resources created. + items: + description: TriggerHistoryRecord contains information about the result + of a trigger. It can either have completed successfully, and if + it did not, the record will provide information about what is the + cause of the failure. + properties: + creationTimestamp: + description: CreationTimestamp is the time when this record was + created. This is thus also the time at which the final result + of the trigger was decided. + format: date-time + type: string + result: + description: Result contains the outcome of a trigger. It can + either be CreateSucceeded, which means the given resource was + created as intended, or it can be one of several error messages. + type: string + scheduleTime: + description: ScheduleTime is the time when this trigger was scheduled + to be executed. 
+ format: date-time + type: string + required: + - creationTimestamp + - result + - scheduleTime + type: object + type: array + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: databases.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance + type: string + - JSONPath: .status.usernames + name: Users + type: string + - JSONPath: .status.phase + name: Phase + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: DatabaseReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: DatabaseReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: DatabaseReadyMessage + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].status + name: UserReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].reason + name: UserReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="UserReady")].message + name: UserReadyMessage + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + categories: + - genericdatabases + kind: Database + listKind: DatabaseList + plural: databases + shortNames: + - gdb + singular: database + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Database is the Schema for the databases API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseSpec defines the desired state of Database. + properties: + admin_password: + description: AdminPassword is the password for the sys admin of the + database. + maxLength: 30 + minLength: 5 + type: string + adminPasswordGsmSecretRef: + description: AdminPasswordGsmSecretRef is a reference to the secret + object containing sensitive information to pass to config agent. This + field is optional, and may be empty if plaintext password is used. + properties: + projectId: + description: ProjectId identifies the project where the secret resource + is. + type: string + secretId: + description: SecretId identifies the secret. + type: string + version: + description: Version is the version of the secret. If "latest" is + specified, underlying the latest SecretId is used. + type: string + type: object + instance: + description: Name of the instance that the database belongs to. + type: string + name: + description: Name of the database. + type: string + users: + description: Users specifies an optional list of users to be created + in this database. 
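+# Illustrative only, hypothetical names and secrets: a Database with one user
+# whose password is sourced from a GSM secret, per the UserSpec items below.
+#
+#   apiVersion: oracle.db.anthosapis.com/v1alpha1
+#   kind: Database
+#   metadata:
+#     name: pdb1
+#   spec:
+#     instance: myinstance
+#     name: pdb1
+#     users:
+#     - name: scott
+#       gsmSecretRef:
+#         projectId: my-project
+#         secretId: scott-password
+#         version: latest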
+ items: + description: UserSpec defines the desired state of the Database Users. + properties: + gsmSecretRef: + description: A reference to a GSM secret. + properties: + projectId: + description: ProjectId identifies the project where the secret + resource is. + type: string + secretId: + description: SecretId identifies the secret. + type: string + version: + description: Version is the version of the secret. If "latest" + is specified, underlying the latest SecretId is used. + type: string + type: object + name: + description: Name of the User. + type: string + password: + description: Plaintext password. + type: string + privileges: + description: Privileges specifies an optional list of privileges + to grant to the user. + items: + description: PrivilegeSpec defines the desired state of roles + and privileges. + type: string + type: array + secretRef: + description: A reference to a k8s secret. + properties: + name: + description: Name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: Namespace defines the space within which the + secret name must be unique. + type: string + type: object + type: object + type: array + type: object + status: + description: DatabaseStatus defines the observed state of Database. + properties: + UserResourceVersions: + additionalProperties: + type: string + description: 'UserResourceVersions is a map of username to user resource + version (plaintext or GSM). For GSM Resource version, use format: + "projects/{ProjectId}/secrets/{SecretId}/versions/{Version}".' + type: object + conditions: + description: Conditions represents the latest available observations + of the Database's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. 
The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + isChangeApplied: + description: IsChangeApplied indicates whether database changes have + been applied + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by + the controller. + format: int64 + type: integer + phase: + description: Phase is a summary of the current state of the Database. + type: string + usernames: + description: List of user names. + items: + type: string + type: array + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: exports.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance Name + type: string + - JSONPath: .spec.databaseName + name: Database Name + type: string + - JSONPath: .spec.exportObjectType + name: Export Object Type + type: string + - JSONPath: .spec.exportObjects + name: Export Objects + type: string + - JSONPath: .spec.gcsPath + name: GCS Path + type: string + - JSONPath: .spec.gcsLogPath + name: GCS Log Path + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + kind: Export + listKind: ExportList + plural: exports + singular: export + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Export is the Schema for the exports API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ExportSpec defines the desired state of Export + properties: + databaseName: + description: DatabaseName is the database resource name within Instance + to export from. + type: string + exportObjectType: + description: 'ExportObjectType is the type of objects to export. If + omitted, the default of Schemas is assumed. Supported options at this + point are: Schemas or Tables.' + enum: + - Schemas + - Tables + type: string + exportObjects: + description: ExportObjects are objects, schemas or tables, exported + by DataPump. + items: + type: string + type: array + flashbackTime: + description: FlashbackTime is an optional time. If this time is set, + the SCN that most closely matches the time is found, and this SCN + is used to enable the Flashback utility. The export operation is performed + with data that is consistent up to this SCN. + format: date-time + type: string + gcsLogPath: + description: GcsLogPath is an optional full path in GCS. If set up ahead + of time, export logs can be optionally transferred to set GCS bucket. + A user is to ensure proper write access to the bucket from within + the Oracle Operator. + type: string + gcsPath: + description: GcsPath is a full path in GCS bucket to transfer exported + files to. A user is to ensure proper write access to the bucket from + within the Oracle Operator. + type: string + instance: + description: Instance is the resource name within namespace to export + from. + type: string + type: + description: Type of the Export. If omitted, the default of DataPump + is assumed. + enum: + - DataPump + type: string + required: + - databaseName + - instance + type: object + status: + description: ExportStatus defines the observed state of Export. + properties: + conditions: + description: Conditions represents the latest available observations + of the export's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
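+# Illustrative only, hypothetical names and paths: a DataPump export of two
+# schemas to GCS, per the ExportSpec above.
+#
+#   apiVersion: oracle.db.anthosapis.com/v1alpha1
+#   kind: Export
+#   metadata:
+#     name: export-schemas
+#   spec:
+#     instance: myinstance
+#     databaseName: pdb1
+#     type: DataPump
+#     exportObjectType: Schemas
+#     exportObjects:
+#     - scott
+#     - hr
+#     gcsPath: gs://example-bucket/exports/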
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: imports.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.instance + name: Instance Name + type: string + - JSONPath: .spec.databaseName + name: Database Name + type: string + - JSONPath: .spec.gcsPath + name: GCS Path + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + - JSONPath: .spec.gcsLogPath + name: GCS Log Path + type: string + group: oracle.db.anthosapis.com + names: + kind: Import + listKind: ImportList + plural: imports + singular: import + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Import is the Schema for the imports API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImportSpec defines the desired state of Import. + properties: + databaseName: + description: DatabaseName is the database resource name within Instance + to import into. 
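+# Illustrative only, hypothetical names and paths: a DataPump import matching
+# the ImportSpec fields in this block (gcsPath, instance, and type are
+# defined just below).
+#
+#   apiVersion: oracle.db.anthosapis.com/v1alpha1
+#   kind: Import
+#   metadata:
+#     name: import-pdb1
+#   spec:
+#     instance: myinstance
+#     databaseName: pdb1
+#     gcsPath: gs://example-bucket/exports/import.dmp
+#     type: DataPump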
+ type: string + gcsLogPath: + description: GcsLogPath is an optional path in GCS to copy import log + to. A user is to ensure proper write access to the bucket from within + the Oracle Operator. + type: string + gcsPath: + description: GcsPath is a full path to the input file in GCS containing + import data. A user is to ensure proper write access to the bucket + from within the Oracle Operator. + type: string + instance: + description: Instance is the resource name within same namespace to + import into. + type: string + type: + description: Type of the Import. If not specified, the default of DataPump + is assumed, which is the only supported option currently. + enum: + - DataPump + type: string + type: object + status: + description: ImportStatus defines the observed state of Import. + properties: + conditions: + description: Conditions represents the latest available observations + of the import's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: instances.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.type + name: DB Engine + type: string + - JSONPath: .spec.version + name: Version + type: string + - JSONPath: .spec.edition + name: Edition + type: string + - JSONPath: .status.endpoint + name: Endpoint + type: string + - JSONPath: .status.url + name: URL + type: string + - JSONPath: .status.databasenames + name: DB Names + type: string + - JSONPath: .status.backupid + name: Backup ID + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: ReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].reason + name: ReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: ReadyMessage + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].status + name: DBReadyStatus + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].reason + name: DBReadyReason + type: string + - JSONPath: .status.conditions[?(@.type=="DatabaseInstanceReady")].message + name: DBReadyMessage + priority: 1 + type: string + - JSONPath: .status.isChangeApplied + name: IsChangeApplied + priority: 1 + type: string + group: oracle.db.anthosapis.com + names: + categories: + - genericinstances + kind: Instance + listKind: InstanceList + plural: instances + shortNames: + - ginst + singular: instance + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Instance is the Schema for the instances API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance. + properties: + cdbName: + description: CDBName is the intended name of the CDB attribute. If the + CDBName is different from the original name (with which the CDB was + created) the CDB will be renamed. + type: string + characterSet: + description: CharacterSet used to create a database (the default is + AL32UTF8). 
+ type: string + cloudProvider: + description: CloudProvider is only relevant if the hosting type is Cloud, + MultiCloud, Hybrid or Bare Metal. + enum: + - GCP + - AWS + - Azure + - OCI + type: string + databaseGID: + description: DatabaseGID represents an OS group ID of a user running + a database. + format: int64 + type: integer + databaseUID: + description: DatabaseUID represents an OS UID of a user running a database. + format: int64 + type: integer + dbDomain: + description: DBDomain is an optional attribute to set a database domain. + type: string + dbNetworkServiceOptions: + description: DBNetworkServiceOptions allows overriding some details + of the Kubernetes Service created to expose a connection to the database. + properties: + gcp: + description: GCP contains Google Cloud specific attributes of Service + configuration. + properties: + loadBalancerType: + description: LoadBalancerType lets you define a type of load balancer, + see https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + enum: + - "" + - Internal + - External + type: string + type: object + type: object + dbUniqueName: + description: DBUniqueName represents a unique database name that would + be set for a database (if not provided, as a default, the [_generic|_<zone name>] will be appended to a DatabaseName). + type: string + deploymentType: + description: DeploymentType reflects a fully managed (DBaaS) vs. semi-managed + database. + enum: + - "" + - InCluster + - CloudSQL + - RDS + type: string + disks: + description: 'Disks slice describes at minimum two disks: data and log + (archive log), and optionally a backup disk.' + items: + description: DiskSpec defines the desired state of a disk. (the structure + is deliberately designed to be flexible, as a slice, so that if + we change a disk layout for different hosting platforms, the model + can be also adjusted to reflect that). + properties: + name: + description: 'Name of a disk. Allowed values are: DataDisk,LogDisk,BackupDisk' + enum: + - DataDisk + - LogDisk + - BackupDisk + type: string + size: + anyOf: + - type: integer + - type: string + description: 'Disk size. If not specified, the defaults are: DataDisk:"100Gi", + LogDisk:"150Gi",BackupDisk:"100Gi"' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + description: StorageClass points to a particular CSI driver and + is used for disk provisioning. + type: string + type: + description: Disk type. Depending on a deployment platform, DiskType + may take different values. On GCP, "HDD" and "SSD" are supported. + Defaults to "HDD" if not specified. + type: string + required: + - name + type: object + type: array + edition: + description: Edition of a database. + type: string + hostingType: + description: HostingType conveys whether an Instance is meant to be + hosted on a cloud (single or multiple), on-prem, on Bare Metal, etc. + It is meant to be used as a filter and aggregation dimension. + enum: + - "" + - Cloud + - MultiCloud + - Hybrid + - BareMetal + - OnPrem + type: string + images: + additionalProperties: + type: string + description: Service agent and other data plane GCR images. This is + an optional map that allows a customer to specify GCR images different + from those chosen/provided. + type: object + maintenanceWindow: + description: MaintenanceWindow specifies the time windows during which + database downtimes are allowed for maintenance.
+ properties: + timeRanges: + description: Maintenance time ranges. + items: + description: TimeRange defines a window of time. Both start time + and duration are required. + properties: + duration: + description: Duration of the maintenance window. + type: string + start: + description: Start time. + format: date-time + type: string + type: object + type: array + type: object + memoryPercent: + description: MemoryPercent represents the percentage of memory that + should be allocated for Oracle SGA (default is 25%). + maximum: 100 + minimum: 0 + type: integer + minMemoryForDBContainer: + description: MinMemoryForDBContainer overrides the default safe limit + for scheduling the db container without crashes due to memory pressure. + type: string + mode: + description: Mode specifies how this instance will be managed by the + operator. + enum: + - ManuallySetUpStandby + type: string + parameters: + additionalProperties: + type: string + description: Parameters contains the database flags in the map format. + type: object + patching: + description: Patching contains all the patching related attributes like + patch version and image. + properties: + patchVersion: + description: Patch version. + type: string + patchedServiceImage: + description: GCR link containing the patched service image. + type: string + type: object + restore: + description: Restore and recovery request details. This section should + normally be commented out unless an actual restore/recovery is required. + properties: + backupId: + description: Backup name to restore from. + type: string + backupType: + description: 'Backup type to restore from. Oracle only supports: + Snapshot or Physical.' + enum: + - Snapshot + - Physical + type: string + dop: + description: Similar to a (physical) backup, optionally indicate + a degree of parallelism, also known as DOP. + format: int32 + maximum: 100 + minimum: 1 + type: integer + force: + description: To overwrite an existing, up and running instance, + an explicit authorization is required. This is a safeguard to avoid + accidentally destroying a perfectly healthy (status=Ready) instance. + enum: + - true + - false + type: boolean + requestTime: + description: Request version as a date-time to avoid accidental + triggering of a restore operation when reapplying an older version + of a resource file. If at least one restore operation has occurred, + any further restore operation with the same or an earlier RequestTime + than the last restore operation will be ignored. + format: date-time + type: string + timeLimitMinutes: + description: Restore time limit. Optional field defaulting to three + times the backup time limit. Don't include the unit (minutes), + just the integer. + format: int32 + minimum: 0 + type: integer + required: + - requestTime + type: object + services: + additionalProperties: + type: boolean + description: Services list the optional semi-managed services that the + customers can choose from. + type: object + sourceCidrRanges: + description: Source IP CIDR ranges allowed for a client. + items: + type: string + type: array + type: + description: Type of a database engine. + enum: + - Oracle + type: string + version: + description: Version of a database. + type: string + type: object + status: + description: InstanceStatus defines the observed state of Instance. + properties: + backupid: + description: Last backup ID. + type: string + conditions: + description: Conditions represents the latest available observations + of the GenericInstance's current state.
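+# Illustrative only, hypothetical values (the version and edition in
+# particular): a minimal Instance using the InstanceSpec fields above; disk
+# names and sizes match the documented enum and defaults.
+#
+#   apiVersion: oracle.db.anthosapis.com/v1alpha1
+#   kind: Instance
+#   metadata:
+#     name: myinstance
+#   spec:
+#     type: Oracle
+#     version: "19.3"
+#     edition: Enterprise
+#     cdbName: MYDB
+#     disks:
+#     - name: DataDisk
+#       size: 100Gi
+#     - name: LogDisk
+#       size: 150Gi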
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // + +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentParameters: + additionalProperties: + type: string + description: CurrentParameters stores the last successfully set instance + parameters. + type: object + currentServiceImage: + description: CurrentServiceImage stores the image name used by the database + instance. + type: string + databasenames: + description: List of database names (e.g. PDBs) hosted in the Instance. + items: + type: string + type: array + description: + description: Description is for human consumption. E.g. when an Instance + is restored from a backup, this field is populated with the human readable + restore details. + type: string + endpoint: + description: Endpoint is presently expressed in the format of <instanceName>-svc.<ns>.
+ type: string + isChangeApplied: + description: IsChangeApplied indicates whether instance changes have + been applied + type: string + lastFailedParameterUpdate: + additionalProperties: + type: string + description: LastFailedParameterUpdate is used to avoid getting into + the failed parameter update loop. + type: object + lastRestoreTime: + format: date-time + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by + the controller. + format: int64 + type: integer + url: + description: URL represents an IP and a port number info needed in order + to establish a database connection from outside a cluster. + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: releases.oracle.db.anthosapis.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.version + name: Release + type: string + group: oracle.db.anthosapis.com + names: + kind: Release + listKind: ReleaseList + plural: releases + singular: release + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: Release is the Schema for the releases API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ReleaseSpec defines the desired state of Release. + properties: + version: + type: string + required: + - version + type: object + status: + description: ReleaseStatus defines the observed state of Release. 
+ type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operator-leader-election-role + namespace: operator-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operator-manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - services + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - database.oracle.db.anthosapis.com + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.db.anthosapis.com + resources: + - databases/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backups/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - backupschedules/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - configs/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - cronanythings + verbs: + - '*' + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - 
databases/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - exports/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - imports/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - instances/status + verbs: + - get + - patch + - update +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oracle.db.anthosapis.com + resources: + - releases/status + verbs: + - get + - patch + - update +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operator-leader-election-rolebinding + namespace: operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operator-leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: operator-controller-manager-metrics-service + namespace: operator-system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + control-plane: controller-manager + name: operator-controller-manager + namespace: operator-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + - args: + - --metrics-addr=127.0.0.1:8080 + - --enable-leader-election + command: 
+ - /manager + image: gcr.io/elcarro/oracle.db.anthosapis.com/operator:latest + name: manager + resources: + limits: + cpu: 100m + memory: 40Mi + requests: + cpu: 100m + memory: 30Mi + terminationGracePeriodSeconds: 10 diff --git a/oracle/pkg/agents/backup/BUILD.bazel b/oracle/pkg/agents/backup/BUILD.bazel new file mode 100644 index 0000000..a7b158d --- /dev/null +++ b/oracle/pkg/agents/backup/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "backup", + srcs = [ + "backup.go", + "restore.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/backup", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_k8s_klog_v2//:klog", + ], +) diff --git a/oracle/pkg/agents/backup/backup.go b/oracle/pkg/agents/backup/backup.go new file mode 100644 index 0000000..c6f9296 --- /dev/null +++ b/oracle/pkg/agents/backup/backup.go @@ -0,0 +1,164 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package backup provides physical database backup utility functions intended +// to be called from a Config Agent gRPC server. +package backup + +import ( + "context" + "fmt" + + lropb "google.golang.org/genproto/googleapis/longrunning" + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +const ( + allocateChannel = "allocate channel disk%d device type disk;\n" + + // The format of the backup statement template is: + // run { + // channels + // backup + // as + // + // + //
+ // incremental level + // to destination '' + // + // backup... + // } + backupStmtTemplate = `run { + %s + backup + as %s %s + %s + %s + %s + incremental level %d + to destination '%s' + (%s); + backup + to destination '%s' + (spfile) (current controlfile) + plus archivelog; + } + ` +) + +// Params that can be passed to PhysicalBackup. +type Params struct { + InstanceName string + CDBName string + Client dbdpb.DatabaseDaemonClient + Granularity string + Backupset bool + DOP int32 + CheckLogical bool + Compressed bool + Level int32 + Filesperset int32 + SectionSize int32 + LocalPath string + GCSPath string + OperationID string +} + +// PhysicalBackup takes a physical backup of the oracle database. +func PhysicalBackup(ctx context.Context, params *Params) (*lropb.Operation, error) { + klog.InfoS("oracle/PhysicalBackup", "params", params) + + var channels string + for i := 1; i <= int(params.DOP); i++ { + channels += fmt.Sprintf(allocateChannel, i) + } + klog.InfoS("oracle/PhysicalBackup", "channels", channels) + + granularity := "database" + if params.Granularity != "" { + granularity = params.Granularity + } + + backupDir := consts.DefaultRMANDir + if params.LocalPath != "" { + backupDir = params.LocalPath + } + // for RMAN backup to GCS bucket, first backup to a staging location. Remove staging dir when upload finishes. + if params.GCSPath != "" { + backupDir = consts.RMANStagingDir + } + klog.InfoS("oracle/PhysicalBackup", "backupDir", backupDir) + + // Check/create the destination dir if it's different from the default. + if _, err := params.Client.CreateDir(ctx, &dbdpb.CreateDirRequest{ + Path: backupDir, + Perm: 0760, + }); err != nil { + return nil, fmt.Errorf("failed to create a backup dir %q: %v", backupDir, err) + } + + if params.Compressed && !params.Backupset { + return nil, fmt.Errorf("oracle/PhysicalBackup: failed a pre-flight check: Image Copy type of backup is not compatible with a Compress setting") + } + + var compressed string + if params.Compressed { + compressed = "compressed" + } + + var backupset string + if params.Backupset { + backupset = "backupset" + } else { + backupset = "copy" + } + klog.InfoS("oracle/PhysicalBackup", "backupset", backupset) + + checklogical := "check logical" + if !params.CheckLogical { + checklogical = "" + } + klog.InfoS("oracle/PhysicalBackup", "checkLogical", checklogical) + + filesperset := "" + if params.Filesperset != 0 { + filesperset = fmt.Sprintf("filesperset %d", params.Filesperset) + } + klog.InfoS("oracle/PhysicalBackup", "filesperset", filesperset) + + sectionSize := "" + if params.SectionSize != 0 { + sectionSize = fmt.Sprintf("section size %dM", params.SectionSize) + } + klog.InfoS("oracle/PhysicalBackup", "sectionSize", sectionSize) + + backupStmt := fmt.Sprintf(backupStmtTemplate, channels, compressed, backupset, checklogical, filesperset, sectionSize, params.Level, backupDir, granularity, backupDir) + klog.InfoS("oracle/PhysicalBackup", "finalBackupRequest", backupStmt) + + backupReq := &dbdpb.RunRMANAsyncRequest{ + SyncRequest: &dbdpb.RunRMANRequest{Scripts: []string{backupStmt}, GcsPath: params.GCSPath, LocalPath: params.LocalPath, Cmd: consts.RMANBackup}, + LroInput: &dbdpb.LROInput{OperationId: params.OperationID}, + } + klog.InfoS("oracle/PhysicalBackup", "backupReq", backupReq) + + operation, err := params.Client.RunRMANAsync(ctx, backupReq) + if err != nil { + return nil, fmt.Errorf("oracle/PhysicalBackup: failed to create database backup request: %v", err) + } + return operation, nil +} diff --git 
a/oracle/pkg/agents/backup/restore.go b/oracle/pkg/agents/backup/restore.go new file mode 100644 index 0000000..584fec7 --- /dev/null +++ b/oracle/pkg/agents/backup/restore.go @@ -0,0 +1,186 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file of package backup provides the restore and recovery functions +// from a physical backup and is intended to be called from a Config Agent +// gRPC server. +package backup + +import ( + "context" + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + lropb "google.golang.org/genproto/googleapis/longrunning" + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +const ( + maxSCNquery = ` + select max(next_change#) as scn + from v$archived_log + where resetlogs_id=( + select resetlogs_id + from v$database_incarnation + where status='CURRENT' + ) + ` + + restoreStmtTemplate = `run { + startup force nomount; + restore spfile to '%s' from '%s'; + shutdown immediate; + startup nomount; + restore controlfile from '%s'; + startup mount; + %s + restore database; + delete foreign archivelog all; + } + ` + + recoverStmtTemplate = `run { + recover database until scn %d; + alter database open resetlogs; + alter pluggable database all open; + } + ` +) + +type fileTime struct { + name string + modTime time.Time +} + +// PhysicalRestore runs an RMAN restore and recovery. +// Presently the recovery process goes up to the last SCN in the last +// archived redo log.
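+// For illustration (the SCN value is hypothetical): with a latest
+// recoverable SCN of 2846185, recoverStmtTemplate above renders to:
+//
+//	run {
+//	recover database until scn 2846185;
+//	alter database open resetlogs;
+//	alter pluggable database all open;
+//	}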
+func PhysicalRestore(ctx context.Context, params *Params) (*lropb.Operation, error) {
+	klog.InfoS("oracle/PhysicalRestore", "params", params)
+
+	var channels string
+	for i := 1; i <= int(params.DOP); i++ {
+		channels += fmt.Sprintf(allocateChannel, i)
+	}
+	klog.InfoS("oracle/PhysicalRestore", "channels", channels)
+
+	backupDir := consts.DefaultRMANDir
+	if params.LocalPath != "" {
+		backupDir = params.LocalPath
+	}
+	klog.InfoS("oracle/PhysicalRestore", "backupDir", backupDir)
+
+	if params.GCSPath != "" {
+		backupDir = consts.RMANStagingDir
+		downloadReq := &dbdpb.DownloadDirectoryFromGCSRequest{
+			GcsPath:   params.GCSPath,
+			LocalPath: backupDir,
+		}
+		klog.InfoS("oracle/PhysicalRestore", "restore from gcs, downloadReq", downloadReq)
+
+		if _, err := params.Client.DownloadDirectoryFromGCS(ctx, downloadReq); err != nil {
+			return nil, fmt.Errorf("failed to download the RMAN backup from the GCS bucket: %v", err)
+		}
+	}
+
+	// Files are stored in the default format, e.g.
+	// /u03/app/oracle/rman/id/DB_UNIQUE_NAME/backupset/2020_03_13/o1_mf_nnsnf_TAG20200313T214926_h6qzz6g6_.bkp
+	resp, err := params.Client.ReadDir(ctx, &dbdpb.ReadDirRequest{
+		Path:      backupDir,
+		Recursive: true,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to read backup dir: %v", err)
+	}
+	var spfilesTime []fileTime
+	for _, fileInfo := range resp.SubPaths {
+		if !fileInfo.IsDir && strings.Contains(fileInfo.Name, "nnsnf") {
+			if err := fileInfo.ModTime.CheckValid(); err != nil {
+				return nil, fmt.Errorf("failed to convert timestamp: %v", err)
+			}
+			modTime := fileInfo.ModTime.AsTime()
+			spfilesTime = append(spfilesTime, fileTime{name: fileInfo.AbsPath, modTime: modTime})
+		}
+	}
+	if len(spfilesTime) < 1 {
+		return nil, fmt.Errorf("failed to find spfile candidates: found %d", len(spfilesTime))
+	}
+
+	for i, t := range spfilesTime {
+		klog.InfoS("spfiles time", "index", i, "name", t.name, "modTime", t.modTime)
+	}
+
+	sort.Slice(spfilesTime, func(i, j int) bool {
+		return spfilesTime[i].modTime.After(spfilesTime[j].modTime)
+	})
+
+	klog.InfoS("oracle/PhysicalRestore: sorted spfiles", "spfilesTime", spfilesTime)
+
+	var ctlfilesTime []fileTime
+	for _, fileInfo := range resp.SubPaths {
+		if !fileInfo.IsDir && strings.Contains(fileInfo.Name, "ncnnf") {
+			if err := fileInfo.ModTime.CheckValid(); err != nil {
+				return nil, fmt.Errorf("failed to convert timestamp: %v", err)
+			}
+			modTime := fileInfo.ModTime.AsTime()
+			ctlfilesTime = append(ctlfilesTime, fileTime{name: fileInfo.AbsPath, modTime: modTime})
+		}
+	}
+	if len(ctlfilesTime) < 1 {
+		return nil, fmt.Errorf("failed to find controlfile candidates: found %d", len(ctlfilesTime))
+	}
+
+	sort.Slice(ctlfilesTime, func(i, j int) bool {
+		return ctlfilesTime[i].modTime.After(ctlfilesTime[j].modTime)
+	})
+	klog.InfoS("oracle/PhysicalRestore sorted control files", "ctlFilesTime", ctlfilesTime)
+
+	// Clear the spfile and the datafile dir before the restore.
+	spfileLoc := filepath.Join(
+		fmt.Sprintf(consts.ConfigDir, consts.DataMount, params.CDBName),
+		fmt.Sprintf("spfile%s.ora", params.CDBName),
+	)
+
+	if _, err := params.Client.DeleteDir(ctx, &dbdpb.DeleteDirRequest{Path: spfileLoc}); err != nil {
+		klog.ErrorS(err, "failed to delete the spfile before restore")
+	}
+
+	dataDir := filepath.Join(consts.OracleBase, "oradata", "*")
+	if _, err := params.Client.DeleteDir(ctx, &dbdpb.DeleteDirRequest{Path: dataDir}); err != nil {
+		klog.ErrorS(err, "failed to delete the data files before restore", "dataDir", dataDir)
+	}
+
+	restoreStmt := fmt.Sprintf(restoreStmtTemplate, spfileLoc, spfilesTime[0].name, ctlfilesTime[0].name, channels)
+
+	operation, err := params.Client.PhysicalRestoreAsync(ctx, &dbdpb.PhysicalRestoreAsyncRequest{
+		SyncRequest: &dbdpb.PhysicalRestoreRequest{
+			RestoreStatement:          restoreStmt,
+			LatestRecoverableScnQuery: maxSCNquery,
+			RecoverStatementTemplate:  recoverStmtTemplate,
+		},
+		LroInput: &dbdpb.LROInput{OperationId: params.OperationID},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("oracle/PhysicalRestore: failed to create database restore request: %v", err)
+	}
+	return operation, nil
+}
diff --git a/oracle/pkg/agents/common/BUILD.bazel b/oracle/pkg/agents/common/BUILD.bazel
new file mode 100644
index 0000000..6d15bee
--- /dev/null
+++ b/oracle/pkg/agents/common/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "common",
+    srcs = [
+        "connect.go",
+        "dbdaemonlib.go",
+        "socket.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common",
+    visibility = ["//visibility:public"],
+    deps = [
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//credentials/local",
+    ],
+)
diff --git a/oracle/pkg/agents/common/connect.go b/oracle/pkg/agents/common/connect.go
new file mode 100644
index 0000000..ed9dd5b
--- /dev/null
+++ b/oracle/pkg/agents/common/connect.go
@@ -0,0 +1,58 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common provides general utilities: utility functions for Oracle
+// connection strings, and helpers for communicating with the Database Daemon.
+// Both Unix domain socket and TCP/IP communication with the Database Daemon
+// are supported, with domain sockets being the default mechanism.
+package common
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// EZ returns an EZConnect string compatible with Oracle tooling.
+// All parameters except host are optional; refer to the documentation.
+// See https://docs.oracle.com/database/121/NETAG/naming.htm#NETAG1112.
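+//
+// For example (illustrative values only):
+//
+//	EZ("scott", "tiger", "localhost", "6021", "orcl", "example.com", false)
+//
+// returns "scott/tiger@localhost:6021/orcl.example.com"; the same call with
+// asSysDba=true appends " AS SYSDBA".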
+func EZ(user, pass, host, port, db, domain string, asSysDba bool) string { + svc := db + if domain != "" { + svc = fmt.Sprintf("%s.%s", db, domain) + } + if host == "" { + return "" + } + // username[/password]@[//]host[:port][/service_name][:server][/instance_name] + cs := strings.TrimRight(net.JoinHostPort(host, port), ":") + uPart := user + if pass != "" { + uPart = fmt.Sprintf("%s/%s", user, pass) + } + if uPart != "" { + cs = fmt.Sprintf("%s@%s", uPart, cs) + } + if svc != "" { + cs = fmt.Sprintf("%s/%s", cs, svc) + } + if asSysDba { + cs = fmt.Sprintf("%s AS SYSDBA", cs) + } + return cs +} diff --git a/oracle/pkg/agents/common/dbdaemonlib.go b/oracle/pkg/agents/common/dbdaemonlib.go new file mode 100644 index 0000000..1f137d7 --- /dev/null +++ b/oracle/pkg/agents/common/dbdaemonlib.go @@ -0,0 +1,62 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/local" +) + +var ( + // CallTimeout can be set via a flag by the program importing the library. + CallTimeout = 5 * time.Minute +) + +// withTimeout returns a context with a default timeout if the input context has no timeout. +func withTimeout(ctx context.Context, timeOut time.Duration) (context.Context, context.CancelFunc) { + if _, ok := ctx.Deadline(); ok { + return ctx, func() {} + } + return context.WithTimeout(ctx, timeOut) +} + +// DatabaseDaemonDialLocalhost connects to a local Database Daemon via gRPC. +func DatabaseDaemonDialLocalhost(ctx context.Context, port int, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + ctxDial, cancel := withTimeout(ctx, CallTimeout) + defer cancel() + finalOpts := append([]grpc.DialOption{grpc.WithTransportCredentials(local.NewCredentials())}, opts...) + return grpc.DialContext(ctxDial, fmt.Sprintf("localhost:%d", port), finalOpts...) +} + +// DatabaseDaemonDialSocket connects to Database Daemon via gRPC. +func DatabaseDaemonDialSocket(ctx context.Context, socket string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + ctxDial, cancel := withTimeout(ctx, CallTimeout) + defer cancel() + endpoint := fmt.Sprintf("passthrough://unix/%s", socket) + finalOpts := append([]grpc.DialOption{grpc.WithTransportCredentials(local.NewCredentials()), grpc.WithContextDialer(GrpcUnixDialer)}, opts...) + return grpc.DialContext(ctxDial, endpoint, finalOpts...) +} + +// DatabaseDaemonDialService connects to Database Service via gRPC. +func DatabaseDaemonDialService(ctx context.Context, serviceAndPort string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + ctxDial, cancel := withTimeout(ctx, CallTimeout) + defer cancel() + finalOpts := append([]grpc.DialOption{grpc.WithInsecure()}, opts...) + return grpc.DialContext(ctxDial, serviceAndPort, finalOpts...) 
+}
diff --git a/oracle/pkg/agents/common/socket.go b/oracle/pkg/agents/common/socket.go
new file mode 100644
index 0000000..7998820
--- /dev/null
+++ b/oracle/pkg/agents/common/socket.go
@@ -0,0 +1,62 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common

+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var (
+	// MaxAttempts can be set via a flag by the program importing the library.
+	MaxAttempts = 5
+	// MaxDelay can be set via a flag by the program importing the library.
+	MaxDelay = 30 * time.Second
+)
+
+// UnixDialer opens a socket connection, retrying up to MaxAttempts times
+// with MaxDelay between attempts.
+func UnixDialer(ctx context.Context, addr string) (net.Conn, error) {
+	var err error
+	var conn net.Conn
+	var d net.Dialer
+
+	for i := 1; i <= MaxAttempts; i++ {
+		conn, err = d.DialContext(ctx, "unix", addr)
+		if err == nil {
+			return conn, nil
+		}
+
+		// UnixDialer is usually called by other functions that would swallow
+		// this error, so log it here.
+		klog.InfoS("Unix dialer failed", "addr", addr, "err", err)
+		select {
+		case <-time.After(MaxDelay):
+			continue
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+	klog.ErrorS(fmt.Errorf("failed to connect"), "failed to connect", "numAttempts", MaxAttempts)
+	return nil, err
+}
+
+// GrpcUnixDialer opens a unix socket connection compatible with gRPC dialers.
+func GrpcUnixDialer(ctx context.Context, addr string) (net.Conn, error) {
+	return UnixDialer(ctx, addr)
+}
diff --git a/oracle/pkg/agents/common/sql/BUILD.bazel b/oracle/pkg/agents/common/sql/BUILD.bazel
new file mode 100644
index 0000000..9ca2a9a
--- /dev/null
+++ b/oracle/pkg/agents/common/sql/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "sql",
+    srcs = ["sql.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql",
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "sql_test",
+    srcs = ["sql_test.go"],
+    embed = [":sql"],
+)
diff --git a/oracle/pkg/agents/common/sql/sql.go b/oracle/pkg/agents/common/sql/sql.go
new file mode 100644
index 0000000..51c1d4b
--- /dev/null
+++ b/oracle/pkg/agents/common/sql/sql.go
@@ -0,0 +1,180 @@
+// Package sql provides helpers for constructing Oracle SQL statements,
+// escaping identifiers and string parameters supplied by callers.
+package sql
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const (
+	createPDBCmd      = "create pluggable database %s admin user %s identified by %s create_file_dest='%s' default tablespace %s datafile '%s' size 1G autoextend on storage unlimited file_name_convert=('%s', '%s')"
+	setContainerCmd   = "alter session set container=%s"
+	createDirCmd      = "create directory %s as '%s'"
+	createUserCmd     = "create user %s identified by %s"
+	alterUserCmd      = "alter user %s identified by %s"
+	grantPrivCmd      = "grant %s to %s"
+	revokePrivCmd     = "revoke %s from %s"
+	alterSystemSetCmd = "alter system set %s=%s"
+)
+
+var (
+	// ErrQuoteInIdentifier is an error returned when an identifier
+	// contains a double-quote.
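+	// For example, Identifier(`scott"; DROP TABLE USERS; "`) returns this
+	// error, since a double quote cannot be neutralized inside a quoted
+	// Oracle identifier.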
+ ErrQuoteInIdentifier = errors.New("identifier contains double quotes") + privilegeMatcher = regexp.MustCompile(`^[A-Za-z ,_]+$`).MatchString + parameterNonStringMatcher = regexp.MustCompile(`^[A-Za-z0-9-]+$`).MatchString +) + +// QueryCreatePDB constructs a sql statement for creating a new pluggable database. +// It panics if one of the following params is not a valid identifier +// * pdbName +// * adminUser +// * adminUserPass +// * defaultTablespace +func QueryCreatePDB(pdbName, adminUser, adminUserPass, dataFilesDir, defaultTablespace, defaultTablespaceDatafile, fileConvertFrom, fileConvertTo string) string { + return fmt.Sprintf(createPDBCmd, + MustBeObjectName(pdbName), + MustBeObjectName(adminUser), + MustBeIdentifier(adminUserPass), + StringParam(dataFilesDir), + MustBeObjectName(defaultTablespace), + StringParam(defaultTablespaceDatafile), + StringParam(fileConvertFrom), + StringParam(fileConvertTo), + ) +} + +// QueryCreateDir constructs a sql statement for creating a new Oracle directory. +// It panics if dirName is not a valid identifier. +func QueryCreateDir(dirName, path string) string { + return fmt.Sprintf(createDirCmd, + MustBeObjectName(dirName), + StringParam(path), + ) +} + +// QueryCreateUser constructs a sql statement for creating a new database user. +// It panics if any parameter is not a valid identifier. +func QueryCreateUser(name, pass string) string { + return fmt.Sprintf(createUserCmd, + MustBeObjectName(name), + MustBeIdentifier(pass), + ) +} + +// QueryAlterUser constructs a sql statement for updating user password. +// It panics if any parameter is not a valid identifier. +func QueryAlterUser(name, pass string) string { + return fmt.Sprintf(alterUserCmd, + MustBeObjectName(name), + MustBeIdentifier(pass), + ) +} + +// QuerySetSessionContainer constructs a sql statement for changing session +// container to the given pdbName. +// It panics if pdbName is not a valid identifier. +func QuerySetSessionContainer(pdbName string) string { + return fmt.Sprintf(setContainerCmd, MustBeObjectName(pdbName)) +} + +// Identifier escapes an Oracle identifier. +// If id is not a valid identifier the ErrQuoteInIdentifier error is returned. +func Identifier(id string) (string, error) { + if strings.Contains(id, `"`) { + return "", ErrQuoteInIdentifier + } + + return `"` + id + `"`, nil +} + +// ObjectName escapes an Oracle object name. +// If id is not a valid identifier the ErrQuoteInIdentifier error is returned. +func ObjectName(id string) (string, error) { + return Identifier(strings.ToUpper(id)) +} + +// MustBeIdentifier escapes an Oracle identifier. +// It panics if id is not a valid identifier. +func MustBeIdentifier(id string) string { + sanitizedID, err := Identifier(id) + if err != nil { + panic(err) + } + return sanitizedID +} + +// MustBeObjectName escapes an Oracle object name. +// It panics if id is not a valid identifier. +func MustBeObjectName(id string) string { + sanitizedID, err := ObjectName(id) + if err != nil { + panic(err) + } + return sanitizedID +} + +// StringParam escapes a string parameter. +func StringParam(s string) string { + return strings.ReplaceAll(s, "'", "''") +} + +// IsPrivilege returns true if parameter has the expected syntax of +// (comma separated) list of privileges. 
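+//
+// For example (inputs taken from the package tests): "create session,
+// resource" is accepted, while "connect; drop table users" is rejected
+// because ';' falls outside the allowed [A-Za-z ,_] characters.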
+func IsPrivilege(p string) bool {
+	return privilegeMatcher(p)
+}
+
+func mustBePrivilege(p string) string {
+	if !IsPrivilege(p) {
+		panic(fmt.Errorf("not a privilege: %s", p))
+	}
+	return p
+}
+
+// QueryGrantPrivileges constructs a sql statement for granting privileges.
+// It panics if privileges is not a valid list of privileges (syntactically) or
+// grantee is not a valid identifier.
+func QueryGrantPrivileges(privileges, grantee string) string {
+	return fmt.Sprintf(grantPrivCmd,
+		mustBePrivilege(privileges),
+		MustBeObjectName(grantee),
+	)
+}
+
+// QueryRevokePrivileges constructs a sql statement for revoking privileges.
+// It panics if privileges is not a valid list of privileges (syntactically) or
+// grantee is not a valid identifier.
+func QueryRevokePrivileges(privileges, grantee string) string {
+	return fmt.Sprintf(revokePrivCmd,
+		mustBePrivilege(privileges),
+		MustBeObjectName(grantee),
+	)
+}
+
+// IsValidParameterValue returns false if the parameter value is not valid for
+// the parameter type.
+// It can still return true in cases where the value won't be accepted by the
+// database, e.g. an int value set for a boolean parameter or vice versa, but
+// the cases relevant for constructing a syntactically correct query are
+// supported.
+func IsValidParameterValue(value string, isTypeString bool) bool {
+	if isTypeString {
+		return true
+	}
+	return parameterNonStringMatcher(value)
+}
+
+// QuerySetSystemParameterNoPanic constructs a sql statement for setting a
+// database parameter.
+// It returns an error if IsValidParameterValue(value, isTypeString) returns
+// false.
+func QuerySetSystemParameterNoPanic(name, value string, isTypeString bool) (string, error) {
+	if !IsValidParameterValue(value, isTypeString) {
+		return "", fmt.Errorf("unsupported value %q for parameter %q", value, name)
+	}
+
+	if isTypeString {
+		value = "'" + StringParam(value) + "'"
+	}
+
+	return fmt.Sprintf(alterSystemSetCmd, name, value), nil
+}
diff --git a/oracle/pkg/agents/common/sql/sql_test.go b/oracle/pkg/agents/common/sql/sql_test.go
new file mode 100644
index 0000000..3c3e46d
--- /dev/null
+++ b/oracle/pkg/agents/common/sql/sql_test.go
@@ -0,0 +1,377 @@
+package sql
+
+import (
+	"testing"
+)
+
+func TestObjectName(t *testing.T) {
+	tests := []struct {
+		id      string
+		want    string
+		wantErr bool
+	}{
+		{
+			id:   "scott",
+			want: `"SCOTT"`,
+		},
+		{
+			id:   "SCOTT",
+			want: `"SCOTT"`,
+		},
+		{
+			id:      `scott"; DROP TABLE USERS; "`,
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.id, func(t *testing.T) {
+			got, err := ObjectName(tt.id)
+
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ObjectName(%q) error = %q, wantErr %v", tt.id, err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("ObjectName(%q) got = %q, want %q", tt.id, got, tt.want)
+			}
+		})
+	}
+}
+
+func TestMustBeObjectName(t *testing.T) {
+	tests := []struct {
+		id        string
+		want      string
+		wantPanic bool
+	}{
+		{
+			id:   "scott",
+			want: `"SCOTT"`,
+		},
+		{
+			id:   "SCOTT",
+			want: `"SCOTT"`,
+		},
+		{
+			id:        `scott"; DROP TABLE USERS; "`,
+			wantPanic: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.id, func(t *testing.T) {
+			defer func() {
+				gotPanic := recover() != nil
+				if gotPanic != tt.wantPanic {
+					t.Errorf("MustBeObjectName(%q) panic = %v, wantPanic %v", tt.id, gotPanic, tt.wantPanic)
+					return
+				}
+			}()
+
+			got := MustBeObjectName(tt.id)
+
+			if got != tt.want {
+				t.Errorf("MustBeObjectName(%q) = %q, want %q", tt.id, got, tt.want)
+			}
+		})
+	}
+}
+
+func TestIdentifier(t *testing.T) {
+	tests := []struct {
+		id 
string + want string + wantErr bool + }{ + { + id: "scott", + want: `"scott"`, + }, + { + id: "SCOTT", + want: `"SCOTT"`, + }, + { + id: `scott"; DROP TABLE USERS; "`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.id, func(t *testing.T) { + got, err := Identifier(tt.id) + + if (err != nil) != tt.wantErr { + t.Errorf("Identifier(%q) error = %q, want %v", tt.id, err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Identifier(%q) got = %q, want %q", tt.id, got, tt.want) + } + }) + } +} + +func TestMustBeIdentifier(t *testing.T) { + tests := []struct { + id string + want string + wantPanic bool + }{ + { + id: "scott", + want: `"scott"`, + }, + { + id: "SCOTT", + want: `"SCOTT"`, + }, + { + id: `scott"; DROP TABLE USERS; "`, + wantPanic: true, + }, + } + for _, tt := range tests { + t.Run(tt.id, func(t *testing.T) { + defer func() { + gotPanic := recover() != nil + if gotPanic != tt.wantPanic { + t.Errorf("MustBeIdentifier(%q) panic = %v, wantPanic %v", tt.id, gotPanic, tt.wantPanic) + return + } + }() + + got := MustBeIdentifier(tt.id) + + if got != tt.want { + t.Errorf("MustBeIdentifier(%q) = %q, want %q", tt.id, got, tt.want) + } + }) + } +} + +func TestIsPrivilege(t *testing.T) { + tests := []struct { + priv string + want bool + }{ + { + priv: "create session", + want: true, + }, + { + priv: "create session, resource, datapump_imp_full_database, datapump_exp_full_database, unlimited tablespace", + want: true, + }, + { + priv: "RESOURCE", + want: true, + }, + { + priv: "connect; drop table users", + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.priv, func(t *testing.T) { + got := IsPrivilege(tt.priv) + if got != tt.want { + t.Errorf("IsPrivilege(%q) got = %v, want %v", tt.priv, got, tt.want) + } + }) + } +} + +func TestMustBePrivilege(t *testing.T) { + tests := []struct { + priv string + wantPanic bool + }{ + { + priv: "create session", + wantPanic: false, + }, + { + priv: "create session, resource, datapump_imp_full_database, datapump_exp_full_database, unlimited tablespace", + wantPanic: false, + }, + { + priv: "RESOURCE", + wantPanic: false, + }, + { + priv: "connect; drop table users", + wantPanic: true, + }, + } + for _, tt := range tests { + t.Run(tt.priv, func(t *testing.T) { + defer func() { + gotPanic := recover() != nil + if gotPanic != tt.wantPanic { + t.Errorf("mustBePrivilege(%q) panic = %v, wantPanic %v", tt.priv, gotPanic, tt.wantPanic) + return + } + }() + + got := mustBePrivilege(tt.priv) + + if got != tt.priv { + t.Errorf("mustBePrivilege(%q) got = %v, want %v", tt.priv, got, tt.priv) + } + }) + } +} + +func TestStringParam(t *testing.T) { + tests := []struct { + str string + want string + }{ + { + str: "text", + want: "text", + }, + { + str: "SOME TEXT", + want: "SOME TEXT", + }, + { + str: "That's '' single quotes", + want: "That''s '''' single quotes", + }, + } + for _, tt := range tests { + t.Run(tt.str, func(t *testing.T) { + got := StringParam(tt.str) + if got != tt.want { + t.Errorf("StringParam(%q) got = %v, want %v", tt.str, got, tt.want) + } + }) + } +} + +func TestIsValidParameterValue(t *testing.T) { + tests := []struct { + name string + isString bool + value string + want bool + }{ + { + name: "string parameter", + isString: true, + value: "/u01/bin, /u02/bin, 'etc'", + want: true, + }, + { + name: "injection attempt", + isString: false, + value: "true; DROP TABLE USERS", + want: false, + }, + { + name: "positive number", + isString: false, + value: "120", + want: true, + }, + { + name: "negative number", + 
isString: false, + value: "-120", + want: true, + }, + { + name: "true", + isString: false, + value: "true", + want: true, + }, + { + name: "FALSE", + isString: false, + value: "FALSe", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsValidParameterValue(tt.value, tt.isString) + if got != tt.want { + t.Errorf("IsValidParameterValue(%q, %v) got = %v, want %v", tt.value, tt.isString, got, tt.want) + } + }) + } +} + +func TestQuerySetSystemParameterNoPanic(t *testing.T) { + testParamName := "p" + + tests := []struct { + name string + isString bool + value string + want string + wantErr bool + }{ + { + name: "string parameter", + isString: true, + value: "/u01/bin, /u02/bin, 'etc'", + want: `alter system set p='/u01/bin, /u02/bin, ''etc'''`, + wantErr: false, + }, + { + name: "injection attempt", + isString: false, + value: "true; DROP TABLE USERS", + wantErr: true, + }, + { + name: "string parameter, with SQL", + isString: true, + value: "true; DROP TABLE USERS", + want: `alter system set p='true; DROP TABLE USERS'`, + wantErr: false, + }, + { + name: "positive number", + isString: false, + value: "120", + want: `alter system set p=120`, + wantErr: false, + }, + { + name: "negative number", + isString: false, + value: "-120", + want: "alter system set p=-120", + wantErr: false, + }, + { + name: "true", + isString: false, + value: "true", + want: "alter system set p=true", + wantErr: false, + }, + { + name: "FALSE", + isString: false, + value: "FALSE", + want: "alter system set p=FALSE", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := QuerySetSystemParameterNoPanic(testParamName, tt.value, tt.isString) + if (err != nil) != tt.wantErr { + t.Errorf("QuerySetSystemParameterNoPanic(%q, %q, %v) error = %q, wantErr %v", testParamName, tt.value, tt.isString, err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("QuerySetSystemParameterNoPanic(%q, %q, %v) got = %v, want %v", testParamName, tt.value, tt.isString, got, tt.want) + } + }) + } +} diff --git a/oracle/pkg/agents/config_agent/BUILD.bazel b/oracle/pkg/agents/config_agent/BUILD.bazel new file mode 100644 index 0000000..e69de29 diff --git a/oracle/pkg/agents/config_agent/protos/BUILD.bazel b/oracle/pkg/agents/config_agent/protos/BUILD.bazel new file mode 100644 index 0000000..7d86882 --- /dev/null +++ b/oracle/pkg/agents/config_agent/protos/BUILD.bazel @@ -0,0 +1,29 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "protos_proto", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:empty_proto", + "@go_googleapis//google/longrunning:longrunning_proto", + ], +) + +go_proto_library( + name = "protos_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos", + proto = ":protos_proto", + visibility = ["//visibility:public"], + deps = ["@go_googleapis//google/longrunning:longrunning_go_proto"], +) + +go_library( + name = "protos", + embed = [":protos_go_proto"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos", + visibility = ["//visibility:public"], +) diff --git a/oracle/pkg/agents/config_agent/protos/service.pb.go 
b/oracle/pkg/agents/config_agent/protos/service.pb.go new file mode 100644 index 0000000..13b21a9 --- /dev/null +++ b/oracle/pkg/agents/config_agent/protos/service.pb.go @@ -0,0 +1,3772 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Config Agent proto for gRPC communications from controllers. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.4 +// source: oracle/pkg/agents/config_agent/protos/service.proto + +package protos + +import ( + empty "github.com/golang/protobuf/ptypes/empty" + longrunning "google.golang.org/genproto/googleapis/longrunning" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UsersChangedResponse_Type int32 + +const ( + UsersChangedResponse_UNKNOWN_TYPE UsersChangedResponse_Type = 0 + UsersChangedResponse_DELETE UsersChangedResponse_Type = 1 + UsersChangedResponse_CREATE UsersChangedResponse_Type = 2 +) + +// Enum value maps for UsersChangedResponse_Type. +var ( + UsersChangedResponse_Type_name = map[int32]string{ + 0: "UNKNOWN_TYPE", + 1: "DELETE", + 2: "CREATE", + } + UsersChangedResponse_Type_value = map[string]int32{ + "UNKNOWN_TYPE": 0, + "DELETE": 1, + "CREATE": 2, + } +) + +func (x UsersChangedResponse_Type) Enum() *UsersChangedResponse_Type { + p := new(UsersChangedResponse_Type) + *p = x + return p +} + +func (x UsersChangedResponse_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UsersChangedResponse_Type) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[0].Descriptor() +} + +func (UsersChangedResponse_Type) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[0] +} + +func (x UsersChangedResponse_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UsersChangedResponse_Type.Descriptor instead. +func (UsersChangedResponse_Type) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{12, 0} +} + +type PhysicalBackupRequest_Type int32 + +const ( + PhysicalBackupRequest_UNKNOWN_TYPE PhysicalBackupRequest_Type = 0 + PhysicalBackupRequest_INSTANCE PhysicalBackupRequest_Type = 1 + PhysicalBackupRequest_DATABASE PhysicalBackupRequest_Type = 2 + PhysicalBackupRequest_TABLESPACE PhysicalBackupRequest_Type = 3 + PhysicalBackupRequest_DATAFILE PhysicalBackupRequest_Type = 4 +) + +// Enum value maps for PhysicalBackupRequest_Type. 
+var ( + PhysicalBackupRequest_Type_name = map[int32]string{ + 0: "UNKNOWN_TYPE", + 1: "INSTANCE", + 2: "DATABASE", + 3: "TABLESPACE", + 4: "DATAFILE", + } + PhysicalBackupRequest_Type_value = map[string]int32{ + "UNKNOWN_TYPE": 0, + "INSTANCE": 1, + "DATABASE": 2, + "TABLESPACE": 3, + "DATAFILE": 4, + } +) + +func (x PhysicalBackupRequest_Type) Enum() *PhysicalBackupRequest_Type { + p := new(PhysicalBackupRequest_Type) + *p = x + return p +} + +func (x PhysicalBackupRequest_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PhysicalBackupRequest_Type) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[1].Descriptor() +} + +func (PhysicalBackupRequest_Type) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[1] +} + +func (x PhysicalBackupRequest_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PhysicalBackupRequest_Type.Descriptor instead. +func (PhysicalBackupRequest_Type) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{15, 0} +} + +type CheckStatusRequest_Type int32 + +const ( + CheckStatusRequest_UNKNOWN_TYPE CheckStatusRequest_Type = 0 + CheckStatusRequest_INSTANCE CheckStatusRequest_Type = 1 +) + +// Enum value maps for CheckStatusRequest_Type. +var ( + CheckStatusRequest_Type_name = map[int32]string{ + 0: "UNKNOWN_TYPE", + 1: "INSTANCE", + } + CheckStatusRequest_Type_value = map[string]int32{ + "UNKNOWN_TYPE": 0, + "INSTANCE": 1, + } +) + +func (x CheckStatusRequest_Type) Enum() *CheckStatusRequest_Type { + p := new(CheckStatusRequest_Type) + *p = x + return p +} + +func (x CheckStatusRequest_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CheckStatusRequest_Type) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[2].Descriptor() +} + +func (CheckStatusRequest_Type) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[2] +} + +func (x CheckStatusRequest_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CheckStatusRequest_Type.Descriptor instead. +func (CheckStatusRequest_Type) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{17, 0} +} + +type SetParameterRequest_Type int32 + +const ( + SetParameterRequest_DYNAMIC SetParameterRequest_Type = 0 + SetParameterRequest_STATIC SetParameterRequest_Type = 1 + SetParameterRequest_DEFERRED SetParameterRequest_Type = 2 +) + +// Enum value maps for SetParameterRequest_Type. 
+var ( + SetParameterRequest_Type_name = map[int32]string{ + 0: "DYNAMIC", + 1: "STATIC", + 2: "DEFERRED", + } + SetParameterRequest_Type_value = map[string]int32{ + "DYNAMIC": 0, + "STATIC": 1, + "DEFERRED": 2, + } +) + +func (x SetParameterRequest_Type) Enum() *SetParameterRequest_Type { + p := new(SetParameterRequest_Type) + *p = x + return p +} + +func (x SetParameterRequest_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SetParameterRequest_Type) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[3].Descriptor() +} + +func (SetParameterRequest_Type) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes[3] +} + +func (x SetParameterRequest_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SetParameterRequest_Type.Descriptor instead. +func (SetParameterRequest_Type) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{26, 0} +} + +type CreateCDBRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OracleHome string `protobuf:"bytes,1,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + Sid string `protobuf:"bytes,2,opt,name=sid,proto3" json:"sid,omitempty"` + DbUniqueName string `protobuf:"bytes,3,opt,name=db_unique_name,json=dbUniqueName,proto3" json:"db_unique_name,omitempty"` + CharacterSet string `protobuf:"bytes,4,opt,name=character_set,json=characterSet,proto3" json:"character_set,omitempty"` + MemoryPercent int32 `protobuf:"varint,5,opt,name=memory_percent,json=memoryPercent,proto3" json:"memory_percent,omitempty"` + AdditionalParams []string `protobuf:"bytes,6,rep,name=additional_params,json=additionalParams,proto3" json:"additional_params,omitempty"` + Version string `protobuf:"bytes,7,opt,name=version,proto3" json:"version,omitempty"` + DbDomain string `protobuf:"bytes,8,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CreateCDBRequest) Reset() { + *x = CreateCDBRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBRequest) ProtoMessage() {} + +func (x *CreateCDBRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBRequest.ProtoReflect.Descriptor instead. 
+func (*CreateCDBRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateCDBRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *CreateCDBRequest) GetSid() string { + if x != nil { + return x.Sid + } + return "" +} + +func (x *CreateCDBRequest) GetDbUniqueName() string { + if x != nil { + return x.DbUniqueName + } + return "" +} + +func (x *CreateCDBRequest) GetCharacterSet() string { + if x != nil { + return x.CharacterSet + } + return "" +} + +func (x *CreateCDBRequest) GetMemoryPercent() int32 { + if x != nil { + return x.MemoryPercent + } + return 0 +} + +func (x *CreateCDBRequest) GetAdditionalParams() []string { + if x != nil { + return x.AdditionalParams + } + return nil +} + +func (x *CreateCDBRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CreateCDBRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CreateListenerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + Protocol string `protobuf:"bytes,3,opt,name=protocol,proto3" json:"protocol,omitempty"` + OracleHome string `protobuf:"bytes,4,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + DbDomain string `protobuf:"bytes,5,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CreateListenerRequest) Reset() { + *x = CreateListenerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateListenerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateListenerRequest) ProtoMessage() {} + +func (x *CreateListenerRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateListenerRequest.ProtoReflect.Descriptor instead. 
+func (*CreateListenerRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateListenerRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateListenerRequest) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *CreateListenerRequest) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + +func (x *CreateListenerRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *CreateListenerRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CreateListenerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateListenerResponse) Reset() { + *x = CreateListenerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateListenerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateListenerResponse) ProtoMessage() {} + +func (x *CreateListenerResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateListenerResponse.ProtoReflect.Descriptor instead. +func (*CreateListenerResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{2} +} + +type GsmSecretReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + SecretId string `protobuf:"bytes,2,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + LastVersion string `protobuf:"bytes,4,opt,name=last_version,json=lastVersion,proto3" json:"last_version,omitempty"` +} + +func (x *GsmSecretReference) Reset() { + *x = GsmSecretReference{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GsmSecretReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GsmSecretReference) ProtoMessage() {} + +func (x *GsmSecretReference) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GsmSecretReference.ProtoReflect.Descriptor instead. 
+func (*GsmSecretReference) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GsmSecretReference) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *GsmSecretReference) GetSecretId() string { + if x != nil { + return x.SecretId + } + return "" +} + +func (x *GsmSecretReference) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *GsmSecretReference) GetLastVersion() string { + if x != nil { + return x.LastVersion + } + return "" +} + +type CreateDatabaseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // only being used for plaintext password scenario. + // GSM doesn't use this field. + Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` + DbDomain string `protobuf:"bytes,4,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + AdminPasswordGsmSecretRef *GsmSecretReference `protobuf:"bytes,5,opt,name=admin_password_gsm_secret_ref,json=adminPasswordGsmSecretRef,proto3" json:"admin_password_gsm_secret_ref,omitempty"` + // only being used for plaintext password scenario. + // GSM doesn't use this field. + LastPassword string `protobuf:"bytes,6,opt,name=last_password,json=lastPassword,proto3" json:"last_password,omitempty"` +} + +func (x *CreateDatabaseRequest) Reset() { + *x = CreateDatabaseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDatabaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDatabaseRequest) ProtoMessage() {} + +func (x *CreateDatabaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDatabaseRequest.ProtoReflect.Descriptor instead. 
+func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateDatabaseRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *CreateDatabaseRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateDatabaseRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *CreateDatabaseRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *CreateDatabaseRequest) GetAdminPasswordGsmSecretRef() *GsmSecretReference { + if x != nil { + return x.AdminPasswordGsmSecretRef + } + return nil +} + +func (x *CreateDatabaseRequest) GetLastPassword() string { + if x != nil { + return x.LastPassword + } + return "" +} + +type CreateDatabaseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CreateDatabaseResponse) Reset() { + *x = CreateDatabaseResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDatabaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDatabaseResponse) ProtoMessage() {} + +func (x *CreateDatabaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDatabaseResponse.ProtoReflect.Descriptor instead. 
+func (*CreateDatabaseResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateDatabaseResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *CreateDatabaseResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +type CreateUsersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + PdbName string `protobuf:"bytes,2,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + CreateUsersCmd []string `protobuf:"bytes,3,rep,name=create_users_cmd,json=createUsersCmd,proto3" json:"create_users_cmd,omitempty"` + GrantPrivsCmd []string `protobuf:"bytes,4,rep,name=grant_privs_cmd,json=grantPrivsCmd,proto3" json:"grant_privs_cmd,omitempty"` + DbDomain string `protobuf:"bytes,5,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + User []*User `protobuf:"bytes,6,rep,name=user,proto3" json:"user,omitempty"` +} + +func (x *CreateUsersRequest) Reset() { + *x = CreateUsersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUsersRequest) ProtoMessage() {} + +func (x *CreateUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUsersRequest.ProtoReflect.Descriptor instead. 
+func (*CreateUsersRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateUsersRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *CreateUsersRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *CreateUsersRequest) GetCreateUsersCmd() []string { + if x != nil { + return x.CreateUsersCmd + } + return nil +} + +func (x *CreateUsersRequest) GetGrantPrivsCmd() []string { + if x != nil { + return x.GrantPrivsCmd + } + return nil +} + +func (x *CreateUsersRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *CreateUsersRequest) GetUser() []*User { + if x != nil { + return x.User + } + return nil +} + +type CreateUsersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *CreateUsersResponse) Reset() { + *x = CreateUsersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUsersResponse) ProtoMessage() {} + +func (x *CreateUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUsersResponse.ProtoReflect.Descriptor instead. +func (*CreateUsersResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{7} +} + +func (x *CreateUsersResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +type CreateCDBUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + CreateUsersCmd []string `protobuf:"bytes,3,rep,name=create_users_cmd,json=createUsersCmd,proto3" json:"create_users_cmd,omitempty"` + GrantPrivsCmd []string `protobuf:"bytes,4,rep,name=grant_privs_cmd,json=grantPrivsCmd,proto3" json:"grant_privs_cmd,omitempty"` +} + +func (x *CreateCDBUserRequest) Reset() { + *x = CreateCDBUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBUserRequest) ProtoMessage() {} + +func (x *CreateCDBUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBUserRequest.ProtoReflect.Descriptor instead. 
+func (*CreateCDBUserRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{8} +} + +func (x *CreateCDBUserRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *CreateCDBUserRequest) GetCreateUsersCmd() []string { + if x != nil { + return x.CreateUsersCmd + } + return nil +} + +func (x *CreateCDBUserRequest) GetGrantPrivsCmd() []string { + if x != nil { + return x.GrantPrivsCmd + } + return nil +} + +type CreateCDBUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CreateCDBUserResponse) Reset() { + *x = CreateCDBUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBUserResponse) ProtoMessage() {} + +func (x *CreateCDBUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBUserResponse.ProtoReflect.Descriptor instead. +func (*CreateCDBUserResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateCDBUserResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *CreateCDBUserResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +type User struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // only being used for plaintext password scenario. + // GSM doesn't use this field. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Privileges []string `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` + PasswordGsmSecretRef *GsmSecretReference `protobuf:"bytes,4,opt,name=password_gsm_secret_ref,json=passwordGsmSecretRef,proto3" json:"password_gsm_secret_ref,omitempty"` + // only being used for plaintext password scenario. + // GSM doesn't use this field. 
+ LastPassword string `protobuf:"bytes,5,opt,name=last_password,json=lastPassword,proto3" json:"last_password,omitempty"` +} + +func (x *User) Reset() { + *x = User{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. +func (*User) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{10} +} + +func (x *User) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *User) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *User) GetPrivileges() []string { + if x != nil { + return x.Privileges + } + return nil +} + +func (x *User) GetPasswordGsmSecretRef() *GsmSecretReference { + if x != nil { + return x.PasswordGsmSecretRef + } + return nil +} + +func (x *User) GetLastPassword() string { + if x != nil { + return x.LastPassword + } + return "" +} + +type UsersChangedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + UserSpecs []*User `protobuf:"bytes,2,rep,name=user_specs,json=userSpecs,proto3" json:"user_specs,omitempty"` +} + +func (x *UsersChangedRequest) Reset() { + *x = UsersChangedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UsersChangedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsersChangedRequest) ProtoMessage() {} + +func (x *UsersChangedRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsersChangedRequest.ProtoReflect.Descriptor instead. 
+func (*UsersChangedRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{11} +} + +func (x *UsersChangedRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *UsersChangedRequest) GetUserSpecs() []*User { + if x != nil { + return x.UserSpecs + } + return nil +} + +type UsersChangedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Changed bool `protobuf:"varint,1,opt,name=changed,proto3" json:"changed,omitempty"` + Suppressed []*UsersChangedResponse_Suppressed `protobuf:"bytes,2,rep,name=suppressed,proto3" json:"suppressed,omitempty"` +} + +func (x *UsersChangedResponse) Reset() { + *x = UsersChangedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UsersChangedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsersChangedResponse) ProtoMessage() {} + +func (x *UsersChangedResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsersChangedResponse.ProtoReflect.Descriptor instead. +func (*UsersChangedResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{12} +} + +func (x *UsersChangedResponse) GetChanged() bool { + if x != nil { + return x.Changed + } + return false +} + +func (x *UsersChangedResponse) GetSuppressed() []*UsersChangedResponse_Suppressed { + if x != nil { + return x.Suppressed + } + return nil +} + +type UpdateUsersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + UserSpecs []*User `protobuf:"bytes,2,rep,name=user_specs,json=userSpecs,proto3" json:"user_specs,omitempty"` +} + +func (x *UpdateUsersRequest) Reset() { + *x = UpdateUsersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUsersRequest) ProtoMessage() {} + +func (x *UpdateUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUsersRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateUsersRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{13} +} + +func (x *UpdateUsersRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *UpdateUsersRequest) GetUserSpecs() []*User { + if x != nil { + return x.UserSpecs + } + return nil +} + +type UpdateUsersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateUsersResponse) Reset() { + *x = UpdateUsersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUsersResponse) ProtoMessage() {} + +func (x *UpdateUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUsersResponse.ProtoReflect.Descriptor instead. +func (*UpdateUsersResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{14} +} + +type PhysicalBackupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BackupSubType PhysicalBackupRequest_Type `protobuf:"varint,1,opt,name=backup_sub_type,json=backupSubType,proto3,enum=protos.PhysicalBackupRequest_Type" json:"backup_sub_type,omitempty"` + BackupItems []string `protobuf:"bytes,2,rep,name=backup_items,json=backupItems,proto3" json:"backup_items,omitempty"` + Backupset bool `protobuf:"varint,3,opt,name=backupset,proto3" json:"backupset,omitempty"` + Compressed bool `protobuf:"varint,4,opt,name=compressed,proto3" json:"compressed,omitempty"` + CheckLogical bool `protobuf:"varint,5,opt,name=check_logical,json=checkLogical,proto3" json:"check_logical,omitempty"` + // DOP = degree of parallelism for physical backup. 
+ Dop int32 `protobuf:"varint,6,opt,name=dop,proto3" json:"dop,omitempty"` + Level int32 `protobuf:"varint,7,opt,name=level,proto3" json:"level,omitempty"` + Filesperset int32 `protobuf:"varint,8,opt,name=filesperset,proto3" json:"filesperset,omitempty"` + SectionSize int32 `protobuf:"varint,9,opt,name=section_size,json=sectionSize,proto3" json:"section_size,omitempty"` + LocalPath string `protobuf:"bytes,10,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` + GcsPath string `protobuf:"bytes,11,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + LroInput *LROInput `protobuf:"bytes,12,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *PhysicalBackupRequest) Reset() { + *x = PhysicalBackupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhysicalBackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhysicalBackupRequest) ProtoMessage() {} + +func (x *PhysicalBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhysicalBackupRequest.ProtoReflect.Descriptor instead. +func (*PhysicalBackupRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{15} +} + +func (x *PhysicalBackupRequest) GetBackupSubType() PhysicalBackupRequest_Type { + if x != nil { + return x.BackupSubType + } + return PhysicalBackupRequest_UNKNOWN_TYPE +} + +func (x *PhysicalBackupRequest) GetBackupItems() []string { + if x != nil { + return x.BackupItems + } + return nil +} + +func (x *PhysicalBackupRequest) GetBackupset() bool { + if x != nil { + return x.Backupset + } + return false +} + +func (x *PhysicalBackupRequest) GetCompressed() bool { + if x != nil { + return x.Compressed + } + return false +} + +func (x *PhysicalBackupRequest) GetCheckLogical() bool { + if x != nil { + return x.CheckLogical + } + return false +} + +func (x *PhysicalBackupRequest) GetDop() int32 { + if x != nil { + return x.Dop + } + return 0 +} + +func (x *PhysicalBackupRequest) GetLevel() int32 { + if x != nil { + return x.Level + } + return 0 +} + +func (x *PhysicalBackupRequest) GetFilesperset() int32 { + if x != nil { + return x.Filesperset + } + return 0 +} + +func (x *PhysicalBackupRequest) GetSectionSize() int32 { + if x != nil { + return x.SectionSize + } + return 0 +} + +func (x *PhysicalBackupRequest) GetLocalPath() string { + if x != nil { + return x.LocalPath + } + return "" +} + +func (x *PhysicalBackupRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *PhysicalBackupRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type PhysicalRestoreRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + CdbName string `protobuf:"bytes,2,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + // DOP = degree of parallelism 
for a restore from a physical backup. + Dop int32 `protobuf:"varint,3,opt,name=dop,proto3" json:"dop,omitempty"` + LocalPath string `protobuf:"bytes,4,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` + GcsPath string `protobuf:"bytes,5,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + LroInput *LROInput `protobuf:"bytes,6,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *PhysicalRestoreRequest) Reset() { + *x = PhysicalRestoreRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhysicalRestoreRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhysicalRestoreRequest) ProtoMessage() {} + +func (x *PhysicalRestoreRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhysicalRestoreRequest.ProtoReflect.Descriptor instead. +func (*PhysicalRestoreRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{16} +} + +func (x *PhysicalRestoreRequest) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *PhysicalRestoreRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *PhysicalRestoreRequest) GetDop() int32 { + if x != nil { + return x.Dop + } + return 0 +} + +func (x *PhysicalRestoreRequest) GetLocalPath() string { + if x != nil { + return x.LocalPath + } + return "" +} + +func (x *PhysicalRestoreRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *PhysicalRestoreRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type CheckStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CdbName string `protobuf:"bytes,2,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + CheckStatusType CheckStatusRequest_Type `protobuf:"varint,3,opt,name=check_status_type,json=checkStatusType,proto3,enum=protos.CheckStatusRequest_Type" json:"check_status_type,omitempty"` + DbDomain string `protobuf:"bytes,4,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CheckStatusRequest) Reset() { + *x = CheckStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckStatusRequest) ProtoMessage() {} + +func (x *CheckStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
CheckStatusRequest.ProtoReflect.Descriptor instead. +func (*CheckStatusRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{17} +} + +func (x *CheckStatusRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CheckStatusRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *CheckStatusRequest) GetCheckStatusType() CheckStatusRequest_Type { + if x != nil { + return x.CheckStatusType + } + return CheckStatusRequest_UNKNOWN_TYPE +} + +func (x *CheckStatusRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CheckStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CheckStatusResponse) Reset() { + *x = CheckStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckStatusResponse) ProtoMessage() {} + +func (x *CheckStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckStatusResponse.ProtoReflect.Descriptor instead. 
+func (*CheckStatusResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{18} +} + +func (x *CheckStatusResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *CheckStatusResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +type DataPumpImportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + DbDomain string `protobuf:"bytes,2,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + // GCS path to input dump file + GcsPath string `protobuf:"bytes,3,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + // GCS path to output log file + GcsLogPath string `protobuf:"bytes,4,opt,name=gcs_log_path,json=gcsLogPath,proto3" json:"gcs_log_path,omitempty"` + LroInput *LROInput `protobuf:"bytes,5,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *DataPumpImportRequest) Reset() { + *x = DataPumpImportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpImportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpImportRequest) ProtoMessage() {} + +func (x *DataPumpImportRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpImportRequest.ProtoReflect.Descriptor instead. 
+func (*DataPumpImportRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{19} +} + +func (x *DataPumpImportRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *DataPumpImportRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *DataPumpImportRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *DataPumpImportRequest) GetGcsLogPath() string { + if x != nil { + return x.GcsLogPath + } + return "" +} + +func (x *DataPumpImportRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type DataPumpExportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + DbDomain string `protobuf:"bytes,2,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + ObjectType string `protobuf:"bytes,3,opt,name=object_type,json=objectType,proto3" json:"object_type,omitempty"` + Objects string `protobuf:"bytes,4,opt,name=objects,proto3" json:"objects,omitempty"` + GcsPath string `protobuf:"bytes,5,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + GcsLogPath string `protobuf:"bytes,6,opt,name=gcs_log_path,json=gcsLogPath,proto3" json:"gcs_log_path,omitempty"` + LroInput *LROInput `protobuf:"bytes,7,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` + FlashbackTime string `protobuf:"bytes,8,opt,name=flashback_time,json=flashbackTime,proto3" json:"flashback_time,omitempty"` +} + +func (x *DataPumpExportRequest) Reset() { + *x = DataPumpExportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpExportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpExportRequest) ProtoMessage() {} + +func (x *DataPumpExportRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpExportRequest.ProtoReflect.Descriptor instead. 
+func (*DataPumpExportRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{20} +} + +func (x *DataPumpExportRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *DataPumpExportRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *DataPumpExportRequest) GetObjectType() string { + if x != nil { + return x.ObjectType + } + return "" +} + +func (x *DataPumpExportRequest) GetObjects() string { + if x != nil { + return x.Objects + } + return "" +} + +func (x *DataPumpExportRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *DataPumpExportRequest) GetGcsLogPath() string { + if x != nil { + return x.GcsLogPath + } + return "" +} + +func (x *DataPumpExportRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +func (x *DataPumpExportRequest) GetFlashbackTime() string { + if x != nil { + return x.FlashbackTime + } + return "" +} + +// LROInput is a common part of input requests for all Async operations. +type LROInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` +} + +func (x *LROInput) Reset() { + *x = LROInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LROInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LROInput) ProtoMessage() {} + +func (x *LROInput) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LROInput.ProtoReflect.Descriptor instead. 
+func (*LROInput) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{21} +} + +func (x *LROInput) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +type BootstrapDatabaseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + DbUniqueName string `protobuf:"bytes,4,opt,name=db_unique_name,json=dbUniqueName,proto3" json:"db_unique_name,omitempty"` + Dbdomain string `protobuf:"bytes,5,opt,name=dbdomain,proto3" json:"dbdomain,omitempty"` +} + +func (x *BootstrapDatabaseRequest) Reset() { + *x = BootstrapDatabaseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapDatabaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapDatabaseRequest) ProtoMessage() {} + +func (x *BootstrapDatabaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapDatabaseRequest.ProtoReflect.Descriptor instead. +func (*BootstrapDatabaseRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{22} +} + +func (x *BootstrapDatabaseRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *BootstrapDatabaseRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *BootstrapDatabaseRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *BootstrapDatabaseRequest) GetDbUniqueName() string { + if x != nil { + return x.DbUniqueName + } + return "" +} + +func (x *BootstrapDatabaseRequest) GetDbdomain() string { + if x != nil { + return x.Dbdomain + } + return "" +} + +type BootstrapDatabaseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BootstrapDatabaseResponse) Reset() { + *x = BootstrapDatabaseResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapDatabaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapDatabaseResponse) ProtoMessage() {} + +func (x *BootstrapDatabaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapDatabaseResponse.ProtoReflect.Descriptor instead. 
+func (*BootstrapDatabaseResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{23} +} + +type BootstrapStandbyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Dbdomain string `protobuf:"bytes,3,opt,name=dbdomain,proto3" json:"dbdomain,omitempty"` +} + +func (x *BootstrapStandbyRequest) Reset() { + *x = BootstrapStandbyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapStandbyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapStandbyRequest) ProtoMessage() {} + +func (x *BootstrapStandbyRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapStandbyRequest.ProtoReflect.Descriptor instead. +func (*BootstrapStandbyRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{24} +} + +func (x *BootstrapStandbyRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *BootstrapStandbyRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *BootstrapStandbyRequest) GetDbdomain() string { + if x != nil { + return x.Dbdomain + } + return "" +} + +type BootstrapStandbyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pdbs []*BootstrapStandbyResponse_PDB `protobuf:"bytes,1,rep,name=pdbs,proto3" json:"pdbs,omitempty"` +} + +func (x *BootstrapStandbyResponse) Reset() { + *x = BootstrapStandbyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapStandbyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapStandbyResponse) ProtoMessage() {} + +func (x *BootstrapStandbyResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapStandbyResponse.ProtoReflect.Descriptor instead. 
+func (*BootstrapStandbyResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{25} +} + +func (x *BootstrapStandbyResponse) GetPdbs() []*BootstrapStandbyResponse_PDB { + if x != nil { + return x.Pdbs + } + return nil +} + +type SetParameterRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Type SetParameterRequest_Type `protobuf:"varint,3,opt,name=type,proto3,enum=protos.SetParameterRequest_Type" json:"type,omitempty"` +} + +func (x *SetParameterRequest) Reset() { + *x = SetParameterRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetParameterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetParameterRequest) ProtoMessage() {} + +func (x *SetParameterRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetParameterRequest.ProtoReflect.Descriptor instead. +func (*SetParameterRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{26} +} + +func (x *SetParameterRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SetParameterRequest) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *SetParameterRequest) GetType() SetParameterRequest_Type { + if x != nil { + return x.Type + } + return SetParameterRequest_DYNAMIC +} + +type SetParameterResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Static bool `protobuf:"varint,1,opt,name=static,proto3" json:"static,omitempty"` +} + +func (x *SetParameterResponse) Reset() { + *x = SetParameterResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetParameterResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetParameterResponse) ProtoMessage() {} + +func (x *SetParameterResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetParameterResponse.ProtoReflect.Descriptor instead. 
+func (*SetParameterResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{27} +} + +func (x *SetParameterResponse) GetStatic() bool { + if x != nil { + return x.Static + } + return false +} + +type GetParameterTypeValueRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` +} + +func (x *GetParameterTypeValueRequest) Reset() { + *x = GetParameterTypeValueRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetParameterTypeValueRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetParameterTypeValueRequest) ProtoMessage() {} + +func (x *GetParameterTypeValueRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetParameterTypeValueRequest.ProtoReflect.Descriptor instead. +func (*GetParameterTypeValueRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{28} +} + +func (x *GetParameterTypeValueRequest) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +type GetParameterTypeValueResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Types []string `protobuf:"bytes,1,rep,name=types,proto3" json:"types,omitempty"` + Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *GetParameterTypeValueResponse) Reset() { + *x = GetParameterTypeValueResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetParameterTypeValueResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetParameterTypeValueResponse) ProtoMessage() {} + +func (x *GetParameterTypeValueResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetParameterTypeValueResponse.ProtoReflect.Descriptor instead. 
+func (*GetParameterTypeValueResponse) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *GetParameterTypeValueResponse) GetTypes() []string {
+	if x != nil {
+		return x.Types
+	}
+	return nil
+}
+
+func (x *GetParameterTypeValueResponse) GetValues() []string {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+type BounceDatabaseRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Sid string `protobuf:"bytes,1,opt,name=sid,proto3" json:"sid,omitempty"`
+	// avoid_config_backup: by default we back up the config, except in
+	// scenarios where that isn't possible (like bootstrapping).
+	AvoidConfigBackup bool `protobuf:"varint,2,opt,name=avoid_config_backup,json=avoidConfigBackup,proto3" json:"avoid_config_backup,omitempty"`
+}
+
+func (x *BounceDatabaseRequest) Reset() {
+	*x = BounceDatabaseRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[30]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BounceDatabaseRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BounceDatabaseRequest) ProtoMessage() {}
+
+func (x *BounceDatabaseRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[30]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BounceDatabaseRequest.ProtoReflect.Descriptor instead.
+func (*BounceDatabaseRequest) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *BounceDatabaseRequest) GetSid() string {
+	if x != nil {
+		return x.Sid
+	}
+	return ""
+}
+
+func (x *BounceDatabaseRequest) GetAvoidConfigBackup() bool {
+	if x != nil {
+		return x.AvoidConfigBackup
+	}
+	return false
+}
+
+type BounceDatabaseResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *BounceDatabaseResponse) Reset() {
+	*x = BounceDatabaseResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[31]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BounceDatabaseResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BounceDatabaseResponse) ProtoMessage() {}
+
+func (x *BounceDatabaseResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[31]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BounceDatabaseResponse.ProtoReflect.Descriptor instead.
+func (*BounceDatabaseResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{31} +} + +type RecoverConfigFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` +} + +func (x *RecoverConfigFileRequest) Reset() { + *x = RecoverConfigFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecoverConfigFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecoverConfigFileRequest) ProtoMessage() {} + +func (x *RecoverConfigFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecoverConfigFileRequest.ProtoReflect.Descriptor instead. +func (*RecoverConfigFileRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{32} +} + +func (x *RecoverConfigFileRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +type RecoverConfigFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RecoverConfigFileResponse) Reset() { + *x = RecoverConfigFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecoverConfigFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecoverConfigFileResponse) ProtoMessage() {} + +func (x *RecoverConfigFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecoverConfigFileResponse.ProtoReflect.Descriptor instead. 
+func (*RecoverConfigFileResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{33} +} + +type FetchServiceImageMetaDataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FetchServiceImageMetaDataRequest) Reset() { + *x = FetchServiceImageMetaDataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FetchServiceImageMetaDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchServiceImageMetaDataRequest) ProtoMessage() {} + +func (x *FetchServiceImageMetaDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchServiceImageMetaDataRequest.ProtoReflect.Descriptor instead. +func (*FetchServiceImageMetaDataRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{34} +} + +type FetchServiceImageMetaDataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + CdbName string `protobuf:"bytes,2,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + OracleHome string `protobuf:"bytes,3,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` +} + +func (x *FetchServiceImageMetaDataResponse) Reset() { + *x = FetchServiceImageMetaDataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FetchServiceImageMetaDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchServiceImageMetaDataResponse) ProtoMessage() {} + +func (x *FetchServiceImageMetaDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchServiceImageMetaDataResponse.ProtoReflect.Descriptor instead. +func (*FetchServiceImageMetaDataResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{35} +} + +func (x *FetchServiceImageMetaDataResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *FetchServiceImageMetaDataResponse) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *FetchServiceImageMetaDataResponse) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +// Suppressed describes user creates/updates which will be suppressed in the +// current release. 
+type UsersChangedResponse_Suppressed struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SuppressType UsersChangedResponse_Type `protobuf:"varint,1,opt,name=suppress_type,json=suppressType,proto3,enum=protos.UsersChangedResponse_Type" json:"suppress_type,omitempty"`
+	UserName     string                    `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"`
+	// sql is the suppressed command that can bring the user to the
+	// spec-defined state.
+	Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
+}
+
+func (x *UsersChangedResponse_Suppressed) Reset() {
+	*x = UsersChangedResponse_Suppressed{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[36]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UsersChangedResponse_Suppressed) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UsersChangedResponse_Suppressed) ProtoMessage() {}
+
+func (x *UsersChangedResponse_Suppressed) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[36]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UsersChangedResponse_Suppressed.ProtoReflect.Descriptor instead.
+func (*UsersChangedResponse_Suppressed) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *UsersChangedResponse_Suppressed) GetSuppressType() UsersChangedResponse_Type {
+	if x != nil {
+		return x.SuppressType
+	}
+	return UsersChangedResponse_UNKNOWN_TYPE
+}
+
+func (x *UsersChangedResponse_Suppressed) GetUserName() string {
+	if x != nil {
+		return x.UserName
+	}
+	return ""
+}
+
+func (x *UsersChangedResponse_Suppressed) GetSql() string {
+	if x != nil {
+		return x.Sql
+	}
+	return ""
+}
+
+type BootstrapStandbyResponse_User struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	UserName string   `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"`
+	Privs    []string `protobuf:"bytes,2,rep,name=privs,proto3" json:"privs,omitempty"`
+}
+
+func (x *BootstrapStandbyResponse_User) Reset() {
+	*x = BootstrapStandbyResponse_User{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[37]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BootstrapStandbyResponse_User) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BootstrapStandbyResponse_User) ProtoMessage() {}
+
+func (x *BootstrapStandbyResponse_User) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[37]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BootstrapStandbyResponse_User.ProtoReflect.Descriptor instead.
+func (*BootstrapStandbyResponse_User) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{25, 0} +} + +func (x *BootstrapStandbyResponse_User) GetUserName() string { + if x != nil { + return x.UserName + } + return "" +} + +func (x *BootstrapStandbyResponse_User) GetPrivs() []string { + if x != nil { + return x.Privs + } + return nil +} + +type BootstrapStandbyResponse_PDB struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + Users []*BootstrapStandbyResponse_User `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` +} + +func (x *BootstrapStandbyResponse_PDB) Reset() { + *x = BootstrapStandbyResponse_PDB{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapStandbyResponse_PDB) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapStandbyResponse_PDB) ProtoMessage() {} + +func (x *BootstrapStandbyResponse_PDB) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapStandbyResponse_PDB.ProtoReflect.Descriptor instead. +func (*BootstrapStandbyResponse_PDB) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP(), []int{25, 1} +} + +func (x *BootstrapStandbyResponse_PDB) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *BootstrapStandbyResponse_PDB) GetUsers() []*BootstrapStandbyResponse_User { + if x != nil { + return x.Users + } + return nil +} + +var File_oracle_pkg_agents_config_agent_protos_service_proto protoreflect.FileDescriptor + +var file_oracle_pkg_agents_config_agent_protos_service_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x1a, 0x23, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x9b, 0x02, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x5f, 0x68, + 0x6f, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x62, 0x5f, 0x75, 0x6e, + 0x69, 
0x71, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x62, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x53, + 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x99, 0x01, + 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x47, 0x73, 0x6d, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x64, 0x62, 0x4e, 
0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x5c, 0x0a, 0x1d, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x67, 0x73, 0x6d, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x47, 0x73, 0x6d, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x47, 0x73, 0x6d, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x52, 0x65, 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x55, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0xdb, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6d, 0x64, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, + 0x65, 0x72, 0x73, 0x43, 0x6d, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, + 0x70, 0x72, 0x69, 0x76, 0x73, 0x5f, 0x63, 0x6d, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x50, 0x72, 0x69, 0x76, 0x73, 0x43, 0x6d, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x2d, 0x0a, + 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 
0x42, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x28, 0x0a, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, + 0x5f, 0x63, 0x6d, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x43, 0x6d, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, + 0x61, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x73, 0x5f, 0x63, 0x6d, 0x64, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x50, 0x72, 0x69, 0x76, 0x73, 0x43, + 0x6d, 0x64, 0x22, 0x54, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xce, 0x01, 0x0a, 0x04, 0x55, 0x73, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, + 0x73, 0x12, 0x51, 0x0a, 0x17, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x67, 0x73, + 0x6d, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x47, 0x73, 0x6d, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x47, 0x73, 0x6d, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x52, 0x65, 0x66, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x73, + 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x5d, 0x0a, 0x13, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x09, 0x75, + 0x73, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x14, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x0a, 0x73, + 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 
0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x73, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x52, 0x0a, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x1a, 0x83, 0x01, 0x0a, 0x0a, 0x53, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x73, + 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x30, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, + 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x02, 0x22, 0x5c, 0x0a, 0x12, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, + 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x93, 0x04, 0x0a, 0x15, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0f, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x68, + 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 
0x63, 0x68, 0x65, 0x63, 0x6b, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x10, 0x0a, + 0x03, 0x64, 0x6f, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x6f, 0x70, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x70, 0x65, + 0x72, 0x73, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x70, 0x65, 0x72, 0x73, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x73, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x63, 0x73, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x63, 0x73, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, 0x72, 0x6f, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x22, 0x52, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, + 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x41, 0x42, + 0x4c, 0x45, 0x53, 0x50, 0x41, 0x43, 0x45, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x41, 0x54, + 0x41, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x04, 0x22, 0xd3, 0x01, 0x0a, 0x16, 0x50, 0x68, 0x79, 0x73, + 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x64, 0x6f, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2d, + 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0xd5, 0x01, + 0x0a, 0x12, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x26, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, + 0x4e, 0x43, 0x45, 0x10, 0x01, 0x22, 0x52, 0x0a, 0x13, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x15, 0x44, 0x61, + 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x67, + 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, + 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0c, 0x67, 0x63, 0x73, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, 0x63, + 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, + 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x9d, 0x02, 0x0a, 0x15, 0x44, 0x61, 0x74, 0x61, + 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 
0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x20, 0x0a, 0x0c, 0x67, 0x63, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, 0x63, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x4c, 0x52, + 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x62, + 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x08, 0x4c, 0x52, 0x4f, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xa5, 0x01, 0x0a, 0x18, 0x42, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0e, + 0x64, 0x62, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x62, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x1b, + 0x0a, 0x19, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6a, 0x0a, 0x17, 0x42, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, + 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, + 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xee, 0x01, 0x0a, 0x18, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x70, 0x64, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x44, 0x42, 0x52, 0x04, 0x70, 0x64, 0x62, 
0x73, 0x1a, 0x39, + 0x0a, 0x04, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x69, 0x76, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x70, 0x72, 0x69, 0x76, 0x73, 0x1a, 0x5d, 0x0a, 0x03, 0x50, 0x44, 0x42, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x75, + 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, + 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x55, 0x73, 0x65, + 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x53, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x2d, + 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, + 0x43, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x2e, 0x0a, + 0x14, 0x53, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x32, 0x0a, + 0x1c, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x22, 0x4d, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x22, 0x59, 0x0a, 0x15, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x43, + 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x18, 0x0a, 0x16, 0x42, + 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x35, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, + 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x20, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x79, 0x0a, + 0x21, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x32, 0xdd, 0x0e, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x44, 0x42, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, 0x55, 0x73, 0x65, 0x72, 0x73, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, + 0x73, 0x65, 0x72, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, + 0x73, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0e, + 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x52, + 0x0a, 0x0f, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x68, 0x79, 0x73, 0x69, + 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x09, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, + 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x67, 
0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x56, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x56, 0x0a, 0x11, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x10, 0x42, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x12, 0x1f, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, + 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x1c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x11, 0x52, 0x65, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x19, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x65, 0x5a, 0x63, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x65, 0x6c, 0x63, 0x61, 0x72, + 0x72, 0x6f, 0x2d, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescOnce sync.Once + file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescData = file_oracle_pkg_agents_config_agent_protos_service_proto_rawDesc +) + +func 
file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescGZIP() []byte { + file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescOnce.Do(func() { + file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescData) + }) + return file_oracle_pkg_agents_config_agent_protos_service_proto_rawDescData +} + +var file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes = make([]protoimpl.MessageInfo, 39) +var file_oracle_pkg_agents_config_agent_protos_service_proto_goTypes = []interface{}{ + (UsersChangedResponse_Type)(0), // 0: protos.UsersChangedResponse.Type + (PhysicalBackupRequest_Type)(0), // 1: protos.PhysicalBackupRequest.Type + (CheckStatusRequest_Type)(0), // 2: protos.CheckStatusRequest.Type + (SetParameterRequest_Type)(0), // 3: protos.SetParameterRequest.Type + (*CreateCDBRequest)(nil), // 4: protos.CreateCDBRequest + (*CreateListenerRequest)(nil), // 5: protos.CreateListenerRequest + (*CreateListenerResponse)(nil), // 6: protos.CreateListenerResponse + (*GsmSecretReference)(nil), // 7: protos.GsmSecretReference + (*CreateDatabaseRequest)(nil), // 8: protos.CreateDatabaseRequest + (*CreateDatabaseResponse)(nil), // 9: protos.CreateDatabaseResponse + (*CreateUsersRequest)(nil), // 10: protos.CreateUsersRequest + (*CreateUsersResponse)(nil), // 11: protos.CreateUsersResponse + (*CreateCDBUserRequest)(nil), // 12: protos.CreateCDBUserRequest + (*CreateCDBUserResponse)(nil), // 13: protos.CreateCDBUserResponse + (*User)(nil), // 14: protos.User + (*UsersChangedRequest)(nil), // 15: protos.UsersChangedRequest + (*UsersChangedResponse)(nil), // 16: protos.UsersChangedResponse + (*UpdateUsersRequest)(nil), // 17: protos.UpdateUsersRequest + (*UpdateUsersResponse)(nil), // 18: protos.UpdateUsersResponse + (*PhysicalBackupRequest)(nil), // 19: protos.PhysicalBackupRequest + (*PhysicalRestoreRequest)(nil), // 20: protos.PhysicalRestoreRequest + (*CheckStatusRequest)(nil), // 21: protos.CheckStatusRequest + (*CheckStatusResponse)(nil), // 22: protos.CheckStatusResponse + (*DataPumpImportRequest)(nil), // 23: protos.DataPumpImportRequest + (*DataPumpExportRequest)(nil), // 24: protos.DataPumpExportRequest + (*LROInput)(nil), // 25: protos.LROInput + (*BootstrapDatabaseRequest)(nil), // 26: protos.BootstrapDatabaseRequest + (*BootstrapDatabaseResponse)(nil), // 27: protos.BootstrapDatabaseResponse + (*BootstrapStandbyRequest)(nil), // 28: protos.BootstrapStandbyRequest + (*BootstrapStandbyResponse)(nil), // 29: protos.BootstrapStandbyResponse + (*SetParameterRequest)(nil), // 30: protos.SetParameterRequest + (*SetParameterResponse)(nil), // 31: protos.SetParameterResponse + (*GetParameterTypeValueRequest)(nil), // 32: protos.GetParameterTypeValueRequest + (*GetParameterTypeValueResponse)(nil), // 33: protos.GetParameterTypeValueResponse + (*BounceDatabaseRequest)(nil), // 34: protos.BounceDatabaseRequest + (*BounceDatabaseResponse)(nil), // 35: protos.BounceDatabaseResponse + (*RecoverConfigFileRequest)(nil), // 36: protos.RecoverConfigFileRequest + (*RecoverConfigFileResponse)(nil), // 37: protos.RecoverConfigFileResponse + (*FetchServiceImageMetaDataRequest)(nil), // 38: protos.FetchServiceImageMetaDataRequest + (*FetchServiceImageMetaDataResponse)(nil), // 39: protos.FetchServiceImageMetaDataResponse + (*UsersChangedResponse_Suppressed)(nil), // 40: 
protos.UsersChangedResponse.Suppressed + (*BootstrapStandbyResponse_User)(nil), // 41: protos.BootstrapStandbyResponse.User + (*BootstrapStandbyResponse_PDB)(nil), // 42: protos.BootstrapStandbyResponse.PDB + (*longrunning.ListOperationsRequest)(nil), // 43: google.longrunning.ListOperationsRequest + (*longrunning.GetOperationRequest)(nil), // 44: google.longrunning.GetOperationRequest + (*longrunning.DeleteOperationRequest)(nil), // 45: google.longrunning.DeleteOperationRequest + (*longrunning.Operation)(nil), // 46: google.longrunning.Operation + (*longrunning.ListOperationsResponse)(nil), // 47: google.longrunning.ListOperationsResponse + (*empty.Empty)(nil), // 48: google.protobuf.Empty +} +var file_oracle_pkg_agents_config_agent_protos_service_proto_depIdxs = []int32{ + 7, // 0: protos.CreateDatabaseRequest.admin_password_gsm_secret_ref:type_name -> protos.GsmSecretReference + 14, // 1: protos.CreateUsersRequest.user:type_name -> protos.User + 7, // 2: protos.User.password_gsm_secret_ref:type_name -> protos.GsmSecretReference + 14, // 3: protos.UsersChangedRequest.user_specs:type_name -> protos.User + 40, // 4: protos.UsersChangedResponse.suppressed:type_name -> protos.UsersChangedResponse.Suppressed + 14, // 5: protos.UpdateUsersRequest.user_specs:type_name -> protos.User + 1, // 6: protos.PhysicalBackupRequest.backup_sub_type:type_name -> protos.PhysicalBackupRequest.Type + 25, // 7: protos.PhysicalBackupRequest.lro_input:type_name -> protos.LROInput + 25, // 8: protos.PhysicalRestoreRequest.lro_input:type_name -> protos.LROInput + 2, // 9: protos.CheckStatusRequest.check_status_type:type_name -> protos.CheckStatusRequest.Type + 25, // 10: protos.DataPumpImportRequest.lro_input:type_name -> protos.LROInput + 25, // 11: protos.DataPumpExportRequest.lro_input:type_name -> protos.LROInput + 42, // 12: protos.BootstrapStandbyResponse.pdbs:type_name -> protos.BootstrapStandbyResponse.PDB + 3, // 13: protos.SetParameterRequest.type:type_name -> protos.SetParameterRequest.Type + 0, // 14: protos.UsersChangedResponse.Suppressed.suppress_type:type_name -> protos.UsersChangedResponse.Type + 41, // 15: protos.BootstrapStandbyResponse.PDB.users:type_name -> protos.BootstrapStandbyResponse.User + 8, // 16: protos.ConfigAgent.CreateDatabase:input_type -> protos.CreateDatabaseRequest + 10, // 17: protos.ConfigAgent.CreateUsers:input_type -> protos.CreateUsersRequest + 12, // 18: protos.ConfigAgent.CreateCDBUser:input_type -> protos.CreateCDBUserRequest + 15, // 19: protos.ConfigAgent.UsersChanged:input_type -> protos.UsersChangedRequest + 17, // 20: protos.ConfigAgent.UpdateUsers:input_type -> protos.UpdateUsersRequest + 19, // 21: protos.ConfigAgent.PhysicalBackup:input_type -> protos.PhysicalBackupRequest + 20, // 22: protos.ConfigAgent.PhysicalRestore:input_type -> protos.PhysicalRestoreRequest + 21, // 23: protos.ConfigAgent.CheckStatus:input_type -> protos.CheckStatusRequest + 4, // 24: protos.ConfigAgent.CreateCDB:input_type -> protos.CreateCDBRequest + 5, // 25: protos.ConfigAgent.CreateListener:input_type -> protos.CreateListenerRequest + 23, // 26: protos.ConfigAgent.DataPumpImport:input_type -> protos.DataPumpImportRequest + 43, // 27: protos.ConfigAgent.ListOperations:input_type -> google.longrunning.ListOperationsRequest + 44, // 28: protos.ConfigAgent.GetOperation:input_type -> google.longrunning.GetOperationRequest + 45, // 29: protos.ConfigAgent.DeleteOperation:input_type -> google.longrunning.DeleteOperationRequest + 26, // 30: protos.ConfigAgent.BootstrapDatabase:input_type -> 
protos.BootstrapDatabaseRequest + 28, // 31: protos.ConfigAgent.BootstrapStandby:input_type -> protos.BootstrapStandbyRequest + 24, // 32: protos.ConfigAgent.DataPumpExport:input_type -> protos.DataPumpExportRequest + 30, // 33: protos.ConfigAgent.SetParameter:input_type -> protos.SetParameterRequest + 32, // 34: protos.ConfigAgent.GetParameterTypeValue:input_type -> protos.GetParameterTypeValueRequest + 34, // 35: protos.ConfigAgent.BounceDatabase:input_type -> protos.BounceDatabaseRequest + 36, // 36: protos.ConfigAgent.RecoverConfigFile:input_type -> protos.RecoverConfigFileRequest + 38, // 37: protos.ConfigAgent.FetchServiceImageMetaData:input_type -> protos.FetchServiceImageMetaDataRequest + 9, // 38: protos.ConfigAgent.CreateDatabase:output_type -> protos.CreateDatabaseResponse + 11, // 39: protos.ConfigAgent.CreateUsers:output_type -> protos.CreateUsersResponse + 13, // 40: protos.ConfigAgent.CreateCDBUser:output_type -> protos.CreateCDBUserResponse + 16, // 41: protos.ConfigAgent.UsersChanged:output_type -> protos.UsersChangedResponse + 18, // 42: protos.ConfigAgent.UpdateUsers:output_type -> protos.UpdateUsersResponse + 46, // 43: protos.ConfigAgent.PhysicalBackup:output_type -> google.longrunning.Operation + 46, // 44: protos.ConfigAgent.PhysicalRestore:output_type -> google.longrunning.Operation + 22, // 45: protos.ConfigAgent.CheckStatus:output_type -> protos.CheckStatusResponse + 46, // 46: protos.ConfigAgent.CreateCDB:output_type -> google.longrunning.Operation + 6, // 47: protos.ConfigAgent.CreateListener:output_type -> protos.CreateListenerResponse + 46, // 48: protos.ConfigAgent.DataPumpImport:output_type -> google.longrunning.Operation + 47, // 49: protos.ConfigAgent.ListOperations:output_type -> google.longrunning.ListOperationsResponse + 46, // 50: protos.ConfigAgent.GetOperation:output_type -> google.longrunning.Operation + 48, // 51: protos.ConfigAgent.DeleteOperation:output_type -> google.protobuf.Empty + 46, // 52: protos.ConfigAgent.BootstrapDatabase:output_type -> google.longrunning.Operation + 29, // 53: protos.ConfigAgent.BootstrapStandby:output_type -> protos.BootstrapStandbyResponse + 46, // 54: protos.ConfigAgent.DataPumpExport:output_type -> google.longrunning.Operation + 31, // 55: protos.ConfigAgent.SetParameter:output_type -> protos.SetParameterResponse + 33, // 56: protos.ConfigAgent.GetParameterTypeValue:output_type -> protos.GetParameterTypeValueResponse + 35, // 57: protos.ConfigAgent.BounceDatabase:output_type -> protos.BounceDatabaseResponse + 37, // 58: protos.ConfigAgent.RecoverConfigFile:output_type -> protos.RecoverConfigFileResponse + 39, // 59: protos.ConfigAgent.FetchServiceImageMetaData:output_type -> protos.FetchServiceImageMetaDataResponse + 38, // [38:60] is the sub-list for method output_type + 16, // [16:38] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_oracle_pkg_agents_config_agent_protos_service_proto_init() } +func file_oracle_pkg_agents_config_agent_protos_service_proto_init() { + if File_oracle_pkg_agents_config_agent_protos_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCDBRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateListenerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateListenerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GsmSecretReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDatabaseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDatabaseResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUsersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUsersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCDBUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCDBUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*User); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsersChangedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsersChangedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateUsersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateUsersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhysicalBackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhysicalRestoreRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpImportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpExportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LROInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapDatabaseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapDatabaseResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetParameterRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetParameterResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetParameterTypeValueRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetParameterTypeValueResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceDatabaseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceDatabaseResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverConfigFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverConfigFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchServiceImageMetaDataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchServiceImageMetaDataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsersChangedResponse_Suppressed); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyResponse_User); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyResponse_PDB); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oracle_pkg_agents_config_agent_protos_service_proto_rawDesc, + NumEnums: 4, + NumMessages: 39, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_oracle_pkg_agents_config_agent_protos_service_proto_goTypes, + DependencyIndexes: file_oracle_pkg_agents_config_agent_protos_service_proto_depIdxs, + EnumInfos: file_oracle_pkg_agents_config_agent_protos_service_proto_enumTypes, + MessageInfos: file_oracle_pkg_agents_config_agent_protos_service_proto_msgTypes, + }.Build() + File_oracle_pkg_agents_config_agent_protos_service_proto = out.File + file_oracle_pkg_agents_config_agent_protos_service_proto_rawDesc = nil + file_oracle_pkg_agents_config_agent_protos_service_proto_goTypes = nil + file_oracle_pkg_agents_config_agent_protos_service_proto_depIdxs = nil +} diff --git a/oracle/pkg/agents/config_agent/protos/service.proto b/oracle/pkg/agents/config_agent/protos/service.proto new file mode 100644 index 0000000..4fbd556 --- /dev/null +++ b/oracle/pkg/agents/config_agent/protos/service.proto @@ -0,0 +1,338 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Config Agent proto for gRPC communications from controllers. 
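+//
+// A minimal sketch (not part of the original change) of how a controller
+// could call this service through the generated Go stubs. The address,
+// request values, and error handling below are illustrative assumptions:
+//
+//   ctx := context.Background()
+//   conn, err := grpc.Dial("localhost:3514", grpc.WithInsecure())
+//   if err != nil {
+//     return err
+//   }
+//   defer conn.Close()
+//   client := protos.NewConfigAgentClient(conn)
+//   resp, err := client.CheckStatus(ctx, &protos.CheckStatusRequest{
+//     Name:            "mydb",
+//     CdbName:         "MYCDB",
+//     CheckStatusType: protos.CheckStatusRequest_INSTANCE,
+//   })
+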
+syntax = "proto3"; + +package protos; + +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos;protos"; + +service ConfigAgent { + rpc CreateDatabase(CreateDatabaseRequest) returns (CreateDatabaseResponse) {} + rpc CreateUsers(CreateUsersRequest) returns (CreateUsersResponse) {} + rpc CreateCDBUser(CreateCDBUserRequest) returns (CreateCDBUserResponse) {} + rpc UsersChanged(UsersChangedRequest) returns (UsersChangedResponse) {} + rpc UpdateUsers(UpdateUsersRequest) returns (UpdateUsersResponse) {} + rpc PhysicalBackup(PhysicalBackupRequest) + returns (google.longrunning.Operation) {} + rpc PhysicalRestore(PhysicalRestoreRequest) + returns (google.longrunning.Operation) {} + rpc CheckStatus(CheckStatusRequest) returns (CheckStatusResponse) {} + rpc CreateCDB(CreateCDBRequest) returns (google.longrunning.Operation) {} + rpc CreateListener(CreateListenerRequest) returns (CreateListenerResponse) {} + rpc DataPumpImport(DataPumpImportRequest) + returns (google.longrunning.Operation); + // Lists operations that match the specified filter in the request. + rpc ListOperations(google.longrunning.ListOperationsRequest) + returns (google.longrunning.ListOperationsResponse); + // Gets the latest state of a long-running operation. Clients can use this + // method to poll the operation result. + rpc GetOperation(google.longrunning.GetOperationRequest) + returns (google.longrunning.Operation); + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. + rpc DeleteOperation(google.longrunning.DeleteOperationRequest) + returns (google.protobuf.Empty); + rpc BootstrapDatabase(BootstrapDatabaseRequest) + returns (google.longrunning.Operation) {}; + rpc BootstrapStandby(BootstrapStandbyRequest) + returns (BootstrapStandbyResponse) {}; + rpc DataPumpExport(DataPumpExportRequest) + returns (google.longrunning.Operation); + rpc SetParameter(SetParameterRequest) returns (SetParameterResponse) {} + rpc GetParameterTypeValue(GetParameterTypeValueRequest) + returns (GetParameterTypeValueResponse) {} + rpc BounceDatabase(BounceDatabaseRequest) returns (BounceDatabaseResponse) {} + rpc RecoverConfigFile(RecoverConfigFileRequest) + returns (RecoverConfigFileResponse) {} + rpc FetchServiceImageMetaData(FetchServiceImageMetaDataRequest) + returns (FetchServiceImageMetaDataResponse) {} +} + +message CreateCDBRequest { + string oracle_home = 1; + string sid = 2; + string db_unique_name = 3; + string character_set = 4; + int32 memory_percent = 5; + repeated string additional_params = 6; + string version = 7; + string db_domain = 8; +} + +message CreateListenerRequest { + string name = 1; + int32 port = 2; + string protocol = 3; + string oracle_home = 4; + string db_domain = 5; +} + +message CreateListenerResponse {} + +message GsmSecretReference { + string project_id = 1; + string secret_id = 2; + string version = 3; + string last_version = 4; +} + +message CreateDatabaseRequest { + string cdb_name = 1; + string name = 2; + // only being used for plaintext password scenario. + // GSM doesn't use this field. + string password = 3; + string db_domain = 4; + GsmSecretReference admin_password_gsm_secret_ref = 5; + // only being used for plaintext password scenario. + // GSM doesn't use this field. 
+ string last_password = 6; +} + +message CreateDatabaseResponse { + string status = 1; + string error_message = 2; +} + +message CreateUsersRequest { + string cdb_name = 1; + string pdb_name = 2; + repeated string create_users_cmd = 3; + repeated string grant_privs_cmd = 4; + string db_domain = 5; + repeated User user = 6; +} + +message CreateUsersResponse { + string status = 1; +} + +message CreateCDBUserRequest { + string cdb_name = 1; + repeated string create_users_cmd = 3; + repeated string grant_privs_cmd = 4; +} + +message CreateCDBUserResponse { + string status = 1; + string error_message = 2; +} + +message User { + string name = 1; + // only being used for plaintext password scenario. + // GSM doesn't use this field. + string password = 2; + repeated string privileges = 3; + GsmSecretReference password_gsm_secret_ref = 4; + // only being used for plaintext password scenario. + // GSM doesn't use this field. + string last_password = 5; +} + +message UsersChangedRequest { + string pdb_name = 1; + repeated User user_specs = 2; +} + +message UsersChangedResponse { + enum Type { + UNKNOWN_TYPE = 0; + DELETE = 1; + CREATE = 2; + } + + // Suppressed describes user creates/updates which will be suppressed in the + // current release. + message Suppressed { + Type suppress_type = 1; + string user_name = 2; + // sql is the suppressed cmd which can update the user to the spec defined + // state + string sql = 3; + } + bool changed = 1; + repeated Suppressed suppressed = 2; +} + +message UpdateUsersRequest { + string pdb_name = 1; + repeated User user_specs = 2; +} + +message UpdateUsersResponse {} + +message PhysicalBackupRequest { + enum Type { + UNKNOWN_TYPE = 0; + INSTANCE = 1; + DATABASE = 2; + TABLESPACE = 3; + DATAFILE = 4; + } + + Type backup_sub_type = 1; + repeated string backup_items = 2; + bool backupset = 3; + bool compressed = 4; + bool check_logical = 5; + + // DOP = degree of parallelism for physical backup. + int32 dop = 6; + + int32 level = 7; + int32 filesperset = 8; + int32 section_size = 9; + string local_path = 10; + string gcs_path = 11; + + LROInput lro_input = 12; +} + +message PhysicalRestoreRequest { + string instance_name = 1; + string cdb_name = 2; + + // DOP = degree of parallelism for a restore from a physical backup. + int32 dop = 3; + string local_path = 4; + string gcs_path = 5; + + LROInput lro_input = 6; +} + +message CheckStatusRequest { + enum Type { + UNKNOWN_TYPE = 0; + INSTANCE = 1; + } + + string name = 1; + string cdb_name = 2; + Type check_status_type = 3; + string db_domain = 4; +} + +message CheckStatusResponse { + string status = 1; + string error_message = 2; +} + +message DataPumpImportRequest { + string pdb_name = 1; + string db_domain = 2; + // GCS path to input dump file + string gcs_path = 3; + // GCS path to output log file + string gcs_log_path = 4; + + LROInput lro_input = 5; +} + +message DataPumpExportRequest { + string pdb_name = 1; + string db_domain = 2; + string object_type = 3; + string objects = 4; + string gcs_path = 5; + string gcs_log_path = 6; + LROInput lro_input = 7; + string flashback_time = 8; +} + +// LROInput is a common part of input requests for all Async operations. 
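+// operation_id is a caller-chosen identifier for the resulting long-running
+// operation, which controllers can then track via the ListOperations and
+// GetOperation RPCs declared above.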
+message LROInput { + string operation_id = 1; +} + +message BootstrapDatabaseRequest { + string cdb_name = 1; + string version = 2; + string host = 3; + string db_unique_name = 4; + string dbdomain = 5; +} + +message BootstrapDatabaseResponse {} + +message BootstrapStandbyRequest { + string cdb_name = 1; + string version = 2; + string dbdomain = 3; +} + +message BootstrapStandbyResponse { + message User { + string user_name = 1; + repeated string privs = 2; + } + message PDB { + string pdb_name = 1; + repeated User users = 2; + } + repeated PDB pdbs = 1; +} + +message SetParameterRequest { + string key = 1; + string value = 2; + Type type = 3; + enum Type { + DYNAMIC = 0; + STATIC = 1; + DEFERRED = 2; + } +} + +message SetParameterResponse { + bool static = 1; +} + +message GetParameterTypeValueRequest { + repeated string keys = 1; +} + +message GetParameterTypeValueResponse { + repeated string types = 1; + repeated string values = 2; +} + +message BounceDatabaseRequest { + string sid = 1; + // avoid_config_backup: by default we backup the config except for scenarios + // when it isn't possible (like bootstrapping) + bool avoid_config_backup = 2; +} + +message BounceDatabaseResponse {} + +message RecoverConfigFileRequest { + string cdb_name = 1; +} + +message RecoverConfigFileResponse {} + +message FetchServiceImageMetaDataRequest {} + +message FetchServiceImageMetaDataResponse { + string version = 1; + string cdb_name = 2; + string oracle_home = 3; +} diff --git a/oracle/pkg/agents/config_agent/protos/service_grpc.pb.go b/oracle/pkg/agents/config_agent/protos/service_grpc.pb.go new file mode 100644 index 0000000..f545eba --- /dev/null +++ b/oracle/pkg/agents/config_agent/protos/service_grpc.pb.go @@ -0,0 +1,871 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package protos + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + longrunning "google.golang.org/genproto/googleapis/longrunning" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ConfigAgentClient is the client API for ConfigAgent service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ConfigAgentClient interface { + CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*CreateDatabaseResponse, error) + CreateUsers(ctx context.Context, in *CreateUsersRequest, opts ...grpc.CallOption) (*CreateUsersResponse, error) + CreateCDBUser(ctx context.Context, in *CreateCDBUserRequest, opts ...grpc.CallOption) (*CreateCDBUserResponse, error) + UsersChanged(ctx context.Context, in *UsersChangedRequest, opts ...grpc.CallOption) (*UsersChangedResponse, error) + UpdateUsers(ctx context.Context, in *UpdateUsersRequest, opts ...grpc.CallOption) (*UpdateUsersResponse, error) + PhysicalBackup(ctx context.Context, in *PhysicalBackupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + PhysicalRestore(ctx context.Context, in *PhysicalRestoreRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CheckStatus(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) (*CheckStatusResponse, error) + CreateCDB(ctx context.Context, in *CreateCDBRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CreateListener(ctx context.Context, in *CreateListenerRequest, opts ...grpc.CallOption) (*CreateListenerResponse, error) + DataPumpImport(ctx context.Context, in *DataPumpImportRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // Lists operations that match the specified filter in the request. + ListOperations(ctx context.Context, in *longrunning.ListOperationsRequest, opts ...grpc.CallOption) (*longrunning.ListOperationsResponse, error) + // Gets the latest state of a long-running operation. Clients can use this + // method to poll the operation result. + GetOperation(ctx context.Context, in *longrunning.GetOperationRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. 
+ DeleteOperation(ctx context.Context, in *longrunning.DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + BootstrapDatabase(ctx context.Context, in *BootstrapDatabaseRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + BootstrapStandby(ctx context.Context, in *BootstrapStandbyRequest, opts ...grpc.CallOption) (*BootstrapStandbyResponse, error) + DataPumpExport(ctx context.Context, in *DataPumpExportRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + SetParameter(ctx context.Context, in *SetParameterRequest, opts ...grpc.CallOption) (*SetParameterResponse, error) + GetParameterTypeValue(ctx context.Context, in *GetParameterTypeValueRequest, opts ...grpc.CallOption) (*GetParameterTypeValueResponse, error) + BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error) + RecoverConfigFile(ctx context.Context, in *RecoverConfigFileRequest, opts ...grpc.CallOption) (*RecoverConfigFileResponse, error) + FetchServiceImageMetaData(ctx context.Context, in *FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*FetchServiceImageMetaDataResponse, error) +} + +type configAgentClient struct { + cc grpc.ClientConnInterface +} + +func NewConfigAgentClient(cc grpc.ClientConnInterface) ConfigAgentClient { + return &configAgentClient{cc} +} + +func (c *configAgentClient) CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*CreateDatabaseResponse, error) { + out := new(CreateDatabaseResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CreateDatabase", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) CreateUsers(ctx context.Context, in *CreateUsersRequest, opts ...grpc.CallOption) (*CreateUsersResponse, error) { + out := new(CreateUsersResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CreateUsers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) CreateCDBUser(ctx context.Context, in *CreateCDBUserRequest, opts ...grpc.CallOption) (*CreateCDBUserResponse, error) { + out := new(CreateCDBUserResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CreateCDBUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) UsersChanged(ctx context.Context, in *UsersChangedRequest, opts ...grpc.CallOption) (*UsersChangedResponse, error) { + out := new(UsersChangedResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/UsersChanged", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) UpdateUsers(ctx context.Context, in *UpdateUsersRequest, opts ...grpc.CallOption) (*UpdateUsersResponse, error) { + out := new(UpdateUsersResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/UpdateUsers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) PhysicalBackup(ctx context.Context, in *PhysicalBackupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/PhysicalBackup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) PhysicalRestore(ctx context.Context, in *PhysicalRestoreRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/PhysicalRestore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) CheckStatus(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) (*CheckStatusResponse, error) { + out := new(CheckStatusResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CheckStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) CreateCDB(ctx context.Context, in *CreateCDBRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CreateCDB", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) CreateListener(ctx context.Context, in *CreateListenerRequest, opts ...grpc.CallOption) (*CreateListenerResponse, error) { + out := new(CreateListenerResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/CreateListener", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) DataPumpImport(ctx context.Context, in *DataPumpImportRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/DataPumpImport", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) ListOperations(ctx context.Context, in *longrunning.ListOperationsRequest, opts ...grpc.CallOption) (*longrunning.ListOperationsResponse, error) { + out := new(longrunning.ListOperationsResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) GetOperation(ctx context.Context, in *longrunning.GetOperationRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/GetOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) DeleteOperation(ctx context.Context, in *longrunning.DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/DeleteOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) BootstrapDatabase(ctx context.Context, in *BootstrapDatabaseRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/BootstrapDatabase", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) BootstrapStandby(ctx context.Context, in *BootstrapStandbyRequest, opts ...grpc.CallOption) (*BootstrapStandbyResponse, error) { + out := new(BootstrapStandbyResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/BootstrapStandby", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) DataPumpExport(ctx context.Context, in *DataPumpExportRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/DataPumpExport", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) SetParameter(ctx context.Context, in *SetParameterRequest, opts ...grpc.CallOption) (*SetParameterResponse, error) { + out := new(SetParameterResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/SetParameter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) GetParameterTypeValue(ctx context.Context, in *GetParameterTypeValueRequest, opts ...grpc.CallOption) (*GetParameterTypeValueResponse, error) { + out := new(GetParameterTypeValueResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/GetParameterTypeValue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error) { + out := new(BounceDatabaseResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/BounceDatabase", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) RecoverConfigFile(ctx context.Context, in *RecoverConfigFileRequest, opts ...grpc.CallOption) (*RecoverConfigFileResponse, error) { + out := new(RecoverConfigFileResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/RecoverConfigFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configAgentClient) FetchServiceImageMetaData(ctx context.Context, in *FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*FetchServiceImageMetaDataResponse, error) { + out := new(FetchServiceImageMetaDataResponse) + err := c.cc.Invoke(ctx, "/protos.ConfigAgent/FetchServiceImageMetaData", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ConfigAgentServer is the server API for ConfigAgent service. +// All implementations must embed UnimplementedConfigAgentServer +// for forward compatibility +type ConfigAgentServer interface { + CreateDatabase(context.Context, *CreateDatabaseRequest) (*CreateDatabaseResponse, error) + CreateUsers(context.Context, *CreateUsersRequest) (*CreateUsersResponse, error) + CreateCDBUser(context.Context, *CreateCDBUserRequest) (*CreateCDBUserResponse, error) + UsersChanged(context.Context, *UsersChangedRequest) (*UsersChangedResponse, error) + UpdateUsers(context.Context, *UpdateUsersRequest) (*UpdateUsersResponse, error) + PhysicalBackup(context.Context, *PhysicalBackupRequest) (*longrunning.Operation, error) + PhysicalRestore(context.Context, *PhysicalRestoreRequest) (*longrunning.Operation, error) + CheckStatus(context.Context, *CheckStatusRequest) (*CheckStatusResponse, error) + CreateCDB(context.Context, *CreateCDBRequest) (*longrunning.Operation, error) + CreateListener(context.Context, *CreateListenerRequest) (*CreateListenerResponse, error) + DataPumpImport(context.Context, *DataPumpImportRequest) (*longrunning.Operation, error) + // Lists operations that match the specified filter in the request. + ListOperations(context.Context, *longrunning.ListOperationsRequest) (*longrunning.ListOperationsResponse, error) + // Gets the latest state of a long-running operation. 
Clients can use this + // method to poll the operation result. + GetOperation(context.Context, *longrunning.GetOperationRequest) (*longrunning.Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. + DeleteOperation(context.Context, *longrunning.DeleteOperationRequest) (*empty.Empty, error) + BootstrapDatabase(context.Context, *BootstrapDatabaseRequest) (*longrunning.Operation, error) + BootstrapStandby(context.Context, *BootstrapStandbyRequest) (*BootstrapStandbyResponse, error) + DataPumpExport(context.Context, *DataPumpExportRequest) (*longrunning.Operation, error) + SetParameter(context.Context, *SetParameterRequest) (*SetParameterResponse, error) + GetParameterTypeValue(context.Context, *GetParameterTypeValueRequest) (*GetParameterTypeValueResponse, error) + BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error) + RecoverConfigFile(context.Context, *RecoverConfigFileRequest) (*RecoverConfigFileResponse, error) + FetchServiceImageMetaData(context.Context, *FetchServiceImageMetaDataRequest) (*FetchServiceImageMetaDataResponse, error) + mustEmbedUnimplementedConfigAgentServer() +} + +// UnimplementedConfigAgentServer must be embedded to have forward compatible implementations. +type UnimplementedConfigAgentServer struct { +} + +func (UnimplementedConfigAgentServer) CreateDatabase(context.Context, *CreateDatabaseRequest) (*CreateDatabaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDatabase not implemented") +} +func (UnimplementedConfigAgentServer) CreateUsers(context.Context, *CreateUsersRequest) (*CreateUsersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUsers not implemented") +} +func (UnimplementedConfigAgentServer) CreateCDBUser(context.Context, *CreateCDBUserRequest) (*CreateCDBUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCDBUser not implemented") +} +func (UnimplementedConfigAgentServer) UsersChanged(context.Context, *UsersChangedRequest) (*UsersChangedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UsersChanged not implemented") +} +func (UnimplementedConfigAgentServer) UpdateUsers(context.Context, *UpdateUsersRequest) (*UpdateUsersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUsers not implemented") +} +func (UnimplementedConfigAgentServer) PhysicalBackup(context.Context, *PhysicalBackupRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method PhysicalBackup not implemented") +} +func (UnimplementedConfigAgentServer) PhysicalRestore(context.Context, *PhysicalRestoreRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method PhysicalRestore not implemented") +} +func (UnimplementedConfigAgentServer) CheckStatus(context.Context, *CheckStatusRequest) (*CheckStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckStatus not implemented") +} +func (UnimplementedConfigAgentServer) CreateCDB(context.Context, *CreateCDBRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCDB not implemented") +} +func (UnimplementedConfigAgentServer) CreateListener(context.Context, *CreateListenerRequest) (*CreateListenerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
CreateListener not implemented") +} +func (UnimplementedConfigAgentServer) DataPumpImport(context.Context, *DataPumpImportRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataPumpImport not implemented") +} +func (UnimplementedConfigAgentServer) ListOperations(context.Context, *longrunning.ListOperationsRequest) (*longrunning.ListOperationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListOperations not implemented") +} +func (UnimplementedConfigAgentServer) GetOperation(context.Context, *longrunning.GetOperationRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOperation not implemented") +} +func (UnimplementedConfigAgentServer) DeleteOperation(context.Context, *longrunning.DeleteOperationRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteOperation not implemented") +} +func (UnimplementedConfigAgentServer) BootstrapDatabase(context.Context, *BootstrapDatabaseRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method BootstrapDatabase not implemented") +} +func (UnimplementedConfigAgentServer) BootstrapStandby(context.Context, *BootstrapStandbyRequest) (*BootstrapStandbyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BootstrapStandby not implemented") +} +func (UnimplementedConfigAgentServer) DataPumpExport(context.Context, *DataPumpExportRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataPumpExport not implemented") +} +func (UnimplementedConfigAgentServer) SetParameter(context.Context, *SetParameterRequest) (*SetParameterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetParameter not implemented") +} +func (UnimplementedConfigAgentServer) GetParameterTypeValue(context.Context, *GetParameterTypeValueRequest) (*GetParameterTypeValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetParameterTypeValue not implemented") +} +func (UnimplementedConfigAgentServer) BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BounceDatabase not implemented") +} +func (UnimplementedConfigAgentServer) RecoverConfigFile(context.Context, *RecoverConfigFileRequest) (*RecoverConfigFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecoverConfigFile not implemented") +} +func (UnimplementedConfigAgentServer) FetchServiceImageMetaData(context.Context, *FetchServiceImageMetaDataRequest) (*FetchServiceImageMetaDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchServiceImageMetaData not implemented") +} +func (UnimplementedConfigAgentServer) mustEmbedUnimplementedConfigAgentServer() {} + +// UnsafeConfigAgentServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ConfigAgentServer will +// result in compilation errors. 
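+//
+// The forward-compatible alternative is to embed UnimplementedConfigAgentServer,
+// so that methods added to the service later degrade to codes.Unimplemented
+// errors instead of breaking the build. A hypothetical sketch (myServer is
+// not a type in this package):
+//
+//	type myServer struct {
+//		UnimplementedConfigAgentServer
+//	}
+//
+//	// Only the methods the server actually supports are overridden.
+//	func (s *myServer) CheckStatus(ctx context.Context, req *CheckStatusRequest) (*CheckStatusResponse, error) {
+//		return &CheckStatusResponse{Status: "Ready"}, nil
+//	}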
+type UnsafeConfigAgentServer interface { + mustEmbedUnimplementedConfigAgentServer() +} + +func RegisterConfigAgentServer(s grpc.ServiceRegistrar, srv ConfigAgentServer) { + s.RegisterService(&ConfigAgent_ServiceDesc, srv) +} + +func _ConfigAgent_CreateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CreateDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CreateDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CreateDatabase(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_CreateUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CreateUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CreateUsers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CreateUsers(ctx, req.(*CreateUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_CreateCDBUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCDBUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CreateCDBUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CreateCDBUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CreateCDBUser(ctx, req.(*CreateCDBUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_UsersChanged_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UsersChangedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).UsersChanged(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/UsersChanged", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).UsersChanged(ctx, req.(*UsersChangedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_UpdateUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).UpdateUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/UpdateUsers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).UpdateUsers(ctx, req.(*UpdateUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ConfigAgent_PhysicalBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PhysicalBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).PhysicalBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/PhysicalBackup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).PhysicalBackup(ctx, req.(*PhysicalBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_PhysicalRestore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PhysicalRestoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).PhysicalRestore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/PhysicalRestore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).PhysicalRestore(ctx, req.(*PhysicalRestoreRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_CheckStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CheckStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CheckStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CheckStatus(ctx, req.(*CheckStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_CreateCDB_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCDBRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CreateCDB(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CreateCDB", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CreateCDB(ctx, req.(*CreateCDBRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_CreateListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateListenerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).CreateListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/CreateListener", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).CreateListener(ctx, req.(*CreateListenerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_DataPumpImport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataPumpImportRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).DataPumpImport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/DataPumpImport", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).DataPumpImport(ctx, req.(*DataPumpImportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).ListOperations(ctx, req.(*longrunning.ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).GetOperation(ctx, req.(*longrunning.GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.DeleteOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).DeleteOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/DeleteOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).DeleteOperation(ctx, req.(*longrunning.DeleteOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_BootstrapDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BootstrapDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).BootstrapDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/BootstrapDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).BootstrapDatabase(ctx, req.(*BootstrapDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_BootstrapStandby_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BootstrapStandbyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).BootstrapStandby(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/BootstrapStandby", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).BootstrapStandby(ctx, req.(*BootstrapStandbyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_DataPumpExport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataPumpExportRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).DataPumpExport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/DataPumpExport", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).DataPumpExport(ctx, req.(*DataPumpExportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_SetParameter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetParameterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).SetParameter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/SetParameter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).SetParameter(ctx, req.(*SetParameterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_GetParameterTypeValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetParameterTypeValueRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).GetParameterTypeValue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/GetParameterTypeValue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).GetParameterTypeValue(ctx, req.(*GetParameterTypeValueRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_BounceDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BounceDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).BounceDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/BounceDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).BounceDatabase(ctx, req.(*BounceDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_RecoverConfigFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecoverConfigFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).RecoverConfigFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/RecoverConfigFile", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).RecoverConfigFile(ctx, req.(*RecoverConfigFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ConfigAgent_FetchServiceImageMetaData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FetchServiceImageMetaDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfigAgentServer).FetchServiceImageMetaData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ConfigAgent/FetchServiceImageMetaData", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfigAgentServer).FetchServiceImageMetaData(ctx, req.(*FetchServiceImageMetaDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ConfigAgent_ServiceDesc is the grpc.ServiceDesc for ConfigAgent service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ConfigAgent_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "protos.ConfigAgent", + HandlerType: (*ConfigAgentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDatabase", + Handler: _ConfigAgent_CreateDatabase_Handler, + }, + { + MethodName: "CreateUsers", + Handler: _ConfigAgent_CreateUsers_Handler, + }, + { + MethodName: "CreateCDBUser", + Handler: _ConfigAgent_CreateCDBUser_Handler, + }, + { + MethodName: "UsersChanged", + Handler: _ConfigAgent_UsersChanged_Handler, + }, + { + MethodName: "UpdateUsers", + Handler: _ConfigAgent_UpdateUsers_Handler, + }, + { + MethodName: "PhysicalBackup", + Handler: _ConfigAgent_PhysicalBackup_Handler, + }, + { + MethodName: "PhysicalRestore", + Handler: _ConfigAgent_PhysicalRestore_Handler, + }, + { + MethodName: "CheckStatus", + Handler: _ConfigAgent_CheckStatus_Handler, + }, + { + MethodName: "CreateCDB", + Handler: _ConfigAgent_CreateCDB_Handler, + }, + { + MethodName: "CreateListener", + Handler: _ConfigAgent_CreateListener_Handler, + }, + { + MethodName: "DataPumpImport", + Handler: _ConfigAgent_DataPumpImport_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ConfigAgent_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _ConfigAgent_GetOperation_Handler, + }, + { + MethodName: "DeleteOperation", + Handler: _ConfigAgent_DeleteOperation_Handler, + }, + { + MethodName: "BootstrapDatabase", + Handler: _ConfigAgent_BootstrapDatabase_Handler, + }, + { + MethodName: "BootstrapStandby", + Handler: _ConfigAgent_BootstrapStandby_Handler, + }, + { + MethodName: "DataPumpExport", + Handler: _ConfigAgent_DataPumpExport_Handler, + }, + { + MethodName: "SetParameter", + Handler: _ConfigAgent_SetParameter_Handler, + }, + { + MethodName: "GetParameterTypeValue", + Handler: _ConfigAgent_GetParameterTypeValue_Handler, + }, + { + MethodName: "BounceDatabase", + Handler: _ConfigAgent_BounceDatabase_Handler, + }, + { + MethodName: "RecoverConfigFile", + Handler: _ConfigAgent_RecoverConfigFile_Handler, + }, + { + MethodName: "FetchServiceImageMetaData", + Handler: _ConfigAgent_FetchServiceImageMetaData_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "oracle/pkg/agents/config_agent/protos/service.proto", +} diff --git a/oracle/pkg/agents/config_agent/server/BUILD.bazel b/oracle/pkg/agents/config_agent/server/BUILD.bazel new file mode 100644 index 0000000..30b024a --- /dev/null +++ 
b/oracle/pkg/agents/config_agent/server/BUILD.bazel @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "server", + srcs = [ + "configserver.go", + "user_repository.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/server", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/backup", + "//oracle/pkg/agents/common", + "//oracle/pkg/agents/common/sql", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "//oracle/pkg/database/provision", + "@com_google_cloud_go//secretmanager/apiv1", + "@go_googleapis//google/cloud/secretmanager/v1:secretmanager_go_proto", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_k8s_klog_v2//:klog", + "@org_bitbucket_creachadair_stringset//:stringset", + "@org_golang_google_grpc//:go_default_library", + ], +) + +go_test( + name = "server_test", + srcs = ["configserver_test.go"], + embed = [":server"], + deps = [ + "//oracle/pkg/agents/common/sql", + "//oracle/pkg/agents/config_agent/protos", + "//oracle/pkg/agents/oracle", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//test/bufconn", + ], +) diff --git a/oracle/pkg/agents/config_agent/server/configserver.go b/oracle/pkg/agents/config_agent/server/configserver.go new file mode 100644 index 0000000..3b3220a --- /dev/null +++ b/oracle/pkg/agents/config_agent/server/configserver.go @@ -0,0 +1,979 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configagent implements Config Agent gRPC interface. 
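+//
+// A minimal serving sketch (the real wiring lives in the agent's main
+// package; the listener address and ports below are assumptions):
+//
+//	lis, err := net.Listen("tcp", ":3202")
+//	if err != nil {
+//		klog.Fatal(err)
+//	}
+//	grpcServer := grpc.NewServer()
+//	pb.RegisterConfigAgentServer(grpcServer, &ConfigServer{
+//		DBService: "dbdaemon-svc",
+//		DBPort:    3203,
+//	})
+//	grpcServer.Serve(lis)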
+package configagent + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + secretmanager "cloud.google.com/go/secretmanager/apiv1" + "github.com/golang/protobuf/ptypes/empty" + secretmanagerpb "google.golang.org/genproto/googleapis/cloud/secretmanager/v1" + lropb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/backup" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + pb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision" +) + +const ( + version = "12.2" + pdbAdmin = "GPDB_ADMIN" + gsmSecretStr = "projects/%s/secrets/%s/versions/%s" +) + +var ( + newDBDClient = func(ctx context.Context, server *ConfigServer) (dbdpb.DatabaseDaemonClient, func() error, error) { + conn, err := common.DatabaseDaemonDialService(ctx, fmt.Sprintf("%s:%d", server.DBService, server.DBPort), grpc.WithBlock()) + if err != nil { + return nil, func() error { return nil }, err + } + return dbdpb.NewDatabaseDaemonClient(conn), conn.Close, nil + } + + newGsmClient = func(ctx context.Context) (*secretmanager.Client, func() error, error) { + client, err := secretmanager.NewClient(ctx) + if err != nil { + return nil, func() error { return nil }, err + } + return client, client.Close, nil + } +) + +// pdb represents a PDB database. +type pdb struct { + containerDatabaseName string + dataFilesDir string + defaultTablespace string + defaultTablespaceDatafile string + fileConvertFrom string + fileConvertTo string + hostName string + listenerDir string + listeners map[string]*consts.Listener + pathPrefix string + pluggableAdminPasswd string + pluggableDatabaseName string + skipUserCheck bool + version string +} + +// ConfigServer represents a ConfigAgentServer +type ConfigServer struct { + *pb.UnimplementedConfigAgentServer + DBService string + DBPort int +} + +// CheckStatus runs a requested set of state checks. +// The Instance state check consists of: +// - checking the provisioning done file. +// - running a CDB connection test via DB Daemon. 
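+// The returned Status is "InProgress" until the provisioning done file is
+// found, and "Ready" once the CDB state check and the PDB listing both pass.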
+func (s *ConfigServer) CheckStatus(ctx context.Context, req *pb.CheckStatusRequest) (*pb.CheckStatusResponse, error) {
+	klog.InfoS("configagent/CheckStatus", "req", req)
+
+	switch req.GetCheckStatusType() {
+	case pb.CheckStatusRequest_INSTANCE:
+		klog.InfoS("configagent/CheckStatus: running a Database Instance status check...")
+	default:
+		return &pb.CheckStatusResponse{}, fmt.Errorf("configagent/CheckStatus: check status type %v is not supported in this release", req.GetCheckStatusType())
+	}
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CheckStatus: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.V(1).InfoS("configagent/CheckStatus", "client", client)
+
+	resp, err := client.FileExists(ctx, &dbdpb.FileExistsRequest{Name: consts.ProvisioningDoneFile})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CheckStatus: failed to check a provisioning file: %v", err)
+	}
+
+	if !resp.Exists {
+		klog.InfoS("configagent/CheckStatus: provisioning file NOT found")
+		return &pb.CheckStatusResponse{Status: "InProgress"}, nil
+	}
+	klog.InfoS("configagent/CheckStatus: provisioning file found")
+
+	if _, err = client.CheckDatabaseState(ctx, &dbdpb.CheckDatabaseStateRequest{IsCdb: true, DatabaseName: req.GetCdbName(), DbDomain: req.GetDbDomain()}); err != nil {
+		return nil, fmt.Errorf("configagent/CheckStatus: failed to check a Database Instance state: %v", err)
+	}
+	klog.InfoS("configagent/CheckStatus: Database Instance is up and running")
+
+	pdbCheckCmd := []string{"select open_mode, restricted from v$pdbs"}
+	resp2, err := client.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: pdbCheckCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CheckStatus: failed to get a list of available PDBs: %v", err)
+	}
+	klog.InfoS("configagent/CheckStatus", "PDB query response", resp2)
+
+	return &pb.CheckStatusResponse{Status: "Ready"}, nil
+}
+
+// PhysicalRestore restores an RMAN backup (downloaded from GCS).
+func (s *ConfigServer) PhysicalRestore(ctx context.Context, req *pb.PhysicalRestoreRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/PhysicalRestore", "req", req)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/PhysicalRestore: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/PhysicalRestore", "client", client)
+
+	return backup.PhysicalRestore(ctx, &backup.Params{
+		Client:       client,
+		InstanceName: req.GetInstanceName(),
+		CDBName:      req.CdbName,
+		DOP:          req.GetDop(),
+		LocalPath:    req.GetLocalPath(),
+		GCSPath:      req.GetGcsPath(),
+		OperationID:  req.GetLroInput().GetOperationId(),
+	})
+}
+
+// PhysicalBackup starts an RMAN backup and stores it in the GCS bucket provided.
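+// BackupSubType selects the RMAN granularity clause: INSTANCE backs up the
+// whole "database", while DATABASE expects PDB names in BackupItems and
+// yields, e.g., "pluggable database PDB1, PDB2".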
+func (s *ConfigServer) PhysicalBackup(ctx context.Context, req *pb.PhysicalBackupRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/PhysicalBackup", "req", req)
+
+	var granularity string
+	switch req.BackupSubType {
+	case pb.PhysicalBackupRequest_INSTANCE:
+		granularity = "database"
+	case pb.PhysicalBackupRequest_DATABASE:
+		if req.GetBackupItems() == nil {
+			return &lropb.Operation{}, fmt.Errorf("configagent/PhysicalBackup: failed a pre-flight check: a PDB backup is requested, but no PDB name(s) given")
+		}
+
+		granularity = "pluggable database " + strings.Join(req.GetBackupItems(), ", ")
+	default:
+		return &lropb.Operation{}, fmt.Errorf("configagent/PhysicalBackup: backup sub type %v is not supported in this release", req.BackupSubType)
+	}
+	klog.InfoS("configagent/PhysicalBackup", "granularity", granularity)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/PhysicalBackup: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/PhysicalBackup", "client", client)
+
+	return backup.PhysicalBackup(ctx, &backup.Params{
+		Client:       client,
+		Granularity:  granularity,
+		Backupset:    req.GetBackupset(),
+		CheckLogical: req.GetCheckLogical(),
+		Compressed:   req.GetCompressed(),
+		DOP:          req.GetDop(),
+		Level:        req.GetLevel(),
+		Filesperset:  req.GetFilesperset(),
+		SectionSize:  req.GetSectionSize(),
+		LocalPath:    req.GetLocalPath(),
+		GCSPath:      req.GetGcsPath(),
+		OperationID:  req.GetLroInput().GetOperationId(),
+	})
+}
+
+// CreateCDB creates a CDB using dbca.
+func (s *ConfigServer) CreateCDB(ctx context.Context, req *pb.CreateCDBRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/CreateCDB", "req", req)
+	dbdClient, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDB: failed to create database daemon dbdClient: %v", err)
+	}
+	defer closeConn()
+
+	_, err = dbdClient.CreateCDB(ctx, &dbdpb.CreateCDBRequest{
+		OracleHome:       req.GetOracleHome(),
+		DatabaseName:     req.GetSid(),
+		Version:          req.GetVersion(),
+		DbUniqueName:     req.GetDbUniqueName(),
+		CharacterSet:     req.GetCharacterSet(),
+		MemoryPercent:    req.GetMemoryPercent(),
+		AdditionalParams: req.GetAdditionalParams(),
+		DbDomain:         req.GetDbDomain(),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDB: failed to create CDB: %v", err)
+	}
+
+	if _, err := dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+		Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+		DatabaseName: req.GetSid(),
+	}); err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDB: shutdown failed: %v", err)
+	}
+	klog.InfoS("configagent/CreateCDB successfully completed")
+	return &lropb.Operation{Done: true}, nil
+}
+
+// CreateListener invokes dbdaemon.CreateListener.
+func (s *ConfigServer) CreateListener(ctx context.Context, req *pb.CreateListenerRequest) (*pb.CreateListenerResponse, error) {
+	klog.InfoS("configagent/CreateListener", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateListener: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/CreateListener", "client", client)
+
+	_, err = client.CreateListener(ctx, &dbdpb.CreateListenerRequest{
+		DatabaseName: req.Name,
+		Port:         int32(req.Port),
+		Protocol:     req.GetProtocol(),
+		OracleHome:   req.GetOracleHome(),
+		DbDomain:     req.GetDbDomain(),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateListener: error while creating listener: %v", err)
+	}
+	return &pb.CreateListenerResponse{}, nil
+}
+
+// CreateDatabase creates PDB as requested.
+func (s *ConfigServer) CreateDatabase(ctx context.Context, req *pb.CreateDatabaseRequest) (*pb.CreateDatabaseResponse, error) {
+	klog.InfoS("configagent/CreateDatabase", "req", req)
+
+	var pwd string
+	var err error
+
+	toUpdatePlaintextAdminPwd := req.Password != "" && req.Password != req.LastPassword
+	if toUpdatePlaintextAdminPwd {
+		pwd = req.Password
+	}
+
+	toUpdateGsmAdminPwd := req.AdminPasswordGsmSecretRef != nil && (req.AdminPasswordGsmSecretRef.Version != req.AdminPasswordGsmSecretRef.LastVersion || req.AdminPasswordGsmSecretRef.Version == "latest")
+	if toUpdateGsmAdminPwd {
+		pwd, err = AccessSecretVersionFunc(ctx, fmt.Sprintf(gsmSecretStr, req.AdminPasswordGsmSecretRef.ProjectId, req.AdminPasswordGsmSecretRef.SecretId, req.AdminPasswordGsmSecretRef.Version))
+		if err != nil {
+			return nil, fmt.Errorf("configagent/CreateDatabase: failed to retrieve secret from Google Secret Manager: %v", err)
+		}
+	}
+
+	p, err := buildPDB(req.CdbName, req.Name, pwd, version, consts.ListenerNames, true)
+	if err != nil {
+		return nil, err
+	}
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateDatabase: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/CreateDatabase", "client", client)
+
+	_, err = client.CheckDatabaseState(ctx, &dbdpb.CheckDatabaseStateRequest{IsCdb: true, DatabaseName: req.GetCdbName(), DbDomain: req.GetDbDomain()})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateDatabase: failed to check a CDB state: %v", err)
+	}
+	klog.InfoS("configagent/CreateDatabase: pre-flight check #1: CDB is up and running")
+
+	pdbCheckCmd := []string{fmt.Sprintf("select open_mode, restricted from v$pdbs where name = '%s'", sql.StringParam(p.pluggableDatabaseName))}
+	resp, err := client.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: pdbCheckCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateDatabase: failed to check if a PDB called %s already exists: %v", p.pluggableDatabaseName, err)
+	}
+	klog.InfoS("configagent/CreateDatabase pre-flight check #2", "pdb", p.pluggableDatabaseName, "resp", resp)
+
+	if resp.Msg != nil {
+		if toUpdateGsmAdminPwd || toUpdatePlaintextAdminPwd {
+			sqls := append([]string{sql.QuerySetSessionContainer(p.pluggableDatabaseName)}, []string{sql.QueryAlterUser(pdbAdmin, pwd)}...)
+			if _, err := client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+				Commands: sqls,
+			}); err != nil {
+				return nil, fmt.Errorf("failed to alter user %s: %v", pdbAdmin, err)
+			}
+			klog.InfoS("configagent/CreateDatabase update pdb admin user succeeded", "user", pdbAdmin)
+			return &pb.CreateDatabaseResponse{Status: "AdminUserSyncCompleted"}, nil
+		}
+		klog.InfoS("configagent/CreateDatabase pre-flight check#2", "pdb", p.pluggableDatabaseName, "respMsg", resp.Msg)
+		return &pb.CreateDatabaseResponse{Status: "AlreadyExists"}, nil
+	}
+	klog.InfoS("configagent/CreateDatabase pre-flight check#2: pdb doesn't exist, proceeding to create", "pdb", p.pluggableDatabaseName)
+
+	cdbDir := fmt.Sprintf(consts.DataDir, consts.DataMount, req.GetCdbName())
+	pdbDir := filepath.Join(cdbDir, strings.ToUpper(req.GetName()))
+	toCreate := []string{
+		fmt.Sprintf("%s/data", pdbDir),
+		fmt.Sprintf("%s/%s", pdbDir, consts.DpdumpDir.Linux),
+		fmt.Sprintf("%s/rman", consts.OracleBase),
+	}
+	for _, d := range toCreate {
+		if _, err := client.CreateDir(ctx, &dbdpb.CreateDirRequest{
+			Path: d,
+			Perm: 0760,
+		}); err != nil {
+			return nil, fmt.Errorf("failed to create a PDB dir %q: %v", d, err)
+		}
+	}
+
+	pdbCmd := []string{sql.QueryCreatePDB(p.pluggableDatabaseName, pdbAdmin, p.pluggableAdminPasswd, p.dataFilesDir, p.defaultTablespace, p.defaultTablespaceDatafile, p.fileConvertFrom, p.fileConvertTo)}
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: pdbCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateDatabase: failed to create a PDB %s: %v", p.pluggableDatabaseName, err)
+	}
+	klog.InfoS("configagent/CreateDatabase create a PDB Done", "pdb", p.pluggableDatabaseName)
+
+	pdbOpen := []string{fmt.Sprintf("alter pluggable database %s open read write", sql.MustBeObjectName(p.pluggableDatabaseName))}
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: pdbOpen, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateDatabase: PDB %s open failed: %v", p.pluggableDatabaseName, err)
+	}
+	klog.InfoS("configagent/CreateDatabase PDB open", "pdb", p.pluggableDatabaseName)
+
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{
+		sql.QuerySetSessionContainer(p.pluggableDatabaseName),
+		sql.QueryGrantPrivileges("create session, dba", pdbAdmin),
+		sql.QueryGrantPrivileges("create session, resource, datapump_imp_full_database, datapump_exp_full_database, unlimited tablespace", consts.PDBLoaderUser),
+	}, Suppress: false})
+	if err != nil {
+		// Until we have proper error handling, just log the error here.
+		klog.ErrorS(err, "configagent/CreateDatabase: failed to create a PDB_ADMIN user and/or PDB loader user")
+	} else {
+		klog.InfoS("configagent/CreateDatabase: created PDB_ADMIN and PDB Loader users")
+	}
+
+	// Separate out the directory treatment for ease of troubleshooting.
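+	// The statements below register the OS path as an Oracle directory object
+	// and grant the PDB loader user read/write on it, roughly (illustrative):
+	//   create directory <DpdumpDir.Oracle> as '<pathPrefix>/<DpdumpDir.Linux>'
+	//   grant read,write on directory <DpdumpDir.Oracle> to <PDBLoaderUser>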
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{
+		sql.QuerySetSessionContainer(p.pluggableDatabaseName),
+		sql.QueryCreateDir(consts.DpdumpDir.Oracle, filepath.Join(p.pathPrefix, consts.DpdumpDir.Linux)),
+		sql.QueryGrantPrivileges(fmt.Sprintf("read,write on directory %s", consts.DpdumpDir.Oracle), consts.PDBLoaderUser),
+	}, Suppress: false})
+	if err != nil {
+		klog.ErrorS(err, "configagent/CreateDatabase: failed to create a Data Pump directory", "datapumpDir", consts.DpdumpDir)
+	}
+	klog.InfoS("configagent/CreateDatabase: DONE", "pdb", p.pluggableDatabaseName)
+
+	return &pb.CreateDatabaseResponse{Status: "Ready"}, nil
+}
+
+// CreateUsers creates users as requested.
+func (s *ConfigServer) CreateUsers(ctx context.Context, req *pb.CreateUsersRequest) (*pb.CreateUsersResponse, error) {
+	// The caller (the db controller) invokes UsersChanged before this function
+	// to check whether the requested users already exist, so no duplicate
+	// user-list check is performed here.
+	klog.InfoS("configagent/CreateUsers", "req", req)
+
+	p, err := buildPDB(req.GetCdbName(), req.GetPdbName(), "", version, consts.ListenerNames, true)
+	if err != nil {
+		return nil, err
+	}
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateUsers: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/CreateUsers", "client", client)
+
+	_, err = client.CheckDatabaseState(ctx, &dbdpb.CheckDatabaseStateRequest{IsCdb: true, DatabaseName: req.GetCdbName(), DbDomain: req.GetDbDomain()})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateUsers: failed to check a CDB state: %v", err)
+	}
+	klog.InfoS("configagent/CreateUsers: pre-flight check: CDB is up and running")
+
+	// Separate create users from grants to make troubleshooting easier.
+	usersCmd := []string{sql.QuerySetSessionContainer(p.pluggableDatabaseName)}
+	usersCmd = append(usersCmd, req.CreateUsersCmd...)
+	for _, u := range req.GetUser() {
+		if u.PasswordGsmSecretRef != nil && u.Name != "" {
+			var pwd string
+			pwd, err = AccessSecretVersionFunc(ctx, fmt.Sprintf(gsmSecretStr, u.PasswordGsmSecretRef.ProjectId, u.PasswordGsmSecretRef.SecretId, u.PasswordGsmSecretRef.Version))
+			if err != nil {
+				return nil, fmt.Errorf("configagent/CreateUsers: failed to retrieve secret from Google Secret Manager: %v", err)
+			}
+			if _, err = sql.Identifier(pwd); err != nil {
+				return nil, fmt.Errorf("configagent/CreateUsers: Google Secret Manager contains an invalid password for user %q: %v", u.Name, err)
+			}
+
+			usersCmd = append(usersCmd, sql.QueryCreateUser(u.Name, pwd))
+		}
+	}
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: usersCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateUsers: failed to create users in a PDB %s: %v", p.pluggableDatabaseName, err)
+	}
+	klog.InfoS("configagent/CreateUsers: create users in PDB DONE", "pdb", p.pluggableDatabaseName)
+
+	privsCmd := []string{sql.QuerySetSessionContainer(p.pluggableDatabaseName)}
+	privsCmd = append(privsCmd, req.GrantPrivsCmd...)
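+	// privsCmd now holds the container switch followed by the caller-supplied
+	// grant statements, e.g. (illustrative): grant CONNECT to "SCOTT".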
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: privsCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateUsers: failed to grant privileges in a PDB %s: %v", p.pluggableDatabaseName, err)
+	}
+	klog.InfoS("configagent/CreateUsers: DONE", "pdb", p.pluggableDatabaseName)
+
+	return &pb.CreateUsersResponse{Status: "Ready"}, nil
+}
+
+// UsersChanged determines whether any users changed (created/updated/deleted).
+func (s *ConfigServer) UsersChanged(ctx context.Context, req *pb.UsersChangedRequest) (*pb.UsersChangedResponse, error) {
+	klog.InfoS("configagent/UsersChanged", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/UsersChanged: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	us := newUsers(req.GetPdbName(), req.GetUserSpecs())
+	toCreate, toUpdate, toDelete, toUpdatePwd, err := us.diff(ctx, client)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/UsersChanged: failed to get difference between env and spec for users: %v", err)
+	}
+	var suppressed []*pb.UsersChangedResponse_Suppressed
+	for _, du := range toDelete {
+		suppressed = append(suppressed, &pb.UsersChangedResponse_Suppressed{
+			SuppressType: pb.UsersChangedResponse_DELETE,
+			UserName:     du.userName,
+			Sql:          du.delete(),
+		})
+	}
+	for _, cu := range toCreate {
+		if cu.newPassword == "" {
+			suppressed = append(suppressed, &pb.UsersChangedResponse_Suppressed{
+				SuppressType: pb.UsersChangedResponse_CREATE,
+				UserName:     cu.userName,
+			})
+		}
+	}
+	resp := &pb.UsersChangedResponse{
+		Changed:    len(toCreate) != 0 || len(toUpdate) != 0 || len(toUpdatePwd) != 0,
+		Suppressed: suppressed,
+	}
+	klog.InfoS("configagent/UsersChanged: DONE", "resp", resp)
+	return resp, nil
+}
+
+// UpdateUsers updates/creates users as requested.
+func (s *ConfigServer) UpdateUsers(ctx context.Context, req *pb.UpdateUsersRequest) (*pb.UpdateUsersResponse, error) {
+	klog.InfoS("configagent/UpdateUsers", "req", req)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/UpdateUsers: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	us := newUsers(req.GetPdbName(), req.GetUserSpecs())
+	toCreate, toUpdate, _, toUpdatePwd, err := us.diff(ctx, client)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/UpdateUsers: failed to get difference between env and spec for users: %v", err)
+	}
+	foundErr := false
+	for _, u := range toCreate {
+		klog.InfoS("configagent/UpdateUsers", "creating user", u.userName)
+		if err := u.create(ctx, client); err != nil {
+			klog.ErrorS(err, "failed to create user")
+			foundErr = true
+		}
+	}
+
+	for _, u := range toUpdate {
+		klog.InfoS("configagent/UpdateUsers", "updating user", u.userName)
+		// There is a scenario where a role implies privileges: for example,
+		// granting the dba role automatically grants the unlimited tablespace
+		// privilege, and revoking dba automatically revokes it. A user update
+		// therefore updates roles first and system privileges second.
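+		// e.g. a user losing the RESOURCE role and the UNLIMITED TABLESPACE
+		// privilege gets revoke RESOURCE before revoke UNLIMITED TABLESPACE
+		// (see the expected SQL ordering in configserver_test.go).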
+		if err := u.update(ctx, client, us.databaseRoles); err != nil {
+			klog.ErrorS(err, "failed to update user")
+			foundErr = true
+		}
+	}
+
+	for _, u := range toUpdatePwd {
+		klog.InfoS("configagent/UpdateUsers", "updating user", u.userName)
+		if err := u.updatePassword(ctx, client); err != nil {
+			klog.ErrorS(err, "failed to update user password")
+			foundErr = true
+		}
+	}
+
+	if foundErr {
+		return nil, errors.New("failed to update users")
+	}
+	klog.InfoS("configagent/UpdateUsers: DONE")
+	return &pb.UpdateUsersResponse{}, nil
+}
+
+// GetOperation fetches the corresponding LRO for the given operation name.
+func (s *ConfigServer) GetOperation(ctx context.Context, req *lropb.GetOperationRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/GetOperation", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/GetOperation: failed to create database daemon client: %v", err)
+	}
+	defer func() { _ = closeConn() }()
+	klog.InfoS("configagent/GetOperation", "client", client)
+
+	return client.GetOperation(ctx, req)
+}
+
+// ListOperations lists all LROs.
+func (s *ConfigServer) ListOperations(ctx context.Context, req *lropb.ListOperationsRequest) (*lropb.ListOperationsResponse, error) {
+	klog.InfoS("configagent/ListOperations", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/ListOperations: failed to create database daemon client: %v", err)
+	}
+	defer func() { _ = closeConn() }()
+	klog.InfoS("configagent/ListOperations", "client", client)
+
+	return client.ListOperations(ctx, req)
+}
+
+// DeleteOperation deletes the LRO with the given name.
+func (s *ConfigServer) DeleteOperation(ctx context.Context, req *lropb.DeleteOperationRequest) (*empty.Empty, error) {
+	klog.InfoS("configagent/DeleteOperation", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/DeleteOperation: failed to create database daemon client: %v", err)
+	}
+	defer func() { _ = closeConn() }()
+	klog.InfoS("configagent/DeleteOperation", "client", client)
+
+	return client.DeleteOperation(ctx, req)
+}
+
+// CreateCDBUser creates a CDB user as requested.
+func (s *ConfigServer) CreateCDBUser(ctx context.Context, req *pb.CreateCDBUserRequest) (*pb.CreateCDBUserResponse, error) {
+	klog.InfoS("configagent/CreateCDBUser", "req", req)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDBUser: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/CreateCDBUser", "client", client)
+
+	// Separate create users from grants to make troubleshooting easier.
+	usersCmd := []string{}
+	usersCmd = append(usersCmd, req.CreateUsersCmd...)
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: usersCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDBUser: failed to create users in CDB %s: %v", req.CdbName, err)
+	}
+	klog.InfoS("configagent/CreateCDBUser: create users in CDB DONE", "cdb", req.CdbName)
+
+	privsCmd := []string{}
+	privsCmd = append(privsCmd, req.GrantPrivsCmd...)
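+	// As in CreateUsers, grants run as a second sqlplus batch so that a failure
+	// can be attributed to the right phase (user creation vs. privileges).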
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: privsCmd, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/CreateCDBUser: failed to grant privileges in CDB %s: %v", req.CdbName, err)
+	}
+	klog.InfoS("configagent/CreateCDBUser: DONE", "cdb", req.CdbName)
+
+	return &pb.CreateCDBUserResponse{Status: "Ready"}, nil
+}
+
+// BootstrapStandby performs bootstrap steps for a standby instance.
+func (s *ConfigServer) BootstrapStandby(ctx context.Context, req *pb.BootstrapStandbyRequest) (*pb.BootstrapStandbyResponse, error) {
+	klog.InfoS("configagent/BootstrapStandby", "req", req)
+	dbdClient, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+
+	// Skip if the instance is already bootstrapped.
+	resp, err := dbdClient.FileExists(ctx, &dbdpb.FileExistsRequest{Name: consts.ProvisioningDoneFile})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: failed to check a provisioning file: %v", err)
+	}
+
+	if resp.Exists {
+		klog.InfoS("configagent/BootstrapStandby: standby is already provisioned")
+		return &pb.BootstrapStandbyResponse{}, nil
+	}
+
+	task := provision.NewBootstrapDatabaseTaskForStandby(req.GetCdbName(), req.GetDbdomain(), dbdClient)
+
+	if err := task.Call(ctx); err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: failed to bootstrap standby database: %v", err)
+	}
+	klog.InfoS("configagent/BootstrapStandby: bootstrap task completed successfully")
+
+	// Create listeners.
+	_, err = s.CreateListener(ctx, &pb.CreateListenerRequest{
+		Name:     req.GetCdbName(),
+		Port:     consts.SecureListenerPort,
+		Protocol: "TCP",
+		DbDomain: req.GetDbdomain(),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: failed to create listener: %v", err)
+	}
+
+	if _, err := dbdClient.BootstrapStandby(ctx, &dbdpb.BootstrapStandbyRequest{
+		CdbName: req.GetCdbName(),
+	}); err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: dbdaemon failed to bootstrap standby: %v", err)
+	}
+	klog.InfoS("configagent/BootstrapStandby: dbdaemon completed bootstrap standby successfully")
+
+	_, err = dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{consts.OpenPluggableDatabaseSQL}, Suppress: false})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: failed to open pluggable database: %v", err)
+	}
+
+	// Fetch existing pdbs/users to create database resources for.
+	knownPDBsResp, err := dbdClient.KnownPDBs(ctx, &dbdpb.KnownPDBsRequest{
+		IncludeSeed: false,
+		OnlyOpen:    false,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapStandby: dbdaemon failed to get KnownPDBs: %v", err)
+	}
+
+	var migratedPDBs []*pb.BootstrapStandbyResponse_PDB
+	for _, pdb := range knownPDBsResp.GetKnownPdbs() {
+		us := newUsers(pdb, []*pb.User{})
+		_, _, existingUsers, _, err := us.diff(ctx, dbdClient)
+		if err != nil {
+			return nil, fmt.Errorf("configagent/BootstrapStandby: failed to get existing users for pdb %v: %v", pdb, err)
+		}
+		var migratedUsers []*pb.BootstrapStandbyResponse_User
+		for _, u := range existingUsers {
+			migratedUsers = append(migratedUsers, &pb.BootstrapStandbyResponse_User{
+				UserName: u.GetUserName(),
+				Privs:    u.GetUserEnvPrivs(),
+			})
+		}
+		migratedPDBs = append(migratedPDBs, &pb.BootstrapStandbyResponse_PDB{
+			PdbName: strings.ToLower(pdb),
+			Users:   migratedUsers,
+		})
+	}
+
+	klog.InfoS("configagent/BootstrapStandby: fetched existing pdbs and users successfully", "MigratedPDBs", migratedPDBs)
+	return &pb.BootstrapStandbyResponse{Pdbs: migratedPDBs}, nil
+}
+
+// BootstrapDatabase bootstraps a CDB after creation.
+func (s *ConfigServer) BootstrapDatabase(ctx context.Context, req *pb.BootstrapDatabaseRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/BootstrapDatabase", "req", req)
+
+	dbdClient, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapDatabase: failed to create database daemon client: %v", err)
+	}
+	defer closeConn()
+	task := provision.NewBootstrapDatabaseTaskForUnseeded(req.CdbName, req.DbUniqueName, req.Dbdomain, dbdClient)
+
+	if err := task.Call(ctx); err != nil {
+		return nil, fmt.Errorf("configagent/BootstrapDatabase: failed to bootstrap database: %v", err)
+	}
+	return &lropb.Operation{Done: true}, nil
+}
+
+// DataPumpImport imports a data dump file from the provided GCS path.
+func (s *ConfigServer) DataPumpImport(ctx context.Context, req *pb.DataPumpImportRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/DataPumpImport", "req", req)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/DataPumpImport: failed to create database daemon client: %v", err)
+	}
+	defer func() { _ = closeConn() }()
+
+	return client.DataPumpImportAsync(ctx, &dbdpb.DataPumpImportAsyncRequest{
+		SyncRequest: &dbdpb.DataPumpImportRequest{
+			PdbName:    req.PdbName,
+			DbDomain:   req.DbDomain,
+			GcsPath:    req.GcsPath,
+			GcsLogPath: req.GcsLogPath,
+			CommandParams: []string{
+				"FULL=YES",
+				"METRICS=YES",
+				"LOGTIME=ALL",
+			},
+		},
+		LroInput: &dbdpb.LROInput{
+			OperationId: req.GetLroInput().GetOperationId(),
+		},
+	})
+}
+
+// DataPumpExport exports a data pump file to the provided GCS path.
+func (s *ConfigServer) DataPumpExport(ctx context.Context, req *pb.DataPumpExportRequest) (*lropb.Operation, error) {
+	klog.InfoS("configagent/DataPumpExport", "req", req)
+
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/DataPumpExport: failed to create database daemon client: %v", err)
+	}
+	defer func() { _ = closeConn() }()
+
+	return client.DataPumpExportAsync(ctx, &dbdpb.DataPumpExportAsyncRequest{
+		SyncRequest: &dbdpb.DataPumpExportRequest{
+			PdbName:       req.PdbName,
+			DbDomain:      req.DbDomain,
+			ObjectType:    req.ObjectType,
+			Objects:       req.Objects,
+			GcsPath:       req.GcsPath,
+			GcsLogPath:    req.GcsLogPath,
+			FlashbackTime: req.FlashbackTime,
+			CommandParams: []string{
+				"METRICS=YES",
+				"LOGTIME=ALL",
+			},
+		},
+		LroInput: &dbdpb.LROInput{
+			OperationId: req.GetLroInput().GetOperationId(),
+		},
+	})
+}
+
+// SetParameter sets a database parameter as requested.
+func (s *ConfigServer) SetParameter(ctx context.Context, req *pb.SetParameterRequest) (*pb.SetParameterResponse, error) {
+	klog.InfoS("configagent/SetParameter", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/SetParameter: failed to create dbdClient: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/SetParameter", "client", client)
+
+	// Fetch the parameter type.
+	// The possible values are IMMEDIATE, DEFERRED and FALSE.
+	query := fmt.Sprintf("select issys_modifiable from v$parameter where name='%s'", sql.StringParam(req.Key))
+	paramType, err := fetchAndParseSingleResultQuery(ctx, client, query)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/SetParameter: error while inferring parameter type: %v", err)
+	}
+	query = fmt.Sprintf("select type from v$parameter where name='%s'", sql.StringParam(req.Key))
+	paramDatatype, err := fetchAndParseSingleResultQuery(ctx, client, query)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/SetParameter: error while inferring parameter data type: %v", err)
+	}
+	// String parameters need to be quoted; those have type 2. See the
+	// parameter type descriptions at
+	// https://docs.oracle.com/database/121/REFRN/GUID-C86F3AB0-1191-447F-8EDF-4727D8693754.htm
+	isStringParam := paramDatatype == "2"
+	command, err := sql.QuerySetSystemParameterNoPanic(req.Key, req.Value, isStringParam)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/SetParameter: error constructing set parameter query: %v", err)
+	}
+
+	isStatic := false
+	if paramType == "FALSE" {
+		klog.InfoS("configagent/SetParameter", "parameter_type", "STATIC")
+		command = fmt.Sprintf("%s scope=spfile", command)
+		isStatic = true
+	}
+
+	_, err = client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+		Commands: []string{command},
+		Suppress: false,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/SetParameter: error while executing parameter command %q: %v", command, err)
+	}
+	return &pb.SetParameterResponse{Static: isStatic}, nil
+}
+
+// GetParameterTypeValue returns parameters' types and values by querying the DB.
+func (s *ConfigServer) GetParameterTypeValue(ctx context.Context, req *pb.GetParameterTypeValueRequest) (*pb.GetParameterTypeValueResponse, error) {
+	klog.InfoS("configagent/GetParameterTypeValue", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/GetParameterTypeValue: failed to create dbdClient: %v", err)
+	}
+	defer closeConn()
+	klog.InfoS("configagent/GetParameterTypeValue", "client", client)
+
+	types := []string{}
+	values := []string{}
+
+	for _, key := range req.GetKeys() {
+		query := fmt.Sprintf("select issys_modifiable from v$parameter where name='%s'", sql.StringParam(key))
+		value, err := fetchAndParseSingleResultQuery(ctx, client, query)
+		if err != nil {
+			return nil, fmt.Errorf("configagent/GetParameterTypeValue: error while fetching type for %v: %v", key, err)
+		}
+		types = append(types, value)
+	}
+	for _, key := range req.GetKeys() {
+		query := fmt.Sprintf("select value from v$parameter where name='%s'", sql.StringParam(key))
+		value, err := fetchAndParseSingleResultQuery(ctx, client, query)
+		if err != nil {
+			return nil, fmt.Errorf("configagent/GetParameterTypeValue: error while fetching value for %v: %v", key, err)
+		}
+		values = append(values, value)
+	}
+
+	return &pb.GetParameterTypeValueResponse{Types: types, Values: values}, nil
+}
+
+// BounceDatabase shuts the database down and starts it up again as requested.
+func (s *ConfigServer) BounceDatabase(ctx context.Context, req *pb.BounceDatabaseRequest) (*pb.BounceDatabaseResponse, error) {
+	klog.InfoS("configagent/BounceDatabase", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BounceDatabase: failed to create dbdClient: %v", err)
+	}
+	defer closeConn()
+
+	klog.InfoS("configagent/BounceDatabase", "client", client)
+	_, err = client.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+		Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+		DatabaseName: req.Sid,
+		Option:       "immediate",
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BounceDatabase: error while shutting down the database: %v", err)
+	}
+	klog.InfoS("configagent/BounceDatabase: shutdown successful")
+
+	_, err = client.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+		Operation:         dbdpb.BounceDatabaseRequest_STARTUP,
+		DatabaseName:      req.Sid,
+		AvoidConfigBackup: req.AvoidConfigBackup,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("configagent/BounceDatabase: error while starting the database: %v", err)
+	}
+	klog.InfoS("configagent/BounceDatabase: startup successful")
+	return &pb.BounceDatabaseResponse{}, nil
+}
+
+// RecoverConfigFile generates the binary spfile from the human-readable backup pfile.
+func (s *ConfigServer) RecoverConfigFile(ctx context.Context, req *pb.RecoverConfigFileRequest) (*pb.RecoverConfigFileResponse, error) {
+	klog.InfoS("configagent/RecoverConfigFile", "req", req)
+	client, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/RecoverConfigFile: failed to create dbdClient: %v", err)
+	}
+	defer closeConn()
+
+	if _, err := client.RecoverConfigFile(ctx, &dbdpb.RecoverConfigFileRequest{CdbName: req.CdbName}); err != nil {
+		klog.InfoS("configagent/RecoverConfigFile: error while recovering config file", "err", err)
+		return nil, fmt.Errorf("configagent/RecoverConfigFile: failed to recover config file due to: %v", err)
+	}
+	klog.InfoS("configagent/RecoverConfigFile: config file recovery successful")
+
+	return &pb.RecoverConfigFileResponse{}, nil
+}
+
+// fetchAndParseSingleResultQuery is a utility method intended for running single-result queries.
+// It parses the single-column JSON result set (returned by the RunSQLPlusFormatted API) and
+// returns the first value.
+func fetchAndParseSingleResultQuery(ctx context.Context, client dbdpb.DatabaseDaemonClient, query string) (string, error) {
+	sqlRequest := &dbdpb.RunSQLPlusCMDRequest{
+		Commands: []string{query},
+		Suppress: false,
+	}
+	response, err := client.RunSQLPlusFormatted(ctx, sqlRequest)
+	if err != nil {
+		return "", fmt.Errorf("failed to run query %q; DSN: %q; error: %v", query, sqlRequest.GetDsn(), err)
+	}
+	result, err := parseSQLResponse(response)
+	if err != nil {
+		return "", fmt.Errorf("error while parsing query response: %q; error: %v", query, err)
+	}
+
+	var rows []string
+	for _, row := range result {
+		if len(row) != 1 {
+			return "", fmt.Errorf("fetchAndParseSingleResultQuery: # of cols returned by query != 1: %v", row)
+		}
+		for _, v := range row {
+			rows = append(rows, v)
+		}
+	}
+	if len(rows) == 0 {
+		return "", fmt.Errorf("fetchAndParseSingleResultQuery: query %q returned no rows", query)
+	}
+	return rows[0], nil
+}
+
+func buildPDB(cdbName, pdbName, pdbAdminPass, version string, listeners map[string]*consts.Listener, skipUserCheck bool) (*pdb, error) {
+	// For consistency's sake, keep all PDB names uppercase.
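+	// e.g. (illustrative) pdbName "mydb" becomes pluggableDatabaseName "MYDB",
+	// with defaultTablespace "MYDB_USERS".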
+	pdbName = strings.ToUpper(pdbName)
+	host, err := os.Hostname()
+	if err != nil {
+		return nil, err
+	}
+	return &pdb{
+		pluggableDatabaseName:     pdbName,
+		pluggableAdminPasswd:      pdbAdminPass,
+		containerDatabaseName:     cdbName,
+		dataFilesDir:              fmt.Sprintf(consts.PDBDataDir, consts.DataMount, cdbName, pdbName),
+		defaultTablespace:         fmt.Sprintf("%s_USERS", pdbName),
+		defaultTablespaceDatafile: fmt.Sprintf(consts.PDBDataDir+"/%s_users.dbf", consts.DataMount, cdbName, pdbName, strings.ToLower(pdbName)),
+		pathPrefix:                fmt.Sprintf(consts.PDBPathPrefix, consts.DataMount, cdbName, pdbName),
+		fileConvertFrom:           fmt.Sprintf(consts.PDBSeedDir, consts.DataMount, cdbName),
+		fileConvertTo:             fmt.Sprintf(consts.PDBDataDir, consts.DataMount, cdbName, pdbName),
+		listenerDir:               fmt.Sprintf(consts.ListenerDir, consts.DataMount),
+		listeners:                 listeners,
+		version:                   version,
+		hostName:                  host,
+		skipUserCheck:             skipUserCheck,
+	}, nil
+}
+
+// FetchServiceImageMetaData fetches the image metadata from the service image.
+func (s *ConfigServer) FetchServiceImageMetaData(ctx context.Context, req *pb.FetchServiceImageMetaDataRequest) (*pb.FetchServiceImageMetaDataResponse, error) {
+	dbdClient, closeConn, err := newDBDClient(ctx, s)
+	if err != nil {
+		return nil, fmt.Errorf("configagent/FetchServiceImageMetaData: failed to create database daemon client: %w", err)
+	}
+	defer func() { _ = closeConn() }()
+	metaData, err := dbdClient.FetchServiceImageMetaData(ctx, &dbdpb.FetchServiceImageMetaDataRequest{})
+	if err != nil {
+		return &pb.FetchServiceImageMetaDataResponse{}, nil
+	}
+	return &pb.FetchServiceImageMetaDataResponse{Version: metaData.Version, CdbName: metaData.CdbName, OracleHome: metaData.OracleHome}, nil
+}
+
+// AccessSecretVersionFunc accesses the payload for the given secret version if one
+// exists. The version can be a version number as a string (e.g. "5") or an
+// alias (e.g. "latest").
+var AccessSecretVersionFunc = func(ctx context.Context, name string) (string, error) {
+	// Create the GSM client.
+	client, closeConn, err := newGsmClient(ctx)
+	if err != nil {
+		return "", fmt.Errorf("configagent/AccessSecretVersionFunc: failed to create secretmanager client: %v", err)
+	}
+	defer closeConn()
+
+	// Build the request.
+	req := &secretmanagerpb.AccessSecretVersionRequest{
+		Name: name,
+	}
+
+	// Call the API.
+	result, err := client.AccessSecretVersion(ctx, req)
+	if err != nil {
+		return "", fmt.Errorf("configagent/AccessSecretVersionFunc: failed to access secret version: %v", err)
+	}
+
+	return string(result.Payload.Data), nil
+}
diff --git a/oracle/pkg/agents/config_agent/server/configserver_test.go b/oracle/pkg/agents/config_agent/server/configserver_test.go
new file mode 100644
index 0000000..095ea20
--- /dev/null
+++ b/oracle/pkg/agents/config_agent/server/configserver_test.go
@@ -0,0 +1,942 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package configagent + +import ( + "context" + "errors" + "fmt" + "net" + "sort" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + pb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +var ( + gsmRefNoChange = &pb.GsmSecretReference{ + ProjectId: "test-project", + SecretId: "test-secret", + Version: "1", + LastVersion: fmt.Sprintf(gsmSecretStr, "test-project", "test-secret", "1"), + } + + gsmRefWithChange = &pb.GsmSecretReference{ + ProjectId: "test-project", + SecretId: "test-secret", + Version: "2", + LastVersion: fmt.Sprintf(gsmSecretStr, "test-project", "test-secret", "1"), + } + + sampleSqlToResp = map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: { + `{"PRIVILEGE": "CREATE SESSION"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + `{"GRANTED_ROLE": "CONNECT"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + } +) + +func TestConfigServerUsersChanged(t *testing.T) { + dbdServer := &fakeServer{} + client, cleanup := newFakeDatabaseDaemonClient(t, dbdServer) + newDBDClientBak := newDBDClient + newDBDClient = func(context.Context, *ConfigServer) (dbdpb.DatabaseDaemonClient, func() error, error) { + return client, func() error { return nil }, nil + } + defer func() { + newDBDClient = newDBDClientBak + cleanup() + }() + ctx := context.Background() + testCases := []struct { + name string + sqlToResp map[string][]string + req *pb.UsersChangedRequest + wantChanged bool + wantSuppressedUsers []string + }{ + { + name: "no user changed", + sqlToResp: sampleSqlToResp, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantChanged: false, + }, + { + name: "added a user in spec", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from 
dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: { + `{"PRIVILEGE": "CREATE SESSION"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantChanged: true, + }, + { + name: "deleted users in spec", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: { + `{"PRIVILEGE": "CREATE SESSION"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + `{"GRANTED_ROLE": "CONNECT"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + }, + }, + wantChanged: false, + wantSuppressedUsers: []string{"PROBERUSER", "SCOTT"}, + }, + { + name: "user added privs", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: {}, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + Privileges: []string{"dba"}, 
+ }, + { + Name: "scott", + Password: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantChanged: true, + }, + { + name: "user added and deleted privs", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: {}, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + Privileges: []string{"connect"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantChanged: true, + }, + { + name: "User updated plaintext password", + sqlToResp: sampleSqlToResp, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword1", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger1", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword1", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantChanged: true, + }, + { + name: "User updated gsm password", + sqlToResp: sampleSqlToResp, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + { + Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + }, + }, + wantChanged: true, + }, + { + name: "User gsm password no update", + sqlToResp: sampleSqlToResp, + req: &pb.UsersChangedRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + }, + }, + wantChanged: false, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + AccessSecretVersionFunc = func(ctx context.Context, name string) 
(string, error) { + return "topsecuredsecret", nil + } + tc.sqlToResp[`alter session set container="MYDB";select role from dba_roles`] = []string{ + `{"ROLE": "CONNECT"}`, + `{"ROLE": "RESOURCE"}`, + `{"ROLE": "DBA"}`, + `{"ROLE": "PDB_DBA"}`, + `{"ROLE": "AUDIT_ADMIN"}`, + } + dbdServer.fakeRunSQLPlusFormatted = func(ctx context.Context, request *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + sql := strings.Join(request.GetCommands(), ";") + resp, ok := tc.sqlToResp[sql] + if !ok { + return nil, fmt.Errorf("failed to find mock sql resp for %q", sql) + } + return &dbdpb.RunCMDResponse{ + Msg: resp, + }, nil + } + configServer := &ConfigServer{} + resp, err := configServer.UsersChanged(ctx, tc.req) + if err != nil { + t.Fatalf("UsersChanged(ctx, %v) failed: %v", tc.req, err) + } + if resp.Changed != tc.wantChanged { + t.Errorf("UsersChanged got resp.Changed %v, want resp.Changed %v", resp.GetChanged(), tc.wantChanged) + } + var gotSuppressedUsers []string + for _, s := range resp.Suppressed { + gotSuppressedUsers = append(gotSuppressedUsers, s.UserName) + } + sort.Strings(gotSuppressedUsers) + if diff := cmp.Diff(tc.wantSuppressedUsers, gotSuppressedUsers); diff != "" { + t.Errorf("UsersChanged got unexpected resp.Suppressed for users: -want +got %v", diff) + } + for _, s := range resp.Suppressed { + wantSQL := fmt.Sprintf(`alter session set container="MYDB"; DROP USER %q CASCADE;`, s.UserName) + if s.Sql != wantSQL { + t.Errorf("UsersChanged got unexpected resp.Suppressed SQL %q, want %q", s.Sql, wantSQL) + } + } + }) + } +} + +func TestConfigServerUpdateUsers(t *testing.T) { + dbdServer := &fakeServer{} + client, cleanup := newFakeDatabaseDaemonClient(t, dbdServer) + newDBDClientBak := newDBDClient + newDBDClient = func(context.Context, *ConfigServer) (dbdpb.DatabaseDaemonClient, func() error, error) { + return client, func() error { return nil }, nil + } + defer func() { + newDBDClient = newDBDClientBak + cleanup() + }() + ctx := context.Background() + testCases := []struct { + name string + sqlToResp map[string][]string + req *pb.UpdateUsersRequest + wantSQLs [][]string + }{ + { + name: "no user changed", + sqlToResp: sampleSqlToResp, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + }, + { + name: "added a user in spec", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: { + `{"PRIVILEGE": "CREATE SESSION"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: 
[]*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantSQLs: [][]string{ + { + `alter session set container="MYDB"`, + `create user "SCOTT" identified by "tiger"`, + `grant CONNECT to "SCOTT"`, + `grant RESOURCE to "SCOTT"`, + `grant UNLIMITED TABLESPACE to "SCOTT"`, + }, + }, + }, + { + name: "deleted users in spec", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: { + `{"PRIVILEGE": "CREATE SESSION"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + `{"GRANTED_ROLE": "CONNECT"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + }, + }, + }, + { + name: "user added privs", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: {}, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + LastPassword: "proberpassword", + Privileges: 
[]string{"create session"}, + }, + }, + }, + wantSQLs: [][]string{ + {`alter session set container="MYDB"`, `grant CREATE SESSION to "PROBERUSER"`}, + {`alter session set container="MYDB"`, `grant CONNECT to "SCOTT"`}, + }, + }, + { + name: "user added and deleted privs", + sqlToResp: map[string][]string{ + `alter session set container="MYDB";select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'`: { + `{"USERNAME": "GPDB_ADMIN"}`, + `{"USERNAME": "SUPERUSER"}`, + `{"USERNAME": "SCOTT"}`, + `{"USERNAME": "PROBERUSER"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SUPERUSER'`: {}, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='SCOTT'`: { + `{"PRIVILEGE": "UNLIMITED TABLESPACE"}`, + }, + `alter session set container="MYDB";select privilege from dba_sys_privs where grantee='PROBERUSER'`: {}, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SUPERUSER'`: { + `{"GRANTED_ROLE": "DBA"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='SCOTT'`: { + `{"GRANTED_ROLE": "RESOURCE"}`, + }, + `alter session set container="MYDB";select granted_role from dba_role_privs where grantee='PROBERUSER'`: {}, + }, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "superuser", + Password: "superpassword", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + { + Name: "scott", + Password: "tiger", + LastPassword: "tiger", + Privileges: []string{"connect"}, + }, + { + Name: "proberuser", + Password: "proberpassword", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + }, + }, + wantSQLs: [][]string{ + {`alter session set container="MYDB"`, `grant CREATE SESSION to "PROBERUSER"`}, + {`alter session set container="MYDB"`, `grant CONNECT to "SCOTT"`}, + // revoke role first then privs + {`alter session set container="MYDB"`, `revoke RESOURCE from "SCOTT"`}, + {`alter session set container="MYDB"`, `revoke UNLIMITED TABLESPACE from "SCOTT"`}, + }, + }, + { + name: "user updated plaintext password", + sqlToResp: sampleSqlToResp, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "proberuser", + Password: "proberpassword1", + LastPassword: "proberpassword", + Privileges: []string{"create session"}, + }, + { + Name: "scott", + Password: "tiger1", + LastPassword: "tiger", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + }, + { + Name: "superuser", + Password: "superpassword1", + LastPassword: "superpassword", + Privileges: []string{"dba"}, + }, + }, + }, + wantSQLs: [][]string{ + {`alter session set container="MYDB"`, `alter user "PROBERUSER" identified by "proberpassword1"`}, + {`alter session set container="MYDB"`, `alter user "SCOTT" identified by "tiger1"`}, + {`alter session set container="MYDB"`, `alter user "SUPERUSER" identified by "superpassword1"`}, + }, + }, + { + name: "user updated gsm password", + sqlToResp: sampleSqlToResp, + req: &pb.UpdateUsersRequest{ + PdbName: "MYDB", + UserSpecs: []*pb.User{ + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + { + Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefWithChange, + }, + }, + }, + 
wantSQLs: [][]string{ + {`alter session set container="MYDB"`, `alter user "PROBERUSER" identified by "topsecuredsecret"`}, + {`alter session set container="MYDB"`, `alter user "SCOTT" identified by "topsecuredsecret"`}, + {`alter session set container="MYDB"`, `alter user "SUPERUSER" identified by "topsecuredsecret"`}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + AccessSecretVersionFunc = func(ctx context.Context, name string) (string, error) { + return "topsecuredsecret", nil + } + tc.sqlToResp[`alter session set container="MYDB";select role from dba_roles`] = []string{ + `{"ROLE": "CONNECT"}`, + `{"ROLE": "RESOURCE"}`, + `{"ROLE": "DBA"}`, + `{"ROLE": "PDB_DBA"}`, + `{"ROLE": "AUDIT_ADMIN"}`, + } + dbdServer.fakeRunSQLPlusFormatted = func(ctx context.Context, request *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + sql := strings.Join(request.GetCommands(), ";") + resp, ok := tc.sqlToResp[sql] + if !ok { + return nil, fmt.Errorf("failed to find mock sql resp for %q", sql) + } + return &dbdpb.RunCMDResponse{ + Msg: resp, + }, nil + } + var gotSQLs [][]string + dbdServer.fakeRunSQLPlus = func(ctx context.Context, request *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + gotSQLs = append(gotSQLs, request.GetCommands()) + return &dbdpb.RunCMDResponse{}, nil + } + configServer := &ConfigServer{} + _, err := configServer.UpdateUsers(ctx, tc.req) + if err != nil { + t.Fatalf("UpdateUsers(ctx, %v) failed: %v", tc.req, err) + } + if len(tc.wantSQLs) != len(gotSQLs) { + t.Fatalf("UpdateUsers got %d SQLs cmd, want %d SQLs cmd", len(gotSQLs), len(tc.wantSQLs)) + } + for idx := range gotSQLs { + if diff := cmp.Diff(tc.wantSQLs[idx], gotSQLs[idx]); diff != "" { + t.Errorf("UpdateUsers got unexpected SQLs: -want +got %v", diff) + } + } + }) + } +} + +func TestConfigServerCreateUsers(t *testing.T) { + dbdServer := &fakeServer{} + client, cleanup := newFakeDatabaseDaemonClient(t, dbdServer) + newDBDClientBak := newDBDClient + newDBDClient = func(context.Context, *ConfigServer) (dbdpb.DatabaseDaemonClient, func() error, error) { + return client, func() error { return nil }, nil + } + defer func() { + newDBDClient = newDBDClientBak + cleanup() + }() + ctx := context.Background() + testCases := []struct { + name string + sqlToResp map[string][]string + req *pb.CreateUsersRequest + fakeGsmFunc func(ctx context.Context, name string) (string, error) + wantResp *pb.CreateUsersResponse + wantErr error + }{ + { + name: "Create users successfully", + sqlToResp: map[string][]string{ + `alter session set container="MYDB"`: {}, + }, + req: &pb.CreateUsersRequest{ + PdbName: "MYDB", + User: []*pb.User{ + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + }, + }, + fakeGsmFunc: func(ctx context.Context, name string) (string, error) { + return "topsecuredsecret", nil + }, + wantResp: &pb.CreateUsersResponse{Status: "Ready"}, + wantErr: nil, + }, + { + name: "Create users failed due to GSM access", + sqlToResp: map[string][]string{ + `alter session set container="MYDB"`: {}, + }, + req: &pb.CreateUsersRequest{ + PdbName: "MYDB", + User: []*pb.User{ + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + 
Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + }, + }, + fakeGsmFunc: func(ctx context.Context, name string) (string, error) { + return "topsecuredsecret", errors.New("error access gsm") + }, + wantResp: &pb.CreateUsersResponse{Status: "Ready"}, + wantErr: fmt.Errorf("configagent/CreateUsers: failed to retrieve secret from Google Secret Manager: %v", errors.New("error access gsm")), + }, + { + name: "Create users failed due GSM holding an invalid password", + sqlToResp: map[string][]string{ + `alter session set container="MYDB"`: {}, + }, + req: &pb.CreateUsersRequest{ + PdbName: "MYDB", + User: []*pb.User{ + { + Name: "superuser", + Privileges: []string{"dba"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "scott", + Privileges: []string{"connect", "resource", "unlimited tablespace"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + { + Name: "proberuser", + Privileges: []string{"create session"}, + PasswordGsmSecretRef: gsmRefNoChange, + }, + }, + }, + fakeGsmFunc: func(ctx context.Context, name string) (string, error) { + return `x"; drop table all_users; --"`, nil + }, + wantResp: &pb.CreateUsersResponse{Status: "Ready"}, + wantErr: fmt.Errorf(`configagent/CreateUsers: Google Secret Manager contains an invalid password for user "superuser": %v`, sql.ErrQuoteInIdentifier), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + AccessSecretVersionFunc = tc.fakeGsmFunc + dbdServer.fakeRunSQLPlus = func(ctx context.Context, request *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + sql := strings.Join(request.GetCommands(), ";") + resp, ok := tc.sqlToResp[sql] + if !ok { + return nil, fmt.Errorf("failed to find mock sql resp for %q", sql) + } + return &dbdpb.RunCMDResponse{ + Msg: resp, + }, nil + } + configServer := &ConfigServer{} + resp, err := configServer.CreateUsers(ctx, tc.req) + if err != nil && tc.wantErr != nil { + if diff := cmp.Diff(err.Error(), tc.wantErr.Error()); diff != "" { + t.Fatalf("CreateUsers(ctx, %v) want error: %v, got: %v", tc.req, tc.wantErr, err) + } + } + if resp != nil && resp.Status != tc.wantResp.Status { + t.Errorf("CreateUsers got resp.Status %v, want resp.Status %v", resp.Status, tc.wantResp.Status) + } + }) + } +} + +type fakeServer struct { + *dbdpb.UnimplementedDatabaseDaemonServer + fakeRunSQLPlus func(context.Context, *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) + fakeRunSQLPlusFormatted func(context.Context, *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) +} + +func (f *fakeServer) RunSQLPlus(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + if f.fakeRunSQLPlus == nil { + return nil, errors.New("RunSQLPlus fake not found") + } + return f.fakeRunSQLPlus(ctx, req) +} + +func (f *fakeServer) RunSQLPlusFormatted(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + if f.fakeRunSQLPlusFormatted == nil { + return nil, errors.New("RunSQLPlusFormatted fake not found") + } + return f.fakeRunSQLPlusFormatted(ctx, req) +} +func (f *fakeServer) CheckDatabaseState(context.Context, *dbdpb.CheckDatabaseStateRequest) (*dbdpb.CheckDatabaseStateResponse, error) { + return &dbdpb.CheckDatabaseStateResponse{}, nil +} + +func newFakeDatabaseDaemonClient(t *testing.T, server *fakeServer) (dbdpb.DatabaseDaemonClient, func()) { + 
t.Helper() + grpcSvr := grpc.NewServer() + + dbdpb.RegisterDatabaseDaemonServer(grpcSvr, server) + lis := bufconn.Listen(2 * 1024 * 1024) + go grpcSvr.Serve(lis) + + dbdConn, err := grpc.Dial("test", + grpc.WithInsecure(), + grpc.WithContextDialer( + func(ctx context.Context, s string) (conn net.Conn, err error) { + return lis.Dial() + }), + ) + if err != nil { + t.Fatalf("failed to dial dbDaemon: %v", err) + } + return dbdpb.NewDatabaseDaemonClient(dbdConn), func() { + dbdConn.Close() + grpcSvr.GracefulStop() + } +} diff --git a/oracle/pkg/agents/config_agent/server/user_repository.go b/oracle/pkg/agents/config_agent/server/user_repository.go new file mode 100644 index 0000000..4484b53 --- /dev/null +++ b/oracle/pkg/agents/config_agent/server/user_repository.go @@ -0,0 +1,476 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configagent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + "bitbucket.org/creachadair/stringset" + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common/sql" + pb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/config_agent/protos" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +// users describes the managed Oracle PDB users. +type users struct { + databaseName string + databaseRoles map[string]bool + nameToUser map[string]*user + // envUserNames keeps track of the managed users. + // The value is initialized/refreshed with the method users.readEnv. + envUserNames []string +} + +// diff returns the users to be created, updated, and deleted, computed by comparing the k8s spec against the real environment.
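
Illustrative aside (not part of the patch): the classification below reduces to a three-way set split via the compare helper defined at the end of this file. A minimal sketch of the intended semantics, written as if inside this package, with hypothetical user names:

    // Sketch only: spec vs. environment classification used by users.diff.
    spec := []string{"SCOTT", "SUPERUSER"} // users declared in the k8s spec
    env := []string{"SCOTT", "LEGACY"}     // users found in the PDB
    toCreate, toCheck, toDelete := compare(spec, env)
    // toCreate = [SUPERUSER] -> user.create(...)
    // toCheck  = [SCOTT]     -> per-user diff of privileges and password
    // toDelete = [LEGACY]    -> newNoSpecUser(...) then user.delete()
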
+func (us *users) diff(ctx context.Context, client dbdpb.DatabaseDaemonClient) (toCreateUsers, toUpdateUsers, toDeleteUsers, toUpdatePwdUsers []*user, err error) { + if err := us.readEnv(ctx, client); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to read the env users: %v", err) + } + var specUserNames []string + for k := range us.nameToUser { + specUserNames = append(specUserNames, k) + } + toCreate, toCheck, toDelete := compare(specUserNames, us.envUserNames) + toCreateUsers, err = us.getUsers(toCreate) + if err != nil { + return nil, nil, nil, nil, err + } + toCheckUsers, err := us.getUsers(toCheck) + if err != nil { + return nil, nil, nil, nil, err + } + for _, d := range toDelete { + du := newNoSpecUser(us.databaseName, d) + if err := du.readEnv(ctx, client); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to read the env user %v: %v", du, err) + } + toDeleteUsers = append(toDeleteUsers, du) + } + for _, u := range toCheckUsers { + toGrant, toRevoke, toUpdatePwd, err := u.diff(ctx, client) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to diff the user %v: %v", u, err) + } + if len(toGrant) != 0 || len(toRevoke) != 0 { + toUpdateUsers = append(toUpdateUsers, u) + } + if toUpdatePwd { + toUpdatePwdUsers = append(toUpdatePwdUsers, u) + } + } + + return toCreateUsers, toUpdateUsers, toDeleteUsers, toUpdatePwdUsers, nil +} + +func (us *users) readEnv(ctx context.Context, client dbdpb.DatabaseDaemonClient) error { + envUserNames, err := queryDB( + ctx, + client, + us.databaseName, + "select username from dba_users where ORACLE_MAINTAINED='N' and INHERITED='NO'", + "USERNAME", + func(userName string) bool { + return userName != pdbAdmin + }, + ) + if err != nil { + return fmt.Errorf("failed to load users from DB: %v", err) + } + us.envUserNames = envUserNames + roles, err := queryDB( + ctx, + client, + us.databaseName, + "select role from dba_roles", + "ROLE", + func(roleName string) bool { + return true + }, + ) + if err != nil { + return fmt.Errorf("failed to load roles from DB: %v", err) + } + us.databaseRoles = make(map[string]bool) + for _, role := range roles { + us.databaseRoles[role] = true + } + return nil +} + +func (us *users) getUsers(names []string) ([]*user, error) { + var res []*user + for _, name := range names { + u, ok := us.nameToUser[name] + if !ok { + return nil, fmt.Errorf("failed to find %s in %v", name, us.nameToUser) + } + res = append(res, u) + } + return res, nil +} + +func newUsers(databaseName string, userSpecs []*pb.User) *users { + nameToUser := make(map[string]*user) + for _, us := range userSpecs { + nameToUser[strings.ToUpper(us.GetName())] = newUser(databaseName, us) + } + + return &users{ + databaseName: strings.ToUpper(databaseName), + nameToUser: nameToUser, + } +} + +// user describes a managed Oracle PDB user. +type user struct { + databaseName string + userName string + specPrivs []string + // envDbaSysPrivs keeps track of the privileges granted to the user (dba_sys_privs table). + // The value is initialized/refreshed with the method user.readEnv. + envDbaSysPrivs []string + // envDbaRolePrivs keeps track of the roles granted to the user (dba_role_privs table). + // The value is initialized/refreshed with the method user.readEnv. + envDbaRolePrivs []string + // gsmSecNewVer is the new GSM secret version from the spec. + gsmSecNewVer string + // gsmSecCurVer is the current GSM secret version.
+ gsmSecCurVer string + // newPassword is used by both gsm and plaintext; + // it can be overwritten later if GSM is enabled. + newPassword string + // curPassword is only used for the plaintext status diff. + curPassword string +} + +func (u *user) readEnv(ctx context.Context, client dbdpb.DatabaseDaemonClient) error { + sysPrivs, err := queryDB( + ctx, + client, + u.databaseName, + fmt.Sprintf("select privilege from dba_sys_privs where grantee='%s'", sql.StringParam(u.userName)), + "PRIVILEGE", + func(string) bool { + return true + }, + ) + if err != nil { + return fmt.Errorf("failed to query dba sys privileges: %v", err) + } + u.envDbaSysPrivs = sysPrivs + rolePrivs, err := queryDB( + ctx, + client, + u.databaseName, + fmt.Sprintf("select granted_role from dba_role_privs where grantee='%s'", sql.StringParam(u.userName)), + "GRANTED_ROLE", + func(string) bool { + return true + }, + ) + if err != nil { + return fmt.Errorf("failed to query dba role privileges: %v", err) + } + u.envDbaRolePrivs = rolePrivs + return nil +} + +// diff returns the privileges to be granted and revoked, computed by comparing the k8s spec against the real environment. +func (u *user) diff(ctx context.Context, client dbdpb.DatabaseDaemonClient) (toGrant, toRevoke []string, toUpdatePwd bool, err error) { + if err := u.readEnv(ctx, client); err != nil { + return nil, nil, false, fmt.Errorf("failed to read the env user: %v", err) + } + var envPrivs []string + envPrivs = append(envPrivs, u.envDbaSysPrivs...) + envPrivs = append(envPrivs, u.envDbaRolePrivs...) + toGrant, _, toRevoke = compare(u.specPrivs, envPrivs) + // Always update the password if the requested version differs from the current one, + // or if the requested version is "latest" (if the password behind "latest" equals + // the current one, the underlying ALTER USER is a harmless no-op and reports no error).
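
Illustrative aside (not part of the patch): the expression just below implements the comment above. A minimal sketch of the same predicate as a standalone helper (hypothetical name, requires the strings import), assuming gsmSecNewVer/gsmSecCurVer hold full Secret Manager resource names of the form projects/<p>/secrets/<s>/versions/<v>:

    // needsGsmPwdUpdate mirrors the toUpdateGsmPwd expression below.
    func needsGsmPwdUpdate(newVer, curVer string) bool {
        return (newVer != "" && !strings.EqualFold(newVer, curVer)) ||
            strings.HasSuffix(newVer, "latest")
    }

    // needsGsmPwdUpdate(".../versions/2", ".../versions/1")           == true  (versions differ)
    // needsGsmPwdUpdate(".../versions/1", ".../versions/1")           == false (no change)
    // needsGsmPwdUpdate(".../versions/latest", ".../versions/latest") == true  ("latest" may hide a new payload)
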
+ toUpdateGsmPwd := (u.gsmSecNewVer != "" && !strings.EqualFold(u.gsmSecNewVer, u.gsmSecCurVer)) || strings.HasSuffix(u.gsmSecNewVer, "latest") + if toUpdateGsmPwd { + var gsmPwd string + gsmPwd, err = AccessSecretVersionFunc(ctx, u.gsmSecNewVer) + if err != nil { + return nil, nil, false, fmt.Errorf("failed to read GSM secret: %v", err) + } + u.newPassword = gsmPwd + } + toUpdatePlaintextPwd := u.curPassword != "" && u.curPassword != u.newPassword + return toGrant, toRevoke, toUpdateGsmPwd || toUpdatePlaintextPwd, nil +} + +func (u *user) create(ctx context.Context, client dbdpb.DatabaseDaemonClient) error { + var grantCmds []string + for _, p := range u.specPrivs { + grantCmds = append(grantCmds, sql.QueryGrantPrivileges(p, u.userName)) + } + sqls := append( + []string{ + sql.QuerySetSessionContainer(u.databaseName), + sql.QueryCreateUser(u.userName, u.newPassword), + }, + grantCmds..., + ) + if _, err := client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: sqls, + }); err != nil { + return fmt.Errorf("failed to create user %v: %v", u, err) + } + return nil +} + +func (u *user) update(ctx context.Context, client dbdpb.DatabaseDaemonClient, roles map[string]bool) error { + if err := u.updateRolePrivs(ctx, client, roles); err != nil { + return err + } + if err := u.updateSysPrivs(ctx, client, roles); err != nil { + return err + } + return nil + +} + +func (u *user) updateUserPassword(ctx context.Context, client dbdpb.DatabaseDaemonClient) error { + _, _, toUpdate, err := u.diff(ctx, client) + if err != nil { + return fmt.Errorf("failed to get diff to update user %v: %v", u, err) + } + if !toUpdate { + return nil + } + if err := u.updatePassword(ctx, client); err != nil { + return fmt.Errorf("failed to alter user %s: %v", u.userName, err) + } + return nil +} + +func (u *user) updateRolePrivs(ctx context.Context, client dbdpb.DatabaseDaemonClient, roles map[string]bool) error { + toGrant, toRevoke, _, err := u.diff(ctx, client) + if err != nil { + return fmt.Errorf("failed to get diff to update user %v: %v", u, err) + } + var toGrantRoles, toRevokeRoles []string + for _, g := range toGrant { + if roles[g] { + toGrantRoles = append(toGrantRoles, g) + } + } + + for _, r := range toRevoke { + if roles[r] { + toRevokeRoles = append(toRevokeRoles, r) + } + } + + if err := u.grant(ctx, client, toGrantRoles); err != nil { + return fmt.Errorf("failed to grant roles %v to user %s: %v", toGrantRoles, u.userName, err) + } + if err := u.revoke(ctx, client, toRevokeRoles); err != nil { + return fmt.Errorf("failed to revoke roles %v from user %s: %v", toRevokeRoles, u.userName, err) + } + return nil +} + +func (u *user) updateSysPrivs(ctx context.Context, client dbdpb.DatabaseDaemonClient, roles map[string]bool) error { + toGrant, toRevoke, _, err := u.diff(ctx, client) + if err != nil { + return fmt.Errorf("failed to get diff to update user %v: %v", u, err) + } + + var toGrantPrivs, toRevokePrivs []string + for _, g := range toGrant { + if !roles[g] { + toGrantPrivs = append(toGrantPrivs, g) + } + } + + for _, r := range toRevoke { + if !roles[r] { + toRevokePrivs = append(toRevokePrivs, r) + } + } + + if err := u.grant(ctx, client, toGrantPrivs); err != nil { + return fmt.Errorf("failed to grant privs %v to user %s: %v", toGrantPrivs, u.userName, err) + } + if err := u.revoke(ctx, client, toRevokePrivs); err != nil { + return fmt.Errorf("failed to revoke privs %v from user %s: %v", toRevokePrivs, u.userName, err) + } + return nil +} + +func (u *user) updatePassword(ctx context.Context, client 
dbdpb.DatabaseDaemonClient) error { + alterUserCmds := []string{sql.QueryAlterUser(u.userName, u.newPassword)} + sqls := append([]string{sql.QuerySetSessionContainer(u.databaseName)}, alterUserCmds...) + if _, err := client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: sqls, + Suppress: true, + }); err != nil { + return fmt.Errorf("failed to alter user %s: %v", u.userName, err) + } + return nil +} + +func (u *user) grant(ctx context.Context, client dbdpb.DatabaseDaemonClient, toGrant []string) error { + if len(toGrant) == 0 { + return nil + } + var grantCmds []string + for _, p := range toGrant { + grantCmds = append(grantCmds, sql.QueryGrantPrivileges(p, u.userName)) + } + sqls := append([]string{sql.QuerySetSessionContainer(u.databaseName)}, grantCmds...) + if _, err := client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: sqls, + }); err != nil { + return fmt.Errorf("failed to grant %v to user %s: %v", toGrant, u.userName, err) + } + return nil +} + +func (u *user) revoke(ctx context.Context, client dbdpb.DatabaseDaemonClient, toRevoke []string) error { + if len(toRevoke) == 0 { + return nil + } + var revokeCmds []string + for _, p := range toRevoke { + revokeCmds = append(revokeCmds, sql.QueryRevokePrivileges(p, u.userName)) + } + sqls := append([]string{sql.QuerySetSessionContainer(u.databaseName)}, revokeCmds...) + if _, err := client.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: sqls, + }); err != nil { + return fmt.Errorf("failed to revoke %v from user %s: %v", toRevoke, u.userName, err) + } + return nil +} + +func (u *user) delete() (suppressedSQLs string) { + return sql.QuerySetSessionContainer(u.databaseName) + fmt.Sprintf("; DROP USER %s CASCADE;", sql.MustBeObjectName(u.userName)) +} + +func (u *user) String() string { + return fmt.Sprintf("{database: %q, name: %q, specPrivs: %v, envSysPrivs: %v, envRolePrivs %v}", u.databaseName, u.userName, u.specPrivs, u.envDbaSysPrivs, u.envDbaRolePrivs) +} + +func (u *user) GetUserName() string { + return u.userName +} + +func (u *user) GetUserEnvPrivs() []string { + var privs []string + privs = append(privs, u.envDbaRolePrivs...) + privs = append(privs, u.envDbaSysPrivs...) + return privs +} + +func newUser(databaseName string, specUser *pb.User) *user { + var privs []string + for _, p := range specUser.GetPrivileges() { + upperP := strings.ToUpper(p) + // example: GRANT SELECT ON TABLE t TO SCOTT + if strings.Contains(upperP, " ON ") { + klog.ErrorS(errors.New("object privileges not supported, will be omitted by operator"), "not supported privileges", "priv", p) + } else { + privs = append(privs, upperP) + } + } + user := &user{ + databaseName: strings.ToUpper(databaseName), + userName: strings.ToUpper(specUser.GetName()), + // Used by both gsm and plaintext + // can be overwritten later if GSM is enabled. + newPassword: specUser.GetPassword(), + // Only used for plaintext status diff. + curPassword: specUser.GetLastPassword(), + specPrivs: privs, + // Empty version is returned if PasswordGsmSecretRef is nil. 
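
Illustrative aside (not part of the patch): the assignment just below relies on protoc-generated getters being nil-safe, so the chained call works even when PasswordGsmSecretRef is nil; the explicit nil check a few lines further down is only there so gsmSecNewVer stays empty when no GSM ref is configured. A minimal sketch:

    ref := specUser.GetPasswordGsmSecretRef() // may be nil
    _ = ref.GetLastVersion()                  // returns "" on a nil receiver, no panic
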
+ gsmSecCurVer: specUser.PasswordGsmSecretRef.GetLastVersion(), + } + if specUser.PasswordGsmSecretRef != nil { + user.gsmSecNewVer = fmt.Sprintf(gsmSecretStr, specUser.GetPasswordGsmSecretRef().GetProjectId(), specUser.GetPasswordGsmSecretRef().GetSecretId(), specUser.GetPasswordGsmSecretRef().GetVersion()) + } + return user +} + +func newNoSpecUser(databaseName, userName string) *user { + return &user{ + databaseName: strings.ToUpper(databaseName), + userName: strings.ToUpper(userName), + } +} + +func queryDB(ctx context.Context, client dbdpb.DatabaseDaemonClient, databaseName, sqlQuery, key string, filter func(val string) bool) ([]string, error) { + resp, err := client.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: []string{ + sql.QuerySetSessionContainer(databaseName), + sqlQuery, + }, + }) + if err != nil { + return nil, fmt.Errorf("queryDB failed to query data: %v", err) + } + rows, err := parseSQLResponse(resp) + if err != nil { + return nil, fmt.Errorf("queryDB failed to parse data from %v: %v", resp, err) + } + values, err := queryRowsByKey(rows, key, filter) + if err != nil { + return nil, fmt.Errorf("queryDB failed to retrieve %v from %v: %v", key, rows, err) + } + return values, nil +} + +// parseSQLResponse parses the JSON result-set (returned by runSQLPlus API) and +// returns a list of rows with column-value mapping. +func parseSQLResponse(resp *dbdpb.RunCMDResponse) ([]map[string]string, error) { + var rows []map[string]string + for _, msg := range resp.GetMsg() { + row := make(map[string]string) + if err := json.Unmarshal([]byte(msg), &row); err != nil { + return nil, fmt.Errorf("failed to parse %s: %v", msg, err) + } + rows = append(rows, row) + } + return rows, nil +} + +func queryRowsByKey(rows []map[string]string, rowKey string, filter func(val string) bool) ([]string, error) { + var res []string + for _, row := range rows { + v, ok := row[rowKey] + if !ok { + return nil, fmt.Errorf("failed to retrieve %v from %v", rowKey, row) + } + if filter(v) { + res = append(res, v) + } + } + return res, nil +} + +// compare returns the set difference left\right, the intersection, and the set difference right\left. +func compare(left, right []string) (leftMinusRight, intersection, rightMinusLeft []string) { + leftSet := stringset.New(left...) + rightSet := stringset.New(right...) + return leftSet.Diff(rightSet).Elements(), leftSet.Intersect(rightSet).Elements(), rightSet.Diff(leftSet).Elements() +} diff --git a/oracle/pkg/agents/consts/BUILD.bazel b/oracle/pkg/agents/consts/BUILD.bazel new file mode 100644 index 0000000..9cb04db --- /dev/null +++ b/oracle/pkg/agents/consts/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "consts", + srcs = ["consts.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts", + visibility = ["//visibility:public"], +) diff --git a/oracle/pkg/agents/consts/consts.go b/oracle/pkg/agents/consts/consts.go new file mode 100644 index 0000000..36c0c36 --- /dev/null +++ b/oracle/pkg/agents/consts/consts.go @@ -0,0 +1,206 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package consts provides common Oracle constants across the entire Data Plane. +package consts + +// Listener is an Oracle listener struct. +type Listener struct { + LType string + Port int32 + Local bool + Protocol string +} + +// OraDir wraps up the host and oracle names for a specific directory, e.g. +// /u03/app/oracle//dpdump and PDB_DATA_PUMP_DIR. +type OraDir struct { + Linux string + Oracle string +} + +const ( + // DefaultHealthAgentPort is Health Agent's default port number. + DefaultHealthAgentPort = 3201 + + // DefaultConfigAgentPort is Config Agent's default port number. + DefaultConfigAgentPort = 3202 + + // DefaultDBDaemonPort is DB daemon's default port number. + DefaultDBDaemonPort = 3203 + + // DefaultMonitoringAgentPort is the default port where the oracle exporter runs. + DefaultMonitoringAgentPort = 9161 + + // Localhost is a general localhost name. + Localhost = "localhost" + + // DomainSocketFile is meant for the agents to communicate to the Database Daemon. + DomainSocketFile = "/var/tmp/dbdaemon.sock" + + // ProxyDomainSocketFile is meant for the database daemon to communicate to the database daemon proxy. + ProxyDomainSocketFile = "/var/tmp/dbdaemon_proxy.sock" + + // SecureListenerPort is a secure listener port number. + SecureListenerPort = 6021 + // SSLListenerPort is an SSL listener port number. + SSLListenerPort = 3307 + + // OpenPluggableDatabaseSQL is used to open pluggable databases (e.g. after CDB start). + OpenPluggableDatabaseSQL = "alter pluggable database all open" + + // ListPDBsSQL lists pluggable databases excluding a root container. + ListPDBsSQL = "select name from v$containers where name !='CDB$ROOT'" + + // ListPluggableDatabaseExcludeSeedSQL is used to list pluggable databases excluding PDB$SEED. + ListPluggableDatabaseExcludeSeedSQL = "select pdb_name from dba_pdbs where pdb_name!='PDB$SEED'" + + // DefaultPGAMB is the default PGA size (in MB) with which the CDBs are created. + DefaultPGAMB = 1200 + + // DefaultSGAMB is the default SGA size (in MB) with which the CDBs are created. + DefaultSGAMB = 1800 + + // Oracle18c is the version string for Oracle 18c XE. + Oracle18c = "18c" + + // SourceOracleHome is the ORACLE_HOME path where Oracle EE or SE is installed. + SourceOracleHome = "/u01/app/oracle/product/%s/db" + + // SourceOracleXeHome is the ORACLE_HOME path where Oracle 18c XE is installed. + SourceOracleXeHome = "/opt/oracle/product/%s/dbhomeXE" + + // SourceOracleBase is the ORACLE_BASE path where Oracle EE or SE is installed. + SourceOracleBase = "/u01/app/oracle" + + // SourceOracleXeBase is the ORACLE_BASE path where Oracle 18c XE is installed. + SourceOracleXeBase = "/opt/oracle" + + // SourceOracleInventory is the source OraInventory path for Oracle EE or SE. + SourceOracleInventory = "/u01/app/oracle" + + // SourceOracleXeInventory is the source OraInventory path for Oracle 18c XE. + SourceOracleXeInventory = "/opt/oracle" +
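
Illustrative aside (not part of the patch): several of the constants in this file are fmt templates rather than literal paths; the %s placeholders are filled at runtime. A small sketch using the version, mount, and directory constants defined in this file (DataDir, PDBDataDir, and DataMount appear just below):

    package example // illustrative only

    import (
        "fmt"

        "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
    )

    func paths() {
        // The Oracle version slots into the ORACLE_HOME template.
        fmt.Println(fmt.Sprintf(consts.SourceOracleHome, "12.2"))
        // => /u01/app/oracle/product/12.2/db

        // DataDir takes the PD mount ("u02") and the CDB name;
        // PDBDataDir additionally takes the PDB name.
        fmt.Println(fmt.Sprintf(consts.DataDir, consts.DataMount, "MYCDB"))
        // => /u02/app/oracle/oradata/MYCDB
        fmt.Println(fmt.Sprintf(consts.PDBDataDir, consts.DataMount, "MYCDB", "MYPDB"))
        // => /u02/app/oracle/oradata/MYCDB/MYPDB/data
    }
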
+ // SourceOracleDataDirectory is where the docker image holds the data files for EE and SE Oracle. + SourceOracleDataDirectory = "/u01/app/oracle/oradata" + + // SourceOracleXeDataDirectory is where the docker image holds the data files for Oracle 18c XE. + SourceOracleXeDataDirectory = "/opt/oracle/oradata" + + // SourceDatabaseHost is the hostname used during the image build process. + SourceDatabaseHost = "ol7-db12201-gi-cdb-docker-template-vm" + + // CharSet is the supported character set for Oracle databases. + CharSet = "AL32UTF8" + + // OracleDBContainerName is the container name for the Oracle database. + OracleDBContainerName = "oracle_db" + + // SecurityUser is the user for lockdown triggers. + SecurityUser = "gcsql$security" + + // PDBLoaderUser is the user for impdp/expdp operations by end users. + PDBLoaderUser = "gcsql$pdbloader" + + // MonitoringAgentName is the container name for the monitoring agent. + MonitoringAgentName = "oracle-monitoring" + + // DefaultExitErrorCode is the default exit code. + DefaultExitErrorCode = 128 + + // RMANBackup is the oracle rman command for taking backups. + RMANBackup = "backup" +) + +var ( + // ProvisioningDoneFile is a flag name/location created at the end of provisioning. + // It is placed on the PD storage so that on recreate the bootstrap doesn't re-run. + ProvisioningDoneFile = "/u02/app/oracle/provisioning_successful" + + // SECURE is the name of the secure TNS listener. + SECURE = "SECURE" + // ListenerNames maps listener names to their definitions. + ListenerNames = map[string]*Listener{ + "SECURE": { + LType: SECURE, + Port: SecureListenerPort, + Local: true, + Protocol: "TCP", + }, + "SSL": { + LType: "SSL", + Port: SSLListenerPort, + Protocol: "TCPS", + }, + } + + // DpdumpDir is the Impdp/Expdp directory and oracle directory name. + // Linux is relative to the PDB PATH_PREFIX. + DpdumpDir = OraDir{Linux: "dmp", Oracle: "PDB_DATA_PUMP_DIR"} + + // OraGroup is the group that owns the database software. + OraGroup = []string{"dba", "oinstall"} + + // OraTab is the oratab file path. + OraTab = "/etc/oratab" + + // OraUser is the owner of the database and database software. + OraUser = "oracle" + + // OracleBase is the Oracle base path. + OracleBase = "/u02/app/oracle" + + // DataDir is the directory where datafiles exist. + DataDir = "/%s/app/oracle/oradata/%s" + + // PDBDataDir is the directory where PDB datafiles exist. + PDBDataDir = DataDir + "/%s/data" + + // PDBSeedDir is the directory where the SEED datafiles exist. + PDBSeedDir = DataDir + "/pdbseed" + + // PDBPathPrefix is the directory where the PDB data directory exists. + PDBPathPrefix = DataDir + "/%s" + + // ConfigDir is where the spfile, pfile and pwd file are persisted. + ConfigDir = "/%s/app/oracle/oraconfig/%s" + + // RecoveryAreaDir is where the flash recovery area will be. + RecoveryAreaDir = "/%s/app/oracle/fast_recovery_area/%s" + + // DataMount is the PD mount where the data is persisted. + DataMount = "u02" + + // LogMount is the PD mount where the logs are persisted. + LogMount = "u03" + + // ListenerDir is the listener directory. + ListenerDir = "/%s/app/oracle/oraconfig/network" + + // ScriptDir is where the scripts are located on the container image. + ScriptDir = "/agents" + + // WalletDir is where the SSL certs are stored. + WalletDir = "/u02/app/oracle/wallet" + + // OracleDir is where the env file is located. + OracleDir = "/home/oracle" + + // DefaultRMANDir sets the default rman backup directory. + DefaultRMANDir = "/u03/app/oracle/rman" + + // RMANStagingDir sets the staging directory for rman backups to GCS.
+ RMANStagingDir = "/u03/app/oracle/rmanstaging" +) diff --git a/oracle/pkg/agents/monitoring/BUILD.bazel b/oracle/pkg/agents/monitoring/BUILD.bazel new file mode 100644 index 0000000..aea0aa8 --- /dev/null +++ b/oracle/pkg/agents/monitoring/BUILD.bazel @@ -0,0 +1,5 @@ +filegroup( + name = "monitoring_files", + srcs = ["default-metrics.yaml"], + visibility = ["//visibility:public"], +) diff --git a/oracle/pkg/agents/monitoring/default-metrics.yaml b/oracle/pkg/agents/monitoring/default-metrics.yaml new file mode 100644 index 0000000..ead3f84 --- /dev/null +++ b/oracle/pkg/agents/monitoring/default-metrics.yaml @@ -0,0 +1,133 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +metric: + - context: sessions + labels: + - status + - type + metricsdesc: + value: Gauge metric with count of sessions by status and type. + request: >- + SELECT status, type, COUNT(*) as value FROM v$session GROUP BY status, + type + - context: resource + labels: + - resource_name + metricsdesc: + current_utilization: >- + Generic counter metric from v$resource_limit view in Oracle (current + value). + limit_value: >- + Generic counter metric from v$resource_limit view in Oracle (UNLIMITED: + -1). + request: >- + SELECT resource_name,current_utilization,CASE WHEN TRIM(limit_value) LIKE + 'UNLIMITED' THEN '-1' ELSE TRIM(limit_value) END as limit_value FROM + v$resource_limit + - context: activity + metricsdesc: + value: Generic counter metric from v$sysstat view in Oracle. + fieldtoappend: name + request: >- + SELECT name, value FROM v$sysstat WHERE name IN ('parse count (total)', + 'execute count', 'user commits', 'user rollbacks') + - context: process + metricsdesc: + count: Gauge metric with count of processes. + request: SELECT COUNT(*) as count FROM v$process + - context: wait_time + metricsdesc: + value: Generic counter metric from v$waitclassmetric view in Oracle. + fieldtoappend: wait_class + request: | + SELECT + n.wait_class as WAIT_CLASS, + round(m.time_waited/m.INTSIZE_CSEC,3) as VALUE + FROM + v$waitclassmetric m, v$system_wait_class n + WHERE + m.wait_class_id=n.wait_class_id AND n.wait_class != 'Idle' + - context: tablespace + labels: + - tablespace + - type + metricsdesc: + bytes: Generic counter metric of tablespaces bytes in Oracle. + max_bytes: Generic counter metric of tablespaces max bytes in Oracle. + free: Generic counter metric of tablespaces free bytes in Oracle. 
+ request: | + SELECT + df.tablespace_name as tablespace, + df.type as type, + nvl(sum(df.bytes),0) as bytes, + nvl(sum(df.max_bytes),0) as max_bytes, + nvl(sum(f.free),0) as free + FROM + ( + SELECT + ddf.file_id, + dt.contents as type, + ddf.file_name, + ddf.tablespace_name, + TRUNC(ddf.bytes) as bytes, + TRUNC(GREATEST(ddf.bytes,ddf.maxbytes)) as max_bytes + FROM + dba_data_files ddf, + dba_tablespaces dt + WHERE ddf.tablespace_name = dt.tablespace_name + ) df, + ( + SELECT + TRUNC(SUM(bytes)) AS free, + file_id + FROM dba_free_space + GROUP BY file_id + ) f + WHERE df.file_id = f.file_id (+) + GROUP BY df.tablespace_name, df.type + UNION ALL + SELECT + Y.name as tablespace_name, + Y.type as type, + SUM(Y.bytes) as bytes, + SUM(Y.max_bytes) as max_bytes, + MAX(nvl(Y.free_bytes,0)) as free + FROM + ( + SELECT + dtf.tablespace_name as name, + dt.contents as type, + dtf.status as status, + dtf.bytes as bytes, + ( + SELECT + ((f.total_blocks - s.tot_used_blocks)*vp.value) + FROM + (SELECT tablespace_name, sum(used_blocks) tot_used_blocks FROM gv$sort_segment WHERE tablespace_name!='DUMMY' GROUP BY tablespace_name) s, + (SELECT tablespace_name, sum(blocks) total_blocks FROM dba_temp_files where tablespace_name !='DUMMY' GROUP BY tablespace_name) f, + (SELECT value FROM v$parameter WHERE name = 'db_block_size') vp + WHERE f.tablespace_name=s.tablespace_name AND f.tablespace_name = dtf.tablespace_name + ) as free_bytes, + CASE + WHEN dtf.maxbytes = 0 THEN dtf.bytes + ELSE dtf.maxbytes + END as max_bytes + FROM + sys.dba_temp_files dtf, + sys.dba_tablespaces dt + WHERE dtf.tablespace_name = dt.tablespace_name + ) Y + GROUP BY Y.name, Y.type + ORDER BY tablespace diff --git a/oracle/pkg/agents/oracle/BUILD.bazel b/oracle/pkg/agents/oracle/BUILD.bazel new file mode 100644 index 0000000..f3ba8b5 --- /dev/null +++ b/oracle/pkg/agents/oracle/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + +proto_library( + name = "oracle_proto", + srcs = [ + "dbdaemon.proto", + "dbdaemon_proxy.proto", + "oracle.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:empty_proto", + "@com_google_protobuf//:timestamp_proto", + "@go_googleapis//google/longrunning:longrunning_proto", + ], +) + +go_proto_library( + name = "oracle_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle", + proto = ":oracle_proto", + visibility = ["//visibility:public"], + deps = [ + "@go_googleapis//google/longrunning:longrunning_go_proto", + ], +) + +go_library( + name = "oracle", + embed = [":oracle_go_proto"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle", + visibility = ["//visibility:public"], +) diff --git a/oracle/pkg/agents/oracle/dbdaemon.pb.go b/oracle/pkg/agents/oracle/dbdaemon.pb.go new file mode 100644 index 0000000..0f9ac14 --- /dev/null +++ b/oracle/pkg/agents/oracle/dbdaemon.pb.go @@ -0,0 +1,4390 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Database Daemon is used for privileged database ops, e.g. +// run sqlplus rman. It is intended to be used by the agents running on the +// database sidecar container(via *nix domain socket protocol). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.4 +// source: oracle/pkg/agents/oracle/dbdaemon.proto + +package oracle + +import ( + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + longrunning "google.golang.org/genproto/googleapis/longrunning" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetDatabaseTypeResponse_DatabaseType int32 + +const ( + GetDatabaseTypeResponse_UNKNOWN_DATABASE_TYPE GetDatabaseTypeResponse_DatabaseType = 0 + GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE GetDatabaseTypeResponse_DatabaseType = 1 + GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE_NONCDB GetDatabaseTypeResponse_DatabaseType = 2 +) + +// Enum value maps for GetDatabaseTypeResponse_DatabaseType. +var ( + GetDatabaseTypeResponse_DatabaseType_name = map[int32]string{ + 0: "UNKNOWN_DATABASE_TYPE", + 1: "ORACLE_12_2_ENTERPRISE", + 2: "ORACLE_12_2_ENTERPRISE_NONCDB", + } + GetDatabaseTypeResponse_DatabaseType_value = map[string]int32{ + "UNKNOWN_DATABASE_TYPE": 0, + "ORACLE_12_2_ENTERPRISE": 1, + "ORACLE_12_2_ENTERPRISE_NONCDB": 2, + } +) + +func (x GetDatabaseTypeResponse_DatabaseType) Enum() *GetDatabaseTypeResponse_DatabaseType { + p := new(GetDatabaseTypeResponse_DatabaseType) + *p = x + return p +} + +func (x GetDatabaseTypeResponse_DatabaseType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetDatabaseTypeResponse_DatabaseType) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_enumTypes[0].Descriptor() +} + +func (GetDatabaseTypeResponse_DatabaseType) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_oracle_dbdaemon_proto_enumTypes[0] +} + +func (x GetDatabaseTypeResponse_DatabaseType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetDatabaseTypeResponse_DatabaseType.Descriptor instead. +func (GetDatabaseTypeResponse_DatabaseType) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{24, 0} +} + +type CreateDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // path is a directory name. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // perm is the permission bits perm (before umask) are used for all + // directories CreateDir creates. 
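
Illustrative aside (not part of the patch): Perm is plain numeric permission bits, so callers should pass an octal literal. A hedged sketch, assuming the DatabaseDaemon service exposes a CreateDir RPC for this request/response pair:

    package example // illustrative only

    import (
        "context"

        dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
    )

    func mkScratchDir(ctx context.Context, client dbdpb.DatabaseDaemonClient) error {
        _, err := client.CreateDir(ctx, &dbdpb.CreateDirRequest{
            Path: "/u02/app/oracle/scratch", // hypothetical path
            Perm: 0750,                      // octal: rwxr-x---; 750 (decimal) would be wrong
        })
        return err
    }
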
+ Perm uint32 `protobuf:"varint,2,opt,name=perm,proto3" json:"perm,omitempty"` +} + +func (x *CreateDirRequest) Reset() { + *x = CreateDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDirRequest) ProtoMessage() {} + +func (x *CreateDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDirRequest.ProtoReflect.Descriptor instead. +func (*CreateDirRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CreateDirRequest) GetPerm() uint32 { + if x != nil { + return x.Perm + } + return 0 +} + +type CreateDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateDirResponse) Reset() { + *x = CreateDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDirResponse) ProtoMessage() {} + +func (x *CreateDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDirResponse.ProtoReflect.Descriptor instead. +func (*CreateDirResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{1} +} + +type ReadDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // path is a directory name. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // set recursive to true if collect all files and directories metadata in the + // file tree rooted at path. set recursive to false if only collect the first + // level files and directories metadata. 
+ Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *ReadDirRequest) Reset() { + *x = ReadDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDirRequest) ProtoMessage() {} + +func (x *ReadDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDirRequest.ProtoReflect.Descriptor instead. +func (*ReadDirRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{2} +} + +func (x *ReadDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ReadDirRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type ReadDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CurrPath *ReadDirResponse_FileInfo `protobuf:"bytes,1,opt,name=currPath,proto3" json:"currPath,omitempty"` + SubPaths []*ReadDirResponse_FileInfo `protobuf:"bytes,2,rep,name=subPaths,proto3" json:"subPaths,omitempty"` +} + +func (x *ReadDirResponse) Reset() { + *x = ReadDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDirResponse) ProtoMessage() {} + +func (x *ReadDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDirResponse.ProtoReflect.Descriptor instead. +func (*ReadDirResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{3} +} + +func (x *ReadDirResponse) GetCurrPath() *ReadDirResponse_FileInfo { + if x != nil { + return x.CurrPath + } + return nil +} + +func (x *ReadDirResponse) GetSubPaths() []*ReadDirResponse_FileInfo { + if x != nil { + return x.SubPaths + } + return nil +} + +type DeleteDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // path is a directory name to be deleted. 
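
Illustrative aside (not part of the patch): Force toggles between a plain remove and a recursive remove. A sketch under the same assumptions as the CreateDir example above (a connected client and a ctx in scope):

    // Force=false behaves like os.Remove (a file or an empty directory only);
    // Force=true removes the path and any children, like os.RemoveAll.
    _, err := client.DeleteDir(ctx, &dbdpb.DeleteDirRequest{
        Path:  "/u02/app/oracle/scratch", // hypothetical path
        Force: true,
    })
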
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // set force to false if removes a file or (empty) directory + // DeleteDir removes path and any children it contains if force set to true + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *DeleteDirRequest) Reset() { + *x = DeleteDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteDirRequest) ProtoMessage() {} + +func (x *DeleteDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteDirRequest.ProtoReflect.Descriptor instead. +func (*DeleteDirRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{4} +} + +func (x *DeleteDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *DeleteDirRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type DeleteDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteDirResponse) Reset() { + *x = DeleteDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteDirResponse) ProtoMessage() {} + +func (x *DeleteDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteDirResponse.ProtoReflect.Descriptor instead. +func (*DeleteDirResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{5} +} + +type RunCMDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msg []string `protobuf:"bytes,1,rep,name=msg,proto3" json:"msg,omitempty"` +} + +func (x *RunCMDResponse) Reset() { + *x = RunCMDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunCMDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunCMDResponse) ProtoMessage() {} + +func (x *RunCMDResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunCMDResponse.ProtoReflect.Descriptor instead. 
+func (*RunCMDResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{6} +} + +func (x *RunCMDResponse) GetMsg() []string { + if x != nil { + return x.Msg + } + return nil +} + +type LocalConnection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LocalConnection) Reset() { + *x = LocalConnection{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalConnection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalConnection) ProtoMessage() {} + +func (x *LocalConnection) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalConnection.ProtoReflect.Descriptor instead. +func (*LocalConnection) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{7} +} + +type RunSQLPlusCMDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Commands []string `protobuf:"bytes,1,rep,name=commands,proto3" json:"commands,omitempty"` + // Optional TnsAdmin location for custom sql env settings. + TnsAdmin string `protobuf:"bytes,2,opt,name=tns_admin,json=tnsAdmin,proto3" json:"tns_admin,omitempty"` + Suppress bool `protobuf:"varint,3,opt,name=suppress,proto3" json:"suppress,omitempty"` + // Connection target. + // + // Types that are assignable to ConnectInfo: + // *RunSQLPlusCMDRequest_Local + // *RunSQLPlusCMDRequest_Dsn + // *RunSQLPlusCMDRequest_DatabaseName + ConnectInfo isRunSQLPlusCMDRequest_ConnectInfo `protobuf_oneof:"connectInfo"` + // Quiet mode, suppress all output. + Quiet bool `protobuf:"varint,7,opt,name=quiet,proto3" json:"quiet,omitempty"` +} + +func (x *RunSQLPlusCMDRequest) Reset() { + *x = RunSQLPlusCMDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunSQLPlusCMDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunSQLPlusCMDRequest) ProtoMessage() {} + +func (x *RunSQLPlusCMDRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunSQLPlusCMDRequest.ProtoReflect.Descriptor instead. 
+func (*RunSQLPlusCMDRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{8} +} + +func (x *RunSQLPlusCMDRequest) GetCommands() []string { + if x != nil { + return x.Commands + } + return nil +} + +func (x *RunSQLPlusCMDRequest) GetTnsAdmin() string { + if x != nil { + return x.TnsAdmin + } + return "" +} + +func (x *RunSQLPlusCMDRequest) GetSuppress() bool { + if x != nil { + return x.Suppress + } + return false +} + +func (m *RunSQLPlusCMDRequest) GetConnectInfo() isRunSQLPlusCMDRequest_ConnectInfo { + if m != nil { + return m.ConnectInfo + } + return nil +} + +func (x *RunSQLPlusCMDRequest) GetLocal() *LocalConnection { + if x, ok := x.GetConnectInfo().(*RunSQLPlusCMDRequest_Local); ok { + return x.Local + } + return nil +} + +func (x *RunSQLPlusCMDRequest) GetDsn() string { + if x, ok := x.GetConnectInfo().(*RunSQLPlusCMDRequest_Dsn); ok { + return x.Dsn + } + return "" +} + +func (x *RunSQLPlusCMDRequest) GetDatabaseName() string { + if x, ok := x.GetConnectInfo().(*RunSQLPlusCMDRequest_DatabaseName); ok { + return x.DatabaseName + } + return "" +} + +func (x *RunSQLPlusCMDRequest) GetQuiet() bool { + if x != nil { + return x.Quiet + } + return false +} + +type isRunSQLPlusCMDRequest_ConnectInfo interface { + isRunSQLPlusCMDRequest_ConnectInfo() +} + +type RunSQLPlusCMDRequest_Local struct { + // Connect to local database. + Local *LocalConnection `protobuf:"bytes,4,opt,name=local,proto3,oneof"` +} + +type RunSQLPlusCMDRequest_Dsn struct { + // dsn string used to connect to an external database. This is to support + // connecting to an external server from the DB container. Formats + // supported are listed here + // https://github.com/godror/godror/blob/main/README.md#connect + Dsn string `protobuf:"bytes,5,opt,name=dsn,proto3,oneof"` +} + +type RunSQLPlusCMDRequest_DatabaseName struct { + // Connect to local by non-local database. + // Explicitly sets the ORACLE_SID. This is required to + // to support an ES replica. 
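
Illustrative aside (not part of the patch): ConnectInfo is a oneof, so exactly one wrapper type is set per request. A sketch of the three options, using only the wrapper types defined in this file:

    // Local: connect to the local database.
    reqLocal := &dbdpb.RunSQLPlusCMDRequest{
        Commands:    []string{`select 1 from dual`},
        ConnectInfo: &dbdpb.RunSQLPlusCMDRequest_Local{Local: &dbdpb.LocalConnection{}},
    }
    // Dsn: connect to an external database via a godror-style DSN.
    reqDsn := &dbdpb.RunSQLPlusCMDRequest{
        Commands:    []string{`select 1 from dual`},
        ConnectInfo: &dbdpb.RunSQLPlusCMDRequest_Dsn{Dsn: "user/pass@host:1521/service"}, // hypothetical DSN
    }
    // DatabaseName: pin ORACLE_SID explicitly (e.g. for an ES replica).
    reqByName := &dbdpb.RunSQLPlusCMDRequest{
        Commands:    []string{`select name from v$database`},
        ConnectInfo: &dbdpb.RunSQLPlusCMDRequest_DatabaseName{DatabaseName: "MYDB"},
    }
    _, _, _ = reqLocal, reqDsn, reqByName
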
+ DatabaseName string `protobuf:"bytes,6,opt,name=database_name,json=databaseName,proto3,oneof"` +} + +func (*RunSQLPlusCMDRequest_Local) isRunSQLPlusCMDRequest_ConnectInfo() {} + +func (*RunSQLPlusCMDRequest_Dsn) isRunSQLPlusCMDRequest_ConnectInfo() {} + +func (*RunSQLPlusCMDRequest_DatabaseName) isRunSQLPlusCMDRequest_ConnectInfo() {} + +type CheckDatabaseStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + IsCdb bool `protobuf:"varint,2,opt,name=is_cdb,json=isCdb,proto3" json:"is_cdb,omitempty"` + DbDomain string `protobuf:"bytes,3,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CheckDatabaseStateRequest) Reset() { + *x = CheckDatabaseStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckDatabaseStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckDatabaseStateRequest) ProtoMessage() {} + +func (x *CheckDatabaseStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckDatabaseStateRequest.ProtoReflect.Descriptor instead. +func (*CheckDatabaseStateRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{9} +} + +func (x *CheckDatabaseStateRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *CheckDatabaseStateRequest) GetIsCdb() bool { + if x != nil { + return x.IsCdb + } + return false +} + +func (x *CheckDatabaseStateRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CheckDatabaseStateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CheckDatabaseStateResponse) Reset() { + *x = CheckDatabaseStateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckDatabaseStateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckDatabaseStateResponse) ProtoMessage() {} + +func (x *CheckDatabaseStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckDatabaseStateResponse.ProtoReflect.Descriptor instead. 
+func (*CheckDatabaseStateResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{10} +} + +type CreatePasswordFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + SysPassword string `protobuf:"bytes,2,opt,name=sys_password,json=sysPassword,proto3" json:"sys_password,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=dir,proto3" json:"dir,omitempty"` +} + +func (x *CreatePasswordFileRequest) Reset() { + *x = CreatePasswordFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreatePasswordFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreatePasswordFileRequest) ProtoMessage() {} + +func (x *CreatePasswordFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreatePasswordFileRequest.ProtoReflect.Descriptor instead. +func (*CreatePasswordFileRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{11} +} + +func (x *CreatePasswordFileRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *CreatePasswordFileRequest) GetSysPassword() string { + if x != nil { + return x.SysPassword + } + return "" +} + +func (x *CreatePasswordFileRequest) GetDir() string { + if x != nil { + return x.Dir + } + return "" +} + +type CreatePasswordFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreatePasswordFileResponse) Reset() { + *x = CreatePasswordFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreatePasswordFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreatePasswordFileResponse) ProtoMessage() {} + +func (x *CreatePasswordFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreatePasswordFileResponse.ProtoReflect.Descriptor instead. 
+func (*CreatePasswordFileResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{12} +} + +type CreateReplicaInitOraFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EmHost string `protobuf:"bytes,1,opt,name=em_host,json=emHost,proto3" json:"em_host,omitempty"` + EmPort int32 `protobuf:"varint,2,opt,name=em_port,json=emPort,proto3" json:"em_port,omitempty"` + EmDbName string `protobuf:"bytes,3,opt,name=em_db_name,json=emDbName,proto3" json:"em_db_name,omitempty"` + EmDbUniqueName string `protobuf:"bytes,4,opt,name=em_db_unique_name,json=emDbUniqueName,proto3" json:"em_db_unique_name,omitempty"` + EmDbDomain string `protobuf:"bytes,5,opt,name=em_db_domain,json=emDbDomain,proto3" json:"em_db_domain,omitempty"` + LogFileDirList string `protobuf:"bytes,6,opt,name=log_file_dir_list,json=logFileDirList,proto3" json:"log_file_dir_list,omitempty"` + DataFileDirList string `protobuf:"bytes,7,opt,name=data_file_dir_list,json=dataFileDirList,proto3" json:"data_file_dir_list,omitempty"` + EmCompatibility string `protobuf:"bytes,8,opt,name=em_compatibility,json=emCompatibility,proto3" json:"em_compatibility,omitempty"` + InitOraDir string `protobuf:"bytes,9,opt,name=init_ora_dir,json=initOraDir,proto3" json:"init_ora_dir,omitempty"` + InitOraFileName string `protobuf:"bytes,10,opt,name=init_ora_file_name,json=initOraFileName,proto3" json:"init_ora_file_name,omitempty"` +} + +func (x *CreateReplicaInitOraFileRequest) Reset() { + *x = CreateReplicaInitOraFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateReplicaInitOraFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateReplicaInitOraFileRequest) ProtoMessage() {} + +func (x *CreateReplicaInitOraFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateReplicaInitOraFileRequest.ProtoReflect.Descriptor instead. 
+func (*CreateReplicaInitOraFileRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateReplicaInitOraFileRequest) GetEmHost() string { + if x != nil { + return x.EmHost + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetEmPort() int32 { + if x != nil { + return x.EmPort + } + return 0 +} + +func (x *CreateReplicaInitOraFileRequest) GetEmDbName() string { + if x != nil { + return x.EmDbName + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetEmDbUniqueName() string { + if x != nil { + return x.EmDbUniqueName + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetEmDbDomain() string { + if x != nil { + return x.EmDbDomain + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetLogFileDirList() string { + if x != nil { + return x.LogFileDirList + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetDataFileDirList() string { + if x != nil { + return x.DataFileDirList + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetEmCompatibility() string { + if x != nil { + return x.EmCompatibility + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetInitOraDir() string { + if x != nil { + return x.InitOraDir + } + return "" +} + +func (x *CreateReplicaInitOraFileRequest) GetInitOraFileName() string { + if x != nil { + return x.InitOraFileName + } + return "" +} + +type CreateReplicaInitOraFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InitOraFileContent string `protobuf:"bytes,1,opt,name=init_ora_file_content,json=initOraFileContent,proto3" json:"init_ora_file_content,omitempty"` +} + +func (x *CreateReplicaInitOraFileResponse) Reset() { + *x = CreateReplicaInitOraFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateReplicaInitOraFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateReplicaInitOraFileResponse) ProtoMessage() {} + +func (x *CreateReplicaInitOraFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateReplicaInitOraFileResponse.ProtoReflect.Descriptor instead. +func (*CreateReplicaInitOraFileResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateReplicaInitOraFileResponse) GetInitOraFileContent() string { + if x != nil { + return x.InitOraFileContent + } + return "" +} + +// KnownPDBsRequest is a message used for getting +// a list of known PDBs in a CDB. +type KnownPDBsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // By default a SEED PDB is not included, but a caller can override it here. + IncludeSeed bool `protobuf:"varint,1,opt,name=include_seed,json=includeSeed,proto3" json:"include_seed,omitempty"` + // By default a state of a PDB is ignored, but a caller may request a list + // of PDBs only in the OPEN state (as opposed to just MOUNTED). 
+	OnlyOpen bool `protobuf:"varint,2,opt,name=only_open,json=onlyOpen,proto3" json:"only_open,omitempty"`
+}
+
+func (x *KnownPDBsRequest) Reset() {
+	*x = KnownPDBsRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KnownPDBsRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KnownPDBsRequest) ProtoMessage() {}
+
+func (x *KnownPDBsRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KnownPDBsRequest.ProtoReflect.Descriptor instead.
+func (*KnownPDBsRequest) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *KnownPDBsRequest) GetIncludeSeed() bool {
+	if x != nil {
+		return x.IncludeSeed
+	}
+	return false
+}
+
+func (x *KnownPDBsRequest) GetOnlyOpen() bool {
+	if x != nil {
+		return x.OnlyOpen
+	}
+	return false
+}
+
+// KnownPDBsResponse is a message returning a list of known PDBs.
+type KnownPDBsResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	KnownPdbs []string `protobuf:"bytes,1,rep,name=known_pdbs,json=knownPdbs,proto3" json:"known_pdbs,omitempty"`
+}
+
+func (x *KnownPDBsResponse) Reset() {
+	*x = KnownPDBsResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KnownPDBsResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KnownPDBsResponse) ProtoMessage() {}
+
+func (x *KnownPDBsResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KnownPDBsResponse.ProtoReflect.Descriptor instead.
+func (*KnownPDBsResponse) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *KnownPDBsResponse) GetKnownPdbs() []string {
+	if x != nil {
+		return x.KnownPdbs
+	}
+	return nil
+}
+
+type RunRMANRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Scripts to be executed by RMAN in sequence.
+	Scripts []string `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty"`
+	// tns_admin sets TNS_ADMIN to override the location of the network configuration.
+	TnsAdmin string `protobuf:"bytes,2,opt,name=tns_admin,json=tnsAdmin,proto3" json:"tns_admin,omitempty"`
+	Suppress bool `protobuf:"varint,3,opt,name=suppress,proto3" json:"suppress,omitempty"`
+	// target is the primary database to connect to. This is usually
+	// the source database in clone operations. This would be the
+	// ES primary for the ES setup.
+	Target string `protobuf:"bytes,4,opt,name=target,proto3" json:"target,omitempty"`
+	// auxiliary is the secondary database to connect to.
+	// This is the ES replica database in the ES setup.
+	Auxiliary string `protobuf:"bytes,5,opt,name=auxiliary,proto3" json:"auxiliary,omitempty"`
+	// gcs_path is the destination GCS bucket for the backup.
+	GcsPath string `protobuf:"bytes,6,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"`
+	// local_path is the destination directory for the backup.
+	LocalPath string `protobuf:"bytes,7,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"`
+	// RMAN command to run; backup and restore are currently supported.
+	Cmd string `protobuf:"bytes,8,opt,name=cmd,proto3" json:"cmd,omitempty"`
+}
+
+func (x *RunRMANRequest) Reset() {
+	*x = RunRMANRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[17]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RunRMANRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RunRMANRequest) ProtoMessage() {}
+
+func (x *RunRMANRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[17]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RunRMANRequest.ProtoReflect.Descriptor instead.
+func (*RunRMANRequest) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *RunRMANRequest) GetScripts() []string {
+	if x != nil {
+		return x.Scripts
+	}
+	return nil
+}
+
+func (x *RunRMANRequest) GetTnsAdmin() string {
+	if x != nil {
+		return x.TnsAdmin
+	}
+	return ""
+}
+
+func (x *RunRMANRequest) GetSuppress() bool {
+	if x != nil {
+		return x.Suppress
+	}
+	return false
+}
+
+func (x *RunRMANRequest) GetTarget() string {
+	if x != nil {
+		return x.Target
+	}
+	return ""
+}
+
+func (x *RunRMANRequest) GetAuxiliary() string {
+	if x != nil {
+		return x.Auxiliary
+	}
+	return ""
+}
+
+func (x *RunRMANRequest) GetGcsPath() string {
+	if x != nil {
+		return x.GcsPath
+	}
+	return ""
+}
+
+func (x *RunRMANRequest) GetLocalPath() string {
+	if x != nil {
+		return x.LocalPath
+	}
+	return ""
+}
+
+func (x *RunRMANRequest) GetCmd() string {
+	if x != nil {
+		return x.Cmd
+	}
+	return ""
+}
+
+// LROInput is a common part of input requests for all Async operations.
+type LROInput struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional identifier of the requested operation.
+	// If not provided, a random ID will be generated.
+ OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` +} + +func (x *LROInput) Reset() { + *x = LROInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LROInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LROInput) ProtoMessage() {} + +func (x *LROInput) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LROInput.ProtoReflect.Descriptor instead. +func (*LROInput) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{18} +} + +func (x *LROInput) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +type RunRMANAsyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncRequest *RunRMANRequest `protobuf:"bytes,1,opt,name=sync_request,json=syncRequest,proto3" json:"sync_request,omitempty"` + LroInput *LROInput `protobuf:"bytes,2,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *RunRMANAsyncRequest) Reset() { + *x = RunRMANAsyncRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunRMANAsyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunRMANAsyncRequest) ProtoMessage() {} + +func (x *RunRMANAsyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunRMANAsyncRequest.ProtoReflect.Descriptor instead. +func (*RunRMANAsyncRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{19} +} + +func (x *RunRMANAsyncRequest) GetSyncRequest() *RunRMANRequest { + if x != nil { + return x.SyncRequest + } + return nil +} + +func (x *RunRMANAsyncRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type RunRMANResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output of each script executed by RMAN. 
+	Output []string `protobuf:"bytes,1,rep,name=output,proto3" json:"output,omitempty"`
+}
+
+func (x *RunRMANResponse) Reset() {
+	*x = RunRMANResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RunRMANResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RunRMANResponse) ProtoMessage() {}
+
+func (x *RunRMANResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RunRMANResponse.ProtoReflect.Descriptor instead.
+func (*RunRMANResponse) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *RunRMANResponse) GetOutput() []string {
+	if x != nil {
+		return x.Output
+	}
+	return nil
+}
+
+type NIDRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ORACLE_SID env value.
+	Sid string `protobuf:"bytes,1,opt,name=sid,proto3" json:"sid,omitempty"`
+	// Optional new database name, if a rename of the database is also required.
+	DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"`
+}
+
+func (x *NIDRequest) Reset() {
+	*x = NIDRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NIDRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NIDRequest) ProtoMessage() {}
+
+func (x *NIDRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NIDRequest.ProtoReflect.Descriptor instead.
+func (*NIDRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{21} +} + +func (x *NIDRequest) GetSid() string { + if x != nil { + return x.Sid + } + return "" +} + +func (x *NIDRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +type NIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NIDResponse) Reset() { + *x = NIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NIDResponse) ProtoMessage() {} + +func (x *NIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NIDResponse.ProtoReflect.Descriptor instead. +func (*NIDResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{22} +} + +type GetDatabaseTypeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetDatabaseTypeRequest) Reset() { + *x = GetDatabaseTypeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetDatabaseTypeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDatabaseTypeRequest) ProtoMessage() {} + +func (x *GetDatabaseTypeRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDatabaseTypeRequest.ProtoReflect.Descriptor instead. 
+func (*GetDatabaseTypeRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{23} +} + +type GetDatabaseTypeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatabaseType GetDatabaseTypeResponse_DatabaseType `protobuf:"varint,1,opt,name=database_type,json=databaseType,proto3,enum=agents.oracle.GetDatabaseTypeResponse_DatabaseType" json:"database_type,omitempty"` +} + +func (x *GetDatabaseTypeResponse) Reset() { + *x = GetDatabaseTypeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetDatabaseTypeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDatabaseTypeResponse) ProtoMessage() {} + +func (x *GetDatabaseTypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDatabaseTypeResponse.ProtoReflect.Descriptor instead. +func (*GetDatabaseTypeResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{24} +} + +func (x *GetDatabaseTypeResponse) GetDatabaseType() GetDatabaseTypeResponse_DatabaseType { + if x != nil { + return x.DatabaseType + } + return GetDatabaseTypeResponse_UNKNOWN_DATABASE_TYPE +} + +type GetDatabaseNameRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetDatabaseNameRequest) Reset() { + *x = GetDatabaseNameRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetDatabaseNameRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDatabaseNameRequest) ProtoMessage() {} + +func (x *GetDatabaseNameRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDatabaseNameRequest.ProtoReflect.Descriptor instead. 
+func (*GetDatabaseNameRequest) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{25}
+}
+
+type GetDatabaseNameResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"`
+}
+
+func (x *GetDatabaseNameResponse) Reset() {
+	*x = GetDatabaseNameResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[26]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GetDatabaseNameResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetDatabaseNameResponse) ProtoMessage() {}
+
+func (x *GetDatabaseNameResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[26]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetDatabaseNameResponse.ProtoReflect.Descriptor instead.
+func (*GetDatabaseNameResponse) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *GetDatabaseNameResponse) GetDatabaseName() string {
+	if x != nil {
+		return x.DatabaseName
+	}
+	return ""
+}
+
+type SetListenerRegistrationRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// global_database_name is the global name of the database:
+	// the database unique name qualified with the domain name.
+	GlobalDatabaseName string `protobuf:"bytes,1,opt,name=global_database_name,json=globalDatabaseName,proto3" json:"global_database_name,omitempty"`
+	// database_name is the name of the database. This is the same
+	// for both the ES primary and the replica.
+	DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"`
+}
+
+func (x *SetListenerRegistrationRequest) Reset() {
+	*x = SetListenerRegistrationRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SetListenerRegistrationRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetListenerRegistrationRequest) ProtoMessage() {}
+
+func (x *SetListenerRegistrationRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[27]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetListenerRegistrationRequest.ProtoReflect.Descriptor instead.
+func (*SetListenerRegistrationRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{27} +} + +func (x *SetListenerRegistrationRequest) GetGlobalDatabaseName() string { + if x != nil { + return x.GlobalDatabaseName + } + return "" +} + +func (x *SetListenerRegistrationRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +type BootstrapStandbyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` +} + +func (x *BootstrapStandbyRequest) Reset() { + *x = BootstrapStandbyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapStandbyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapStandbyRequest) ProtoMessage() {} + +func (x *BootstrapStandbyRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapStandbyRequest.ProtoReflect.Descriptor instead. +func (*BootstrapStandbyRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{28} +} + +func (x *BootstrapStandbyRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +type BootstrapStandbyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BootstrapStandbyResponse) Reset() { + *x = BootstrapStandbyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapStandbyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapStandbyResponse) ProtoMessage() {} + +func (x *BootstrapStandbyResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapStandbyResponse.ProtoReflect.Descriptor instead. 
+func (*BootstrapStandbyResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{29} +} + +type CreateCDBRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OracleHome string `protobuf:"bytes,1,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + DbUniqueName string `protobuf:"bytes,3,opt,name=db_unique_name,json=dbUniqueName,proto3" json:"db_unique_name,omitempty"` + CharacterSet string `protobuf:"bytes,4,opt,name=character_set,json=characterSet,proto3" json:"character_set,omitempty"` + MemoryPercent int32 `protobuf:"varint,5,opt,name=memory_percent,json=memoryPercent,proto3" json:"memory_percent,omitempty"` + AdditionalParams []string `protobuf:"bytes,6,rep,name=additional_params,json=additionalParams,proto3" json:"additional_params,omitempty"` + Version string `protobuf:"bytes,7,opt,name=version,proto3" json:"version,omitempty"` + DbDomain string `protobuf:"bytes,8,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CreateCDBRequest) Reset() { + *x = CreateCDBRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBRequest) ProtoMessage() {} + +func (x *CreateCDBRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBRequest.ProtoReflect.Descriptor instead. 
+func (*CreateCDBRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{30} +} + +func (x *CreateCDBRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *CreateCDBRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *CreateCDBRequest) GetDbUniqueName() string { + if x != nil { + return x.DbUniqueName + } + return "" +} + +func (x *CreateCDBRequest) GetCharacterSet() string { + if x != nil { + return x.CharacterSet + } + return "" +} + +func (x *CreateCDBRequest) GetMemoryPercent() int32 { + if x != nil { + return x.MemoryPercent + } + return 0 +} + +func (x *CreateCDBRequest) GetAdditionalParams() []string { + if x != nil { + return x.AdditionalParams + } + return nil +} + +func (x *CreateCDBRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CreateCDBRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CreateCDBAsyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncRequest *CreateCDBRequest `protobuf:"bytes,1,opt,name=sync_request,json=syncRequest,proto3" json:"sync_request,omitempty"` + LroInput *LROInput `protobuf:"bytes,2,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *CreateCDBAsyncRequest) Reset() { + *x = CreateCDBAsyncRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBAsyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBAsyncRequest) ProtoMessage() {} + +func (x *CreateCDBAsyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBAsyncRequest.ProtoReflect.Descriptor instead. 
+func (*CreateCDBAsyncRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{31} +} + +func (x *CreateCDBAsyncRequest) GetSyncRequest() *CreateCDBRequest { + if x != nil { + return x.SyncRequest + } + return nil +} + +func (x *CreateCDBAsyncRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type CreateCDBResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateCDBResponse) Reset() { + *x = CreateCDBResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCDBResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCDBResponse) ProtoMessage() {} + +func (x *CreateCDBResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCDBResponse.ProtoReflect.Descriptor instead. +func (*CreateCDBResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{32} +} + +type CreateListenerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + Protocol string `protobuf:"bytes,3,opt,name=protocol,proto3" json:"protocol,omitempty"` + OracleHome string `protobuf:"bytes,4,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + DbDomain string `protobuf:"bytes,5,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` +} + +func (x *CreateListenerRequest) Reset() { + *x = CreateListenerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateListenerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateListenerRequest) ProtoMessage() {} + +func (x *CreateListenerRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateListenerRequest.ProtoReflect.Descriptor instead. 
+func (*CreateListenerRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{33} +} + +func (x *CreateListenerRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *CreateListenerRequest) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *CreateListenerRequest) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + +func (x *CreateListenerRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *CreateListenerRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +type CreateListenerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateListenerResponse) Reset() { + *x = CreateListenerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateListenerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateListenerResponse) ProtoMessage() {} + +func (x *CreateListenerResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateListenerResponse.ProtoReflect.Descriptor instead. +func (*CreateListenerResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{34} +} + +type FileExistsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *FileExistsRequest) Reset() { + *x = FileExistsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileExistsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileExistsRequest) ProtoMessage() {} + +func (x *FileExistsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileExistsRequest.ProtoReflect.Descriptor instead. 
+func (*FileExistsRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{35} +} + +func (x *FileExistsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type FileExistsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` +} + +func (x *FileExistsResponse) Reset() { + *x = FileExistsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileExistsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileExistsResponse) ProtoMessage() {} + +func (x *FileExistsResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileExistsResponse.ProtoReflect.Descriptor instead. +func (*FileExistsResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{36} +} + +func (x *FileExistsResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +type PhysicalRestoreRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RestoreStatement string `protobuf:"bytes,1,opt,name=restore_statement,json=restoreStatement,proto3" json:"restore_statement,omitempty"` + LatestRecoverableScnQuery string `protobuf:"bytes,2,opt,name=latest_recoverable_scn_query,json=latestRecoverableScnQuery,proto3" json:"latest_recoverable_scn_query,omitempty"` + RecoverStatementTemplate string `protobuf:"bytes,3,opt,name=recover_statement_template,json=recoverStatementTemplate,proto3" json:"recover_statement_template,omitempty"` +} + +func (x *PhysicalRestoreRequest) Reset() { + *x = PhysicalRestoreRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhysicalRestoreRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhysicalRestoreRequest) ProtoMessage() {} + +func (x *PhysicalRestoreRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhysicalRestoreRequest.ProtoReflect.Descriptor instead. 
+func (*PhysicalRestoreRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{37} +} + +func (x *PhysicalRestoreRequest) GetRestoreStatement() string { + if x != nil { + return x.RestoreStatement + } + return "" +} + +func (x *PhysicalRestoreRequest) GetLatestRecoverableScnQuery() string { + if x != nil { + return x.LatestRecoverableScnQuery + } + return "" +} + +func (x *PhysicalRestoreRequest) GetRecoverStatementTemplate() string { + if x != nil { + return x.RecoverStatementTemplate + } + return "" +} + +type PhysicalRestoreAsyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncRequest *PhysicalRestoreRequest `protobuf:"bytes,1,opt,name=sync_request,json=syncRequest,proto3" json:"sync_request,omitempty"` + LroInput *LROInput `protobuf:"bytes,2,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *PhysicalRestoreAsyncRequest) Reset() { + *x = PhysicalRestoreAsyncRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhysicalRestoreAsyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhysicalRestoreAsyncRequest) ProtoMessage() {} + +func (x *PhysicalRestoreAsyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhysicalRestoreAsyncRequest.ProtoReflect.Descriptor instead. 
+func (*PhysicalRestoreAsyncRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{38} +} + +func (x *PhysicalRestoreAsyncRequest) GetSyncRequest() *PhysicalRestoreRequest { + if x != nil { + return x.SyncRequest + } + return nil +} + +func (x *PhysicalRestoreAsyncRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type DataPumpImportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + DbDomain string `protobuf:"bytes,2,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + CommandParams []string `protobuf:"bytes,3,rep,name=command_params,json=commandParams,proto3" json:"command_params,omitempty"` + // GCS path to input dump file + GcsPath string `protobuf:"bytes,4,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + // GCS path to output log file + GcsLogPath string `protobuf:"bytes,5,opt,name=gcs_log_path,json=gcsLogPath,proto3" json:"gcs_log_path,omitempty"` +} + +func (x *DataPumpImportRequest) Reset() { + *x = DataPumpImportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpImportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpImportRequest) ProtoMessage() {} + +func (x *DataPumpImportRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpImportRequest.ProtoReflect.Descriptor instead. 
+func (*DataPumpImportRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{39} +} + +func (x *DataPumpImportRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *DataPumpImportRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *DataPumpImportRequest) GetCommandParams() []string { + if x != nil { + return x.CommandParams + } + return nil +} + +func (x *DataPumpImportRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *DataPumpImportRequest) GetGcsLogPath() string { + if x != nil { + return x.GcsLogPath + } + return "" +} + +type DataPumpImportAsyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncRequest *DataPumpImportRequest `protobuf:"bytes,1,opt,name=sync_request,json=syncRequest,proto3" json:"sync_request,omitempty"` + LroInput *LROInput `protobuf:"bytes,2,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *DataPumpImportAsyncRequest) Reset() { + *x = DataPumpImportAsyncRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpImportAsyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpImportAsyncRequest) ProtoMessage() {} + +func (x *DataPumpImportAsyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpImportAsyncRequest.ProtoReflect.Descriptor instead. +func (*DataPumpImportAsyncRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{40} +} + +func (x *DataPumpImportAsyncRequest) GetSyncRequest() *DataPumpImportRequest { + if x != nil { + return x.SyncRequest + } + return nil +} + +func (x *DataPumpImportAsyncRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type DataPumpImportResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DataPumpImportResponse) Reset() { + *x = DataPumpImportResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpImportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpImportResponse) ProtoMessage() {} + +func (x *DataPumpImportResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpImportResponse.ProtoReflect.Descriptor instead. 
+func (*DataPumpImportResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{41} +} + +type DataPumpExportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PdbName string `protobuf:"bytes,1,opt,name=pdb_name,json=pdbName,proto3" json:"pdb_name,omitempty"` + DbDomain string `protobuf:"bytes,2,opt,name=db_domain,json=dbDomain,proto3" json:"db_domain,omitempty"` + ObjectType string `protobuf:"bytes,3,opt,name=object_type,json=objectType,proto3" json:"object_type,omitempty"` + Objects string `protobuf:"bytes,4,opt,name=objects,proto3" json:"objects,omitempty"` + CommandParams []string `protobuf:"bytes,5,rep,name=command_params,json=commandParams,proto3" json:"command_params,omitempty"` + GcsPath string `protobuf:"bytes,6,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + GcsLogPath string `protobuf:"bytes,7,opt,name=gcs_log_path,json=gcsLogPath,proto3" json:"gcs_log_path,omitempty"` + FlashbackTime string `protobuf:"bytes,8,opt,name=flashback_time,json=flashbackTime,proto3" json:"flashback_time,omitempty"` +} + +func (x *DataPumpExportRequest) Reset() { + *x = DataPumpExportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpExportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpExportRequest) ProtoMessage() {} + +func (x *DataPumpExportRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpExportRequest.ProtoReflect.Descriptor instead. 
+func (*DataPumpExportRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{42} +} + +func (x *DataPumpExportRequest) GetPdbName() string { + if x != nil { + return x.PdbName + } + return "" +} + +func (x *DataPumpExportRequest) GetDbDomain() string { + if x != nil { + return x.DbDomain + } + return "" +} + +func (x *DataPumpExportRequest) GetObjectType() string { + if x != nil { + return x.ObjectType + } + return "" +} + +func (x *DataPumpExportRequest) GetObjects() string { + if x != nil { + return x.Objects + } + return "" +} + +func (x *DataPumpExportRequest) GetCommandParams() []string { + if x != nil { + return x.CommandParams + } + return nil +} + +func (x *DataPumpExportRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *DataPumpExportRequest) GetGcsLogPath() string { + if x != nil { + return x.GcsLogPath + } + return "" +} + +func (x *DataPumpExportRequest) GetFlashbackTime() string { + if x != nil { + return x.FlashbackTime + } + return "" +} + +type DataPumpExportAsyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncRequest *DataPumpExportRequest `protobuf:"bytes,1,opt,name=sync_request,json=syncRequest,proto3" json:"sync_request,omitempty"` + LroInput *LROInput `protobuf:"bytes,2,opt,name=lro_input,json=lroInput,proto3" json:"lro_input,omitempty"` +} + +func (x *DataPumpExportAsyncRequest) Reset() { + *x = DataPumpExportAsyncRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpExportAsyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpExportAsyncRequest) ProtoMessage() {} + +func (x *DataPumpExportAsyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpExportAsyncRequest.ProtoReflect.Descriptor instead. 
+func (*DataPumpExportAsyncRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{43} +} + +func (x *DataPumpExportAsyncRequest) GetSyncRequest() *DataPumpExportRequest { + if x != nil { + return x.SyncRequest + } + return nil +} + +func (x *DataPumpExportAsyncRequest) GetLroInput() *LROInput { + if x != nil { + return x.LroInput + } + return nil +} + +type DataPumpExportResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DataPumpExportResponse) Reset() { + *x = DataPumpExportResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataPumpExportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataPumpExportResponse) ProtoMessage() {} + +func (x *DataPumpExportResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataPumpExportResponse.ProtoReflect.Descriptor instead. +func (*DataPumpExportResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{44} +} + +type RecoverConfigFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CdbName string `protobuf:"bytes,1,opt,name=cdbName,proto3" json:"cdbName,omitempty"` +} + +func (x *RecoverConfigFileRequest) Reset() { + *x = RecoverConfigFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecoverConfigFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecoverConfigFileRequest) ProtoMessage() {} + +func (x *RecoverConfigFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecoverConfigFileRequest.ProtoReflect.Descriptor instead. 
+func (*RecoverConfigFileRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{45} +} + +func (x *RecoverConfigFileRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +type RecoverConfigFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RecoverConfigFileResponse) Reset() { + *x = RecoverConfigFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecoverConfigFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecoverConfigFileResponse) ProtoMessage() {} + +func (x *RecoverConfigFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecoverConfigFileResponse.ProtoReflect.Descriptor instead. +func (*RecoverConfigFileResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{46} +} + +type DownloadDirectoryFromGCSRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GcsPath string `protobuf:"bytes,1,opt,name=gcs_path,json=gcsPath,proto3" json:"gcs_path,omitempty"` + LocalPath string `protobuf:"bytes,2,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` +} + +func (x *DownloadDirectoryFromGCSRequest) Reset() { + *x = DownloadDirectoryFromGCSRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownloadDirectoryFromGCSRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownloadDirectoryFromGCSRequest) ProtoMessage() {} + +func (x *DownloadDirectoryFromGCSRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownloadDirectoryFromGCSRequest.ProtoReflect.Descriptor instead. 
+func (*DownloadDirectoryFromGCSRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{47} +} + +func (x *DownloadDirectoryFromGCSRequest) GetGcsPath() string { + if x != nil { + return x.GcsPath + } + return "" +} + +func (x *DownloadDirectoryFromGCSRequest) GetLocalPath() string { + if x != nil { + return x.LocalPath + } + return "" +} + +type DownloadDirectoryFromGCSResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DownloadDirectoryFromGCSResponse) Reset() { + *x = DownloadDirectoryFromGCSResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownloadDirectoryFromGCSResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownloadDirectoryFromGCSResponse) ProtoMessage() {} + +func (x *DownloadDirectoryFromGCSResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownloadDirectoryFromGCSResponse.ProtoReflect.Descriptor instead. +func (*DownloadDirectoryFromGCSResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{48} +} + +type FetchServiceImageMetaDataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FetchServiceImageMetaDataRequest) Reset() { + *x = FetchServiceImageMetaDataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FetchServiceImageMetaDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchServiceImageMetaDataRequest) ProtoMessage() {} + +func (x *FetchServiceImageMetaDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchServiceImageMetaDataRequest.ProtoReflect.Descriptor instead. 
+func (*FetchServiceImageMetaDataRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{49} +} + +type FetchServiceImageMetaDataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + CdbName string `protobuf:"bytes,2,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + OracleHome string `protobuf:"bytes,3,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` +} + +func (x *FetchServiceImageMetaDataResponse) Reset() { + *x = FetchServiceImageMetaDataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FetchServiceImageMetaDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchServiceImageMetaDataResponse) ProtoMessage() {} + +func (x *FetchServiceImageMetaDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchServiceImageMetaDataResponse.ProtoReflect.Descriptor instead. +func (*FetchServiceImageMetaDataResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{50} +} + +func (x *FetchServiceImageMetaDataResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *FetchServiceImageMetaDataResponse) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *FetchServiceImageMetaDataResponse) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +// FileInfo describes a file and is returned by Stat. 
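+// It mirrors Go's os.FileInfo (name, size, mode, modTime, isDir) and adds the
+// entry's absolute path, so ReadDir callers can act on results directly.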
+type ReadDirResponse_FileInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Mode uint32 `protobuf:"varint,3,opt,name=mode,proto3" json:"mode,omitempty"` + ModTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=modTime,proto3" json:"modTime,omitempty"` + IsDir bool `protobuf:"varint,5,opt,name=isDir,proto3" json:"isDir,omitempty"` + AbsPath string `protobuf:"bytes,6,opt,name=absPath,proto3" json:"absPath,omitempty"` +} + +func (x *ReadDirResponse_FileInfo) Reset() { + *x = ReadDirResponse_FileInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDirResponse_FileInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDirResponse_FileInfo) ProtoMessage() {} + +func (x *ReadDirResponse_FileInfo) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDirResponse_FileInfo.ProtoReflect.Descriptor instead. +func (*ReadDirResponse_FileInfo) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ReadDirResponse_FileInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ReadDirResponse_FileInfo) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *ReadDirResponse_FileInfo) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +func (x *ReadDirResponse_FileInfo) GetModTime() *timestamp.Timestamp { + if x != nil { + return x.ModTime + } + return nil +} + +func (x *ReadDirResponse_FileInfo) GetIsDir() bool { + if x != nil { + return x.IsDir + } + return false +} + +func (x *ReadDirResponse_FileInfo) GetAbsPath() string { + if x != nil { + return x.AbsPath + } + return "" +} + +var File_oracle_pkg_agents_oracle_dbdaemon_proto protoreflect.FileDescriptor + +var file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x64, 0x62, 0x64, 0x61, 0x65, + 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x1a, 0x25, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, + 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 
0x6e, + 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x3a, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x65, + 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x65, 0x72, 0x6d, 0x22, 0x13, + 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x42, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, + 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xca, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x63, + 0x75, 0x72, 0x72, 0x50, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x75, 0x72, 0x72, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x43, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x75, 0x62, + 0x50, 0x61, 0x74, 0x68, 0x73, 0x1a, 0xac, 0x01, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, + 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x34, + 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x6d, 0x6f, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x73, 0x44, 0x69, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x44, 0x69, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x62, + 0x73, 0x50, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x62, 0x73, + 0x50, 0x61, 0x74, 0x68, 0x22, 0x3c, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, + 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x43, 0x4d, + 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x11, 0x0a, 0x0f, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x83, + 0x02, 0x0a, 0x14, 0x52, 0x75, 0x6e, 0x53, 0x51, 0x4c, 0x50, 0x6c, 0x75, 0x73, 0x43, 0x4d, 0x44, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6e, 0x73, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6e, 0x73, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a, 0x05, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x73, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x73, 0x6e, 0x12, 0x25, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x71, 0x75, 0x69, 0x65, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, + 0x71, 0x75, 0x69, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x74, 0x0a, 0x19, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x61, 0x74, + 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x73, 0x5f, 0x63, 0x64, 0x62, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x43, 0x64, 0x62, 0x12, 0x1b, 0x0a, + 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x79, + 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x64, 0x69, 0x72, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x22, + 0x1c, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x03, + 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, + 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6d, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x65, 0x6d, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x0a, 0x65, 0x6d, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6d, 0x44, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x11, 0x65, 0x6d, 0x5f, 0x64, 0x62, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6d, + 0x44, 0x62, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0c, + 0x65, 0x6d, 0x5f, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6d, 0x44, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, + 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x44, 0x69, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x44, + 0x69, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x65, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x6f, 0x72, 0x61, 0x5f, 0x64, 0x69, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, + 0x44, 0x69, 0x72, 0x12, 0x2b, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x6f, 0x72, 0x61, 0x5f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x55, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x6f, 0x72, 0x61, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x52, 0x0a, 0x10, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, + 0x50, 0x44, 0x42, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 
0x65, 0x65, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x4f, 0x70, 0x65, 0x6e, 0x22, 0x32, 0x0a, 0x11, 0x4b, + 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x44, 0x42, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x64, 0x62, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x64, 0x62, 0x73, 0x22, + 0xe5, 0x01, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6e, 0x73, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6e, 0x73, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x70, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x75, 0x70, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x78, 0x69, 0x6c, 0x69, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x78, 0x69, 0x6c, 0x69, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x67, + 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, + 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x63, 0x6d, 0x64, 0x22, 0x2d, 0x0a, 0x08, 0x4c, 0x52, 0x4f, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x52, 0x4d, + 0x41, 0x4e, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, 0x72, + 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x29, 0x0a, 0x0f, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, + 0x4e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x22, 0x43, 0x0a, 0x0a, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x73, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 
0x69, + 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x0d, 0x0a, 0x0b, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0xdd, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x64, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x68, 0x0a, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, + 0x12, 0x1a, 0x0a, 0x16, 0x4f, 0x52, 0x41, 0x43, 0x4c, 0x45, 0x5f, 0x31, 0x32, 0x5f, 0x32, 0x5f, + 0x45, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x4f, 0x52, 0x41, 0x43, 0x4c, 0x45, 0x5f, 0x31, 0x32, 0x5f, 0x32, 0x5f, 0x45, 0x4e, 0x54, 0x45, + 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x43, 0x44, 0x42, 0x10, 0x02, 0x22, + 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3e, 0x0a, 0x17, 0x47, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x77, 0x0a, 0x1e, 0x53, 0x65, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x22, 0x34, 0x0a, 0x17, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 
0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x44, 0x42, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x24, 0x0a, 0x0e, 0x64, 0x62, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x62, 0x55, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, + 0x65, 0x72, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x68, + 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x43, 0x44, 0x42, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x42, 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x08, 0x6c, 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, + 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x18, 0x0a, 0x16, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x2c, + 0x0a, 0x12, 0x46, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0xc4, 0x01, 0x0a, + 0x16, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x6e, 0x5f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x6e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x72, 0x65, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x1b, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, + 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, + 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, + 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, 0x72, 0x6f, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x22, 0xb3, 0x01, 0x0a, 0x15, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x70, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x19, 0x0a, 0x08, + 0x67, 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x67, 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0c, 0x67, 0x63, 0x73, 0x5f, 0x6c, + 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, + 0x63, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x44, 0x61, + 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, + 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, + 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x50, + 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x95, 0x02, 0x0a, 0x15, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, + 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x62, 0x5f, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x62, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x63, 0x73, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x63, 0x73, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x20, 0x0a, 0x0c, 0x67, 0x63, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, 0x63, 0x73, 0x4c, 0x6f, 0x67, 0x50, 
0x61, + 0x74, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x6c, 0x61, 0x73, + 0x68, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x44, 0x61, + 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, + 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x72, 0x6f, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x52, 0x4f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x6c, + 0x72, 0x6f, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x50, + 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x34, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, 0x1f, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6d, 0x47, 0x43, 0x53, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x63, 0x73, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x63, 0x73, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x61, 0x74, + 0x68, 0x22, 0x22, 0x0a, 0x20, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6d, 0x47, 0x43, 0x53, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x20, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x79, 0x0a, 0x21, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x6f, + 0x6d, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, + 0x48, 0x6f, 0x6d, 0x65, 0x32, 0xfb, 0x16, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x44, 0x69, 0x72, 0x12, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x52, 0x65, 0x61, 0x64, 0x44, + 0x69, 0x72, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4e, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x72, 0x12, 0x1f, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5d, 0x0a, 0x0e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5d, 0x0a, 0x0e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x69, 0x0a, 0x12, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0a, 0x52, 0x75, + 0x6e, 0x53, 0x51, 0x4c, 0x50, 0x6c, 0x75, 
0x73, 0x12, 0x23, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x51, 0x4c, 0x50, + 0x6c, 0x75, 0x73, 0x43, 0x4d, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, + 0x6e, 0x43, 0x4d, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x13, + 0x52, 0x75, 0x6e, 0x53, 0x51, 0x4c, 0x50, 0x6c, 0x75, 0x73, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x74, 0x65, 0x64, 0x12, 0x23, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x51, 0x4c, 0x50, 0x6c, 0x75, 0x73, 0x43, 0x4d, + 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x43, 0x4d, 0x44, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x09, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, + 0x50, 0x44, 0x42, 0x73, 0x12, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x44, 0x42, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x44, 0x42, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x52, 0x4d, + 0x41, 0x4e, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x41, 0x73, 0x79, 0x6e, + 0x63, 0x12, 0x22, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x4d, 0x41, 0x4e, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x03, 0x4e, 0x49, 0x44, 0x12, 0x19, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4e, 0x49, 0x44, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x60, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x2e, 0x61, 
0x67, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x28, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x7b, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x69, 0x74, 0x4f, + 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x69, 0x74, 0x4f, + 0x72, 0x61, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, + 0x0a, 0x17, 0x53, 0x65, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x63, 0x0a, 0x10, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, + 0x64, 0x62, 0x79, 0x12, 0x26, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, + 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x62, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, + 0x42, 0x12, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 
0x63, + 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, + 0x42, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x44, 0x42, + 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x0e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x24, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0a, 0x46, 0x69, + 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, + 0x14, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x41, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2a, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x5f, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x29, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5f, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, 0x70, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x29, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x75, 0x6d, + 0x70, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 
0x67, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x11, 0x52, 0x65, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x27, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x18, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6d, 0x47, 0x43, 0x53, 0x12, 0x2e, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x46, 0x72, 0x6f, 0x6d, 0x47, 0x43, 0x53, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x44, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x46, 0x72, 0x6f, 0x6d, 0x47, 0x43, 0x53, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x80, 0x01, 0x0a, 0x19, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x42, 0x58, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x65, 0x6c, 0x63, 0x61, 0x72, 0x72, 0x6f, 0x2d, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x3b, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescOnce sync.Once + file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescData = file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDesc +) + +func file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescGZIP() []byte { + file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescOnce.Do(func() { + file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescData = protoimpl.X.CompressGZIP(file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescData) + }) + return file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDescData +} + +var file_oracle_pkg_agents_oracle_dbdaemon_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes = make([]protoimpl.MessageInfo, 52) +var file_oracle_pkg_agents_oracle_dbdaemon_proto_goTypes = []interface{}{ + (GetDatabaseTypeResponse_DatabaseType)(0), // 0: agents.oracle.GetDatabaseTypeResponse.DatabaseType + (*CreateDirRequest)(nil), // 1: agents.oracle.CreateDirRequest + (*CreateDirResponse)(nil), // 2: agents.oracle.CreateDirResponse + (*ReadDirRequest)(nil), // 3: agents.oracle.ReadDirRequest + (*ReadDirResponse)(nil), // 4: agents.oracle.ReadDirResponse + (*DeleteDirRequest)(nil), // 5: agents.oracle.DeleteDirRequest + (*DeleteDirResponse)(nil), // 6: agents.oracle.DeleteDirResponse + (*RunCMDResponse)(nil), // 7: agents.oracle.RunCMDResponse + (*LocalConnection)(nil), // 8: agents.oracle.LocalConnection + (*RunSQLPlusCMDRequest)(nil), // 9: agents.oracle.RunSQLPlusCMDRequest + (*CheckDatabaseStateRequest)(nil), // 10: agents.oracle.CheckDatabaseStateRequest + (*CheckDatabaseStateResponse)(nil), // 11: agents.oracle.CheckDatabaseStateResponse + (*CreatePasswordFileRequest)(nil), // 12: agents.oracle.CreatePasswordFileRequest + (*CreatePasswordFileResponse)(nil), // 13: agents.oracle.CreatePasswordFileResponse + (*CreateReplicaInitOraFileRequest)(nil), // 14: agents.oracle.CreateReplicaInitOraFileRequest + (*CreateReplicaInitOraFileResponse)(nil), // 15: agents.oracle.CreateReplicaInitOraFileResponse + (*KnownPDBsRequest)(nil), // 16: agents.oracle.KnownPDBsRequest + (*KnownPDBsResponse)(nil), // 17: agents.oracle.KnownPDBsResponse + (*RunRMANRequest)(nil), // 18: agents.oracle.RunRMANRequest + (*LROInput)(nil), // 19: agents.oracle.LROInput + (*RunRMANAsyncRequest)(nil), // 20: agents.oracle.RunRMANAsyncRequest + (*RunRMANResponse)(nil), // 21: agents.oracle.RunRMANResponse + (*NIDRequest)(nil), // 22: agents.oracle.NIDRequest + (*NIDResponse)(nil), // 23: agents.oracle.NIDResponse + (*GetDatabaseTypeRequest)(nil), // 24: 
agents.oracle.GetDatabaseTypeRequest + (*GetDatabaseTypeResponse)(nil), // 25: agents.oracle.GetDatabaseTypeResponse + (*GetDatabaseNameRequest)(nil), // 26: agents.oracle.GetDatabaseNameRequest + (*GetDatabaseNameResponse)(nil), // 27: agents.oracle.GetDatabaseNameResponse + (*SetListenerRegistrationRequest)(nil), // 28: agents.oracle.SetListenerRegistrationRequest + (*BootstrapStandbyRequest)(nil), // 29: agents.oracle.BootstrapStandbyRequest + (*BootstrapStandbyResponse)(nil), // 30: agents.oracle.BootstrapStandbyResponse + (*CreateCDBRequest)(nil), // 31: agents.oracle.CreateCDBRequest + (*CreateCDBAsyncRequest)(nil), // 32: agents.oracle.CreateCDBAsyncRequest + (*CreateCDBResponse)(nil), // 33: agents.oracle.CreateCDBResponse + (*CreateListenerRequest)(nil), // 34: agents.oracle.CreateListenerRequest + (*CreateListenerResponse)(nil), // 35: agents.oracle.CreateListenerResponse + (*FileExistsRequest)(nil), // 36: agents.oracle.FileExistsRequest + (*FileExistsResponse)(nil), // 37: agents.oracle.FileExistsResponse + (*PhysicalRestoreRequest)(nil), // 38: agents.oracle.PhysicalRestoreRequest + (*PhysicalRestoreAsyncRequest)(nil), // 39: agents.oracle.PhysicalRestoreAsyncRequest + (*DataPumpImportRequest)(nil), // 40: agents.oracle.DataPumpImportRequest + (*DataPumpImportAsyncRequest)(nil), // 41: agents.oracle.DataPumpImportAsyncRequest + (*DataPumpImportResponse)(nil), // 42: agents.oracle.DataPumpImportResponse + (*DataPumpExportRequest)(nil), // 43: agents.oracle.DataPumpExportRequest + (*DataPumpExportAsyncRequest)(nil), // 44: agents.oracle.DataPumpExportAsyncRequest + (*DataPumpExportResponse)(nil), // 45: agents.oracle.DataPumpExportResponse + (*RecoverConfigFileRequest)(nil), // 46: agents.oracle.RecoverConfigFileRequest + (*RecoverConfigFileResponse)(nil), // 47: agents.oracle.RecoverConfigFileResponse + (*DownloadDirectoryFromGCSRequest)(nil), // 48: agents.oracle.DownloadDirectoryFromGCSRequest + (*DownloadDirectoryFromGCSResponse)(nil), // 49: agents.oracle.DownloadDirectoryFromGCSResponse + (*FetchServiceImageMetaDataRequest)(nil), // 50: agents.oracle.FetchServiceImageMetaDataRequest + (*FetchServiceImageMetaDataResponse)(nil), // 51: agents.oracle.FetchServiceImageMetaDataResponse + (*ReadDirResponse_FileInfo)(nil), // 52: agents.oracle.ReadDirResponse.FileInfo + (*timestamp.Timestamp)(nil), // 53: google.protobuf.Timestamp + (*BounceDatabaseRequest)(nil), // 54: agents.oracle.BounceDatabaseRequest + (*BounceListenerRequest)(nil), // 55: agents.oracle.BounceListenerRequest + (*longrunning.ListOperationsRequest)(nil), // 56: google.longrunning.ListOperationsRequest + (*longrunning.GetOperationRequest)(nil), // 57: google.longrunning.GetOperationRequest + (*longrunning.DeleteOperationRequest)(nil), // 58: google.longrunning.DeleteOperationRequest + (*BounceDatabaseResponse)(nil), // 59: agents.oracle.BounceDatabaseResponse + (*BounceListenerResponse)(nil), // 60: agents.oracle.BounceListenerResponse + (*longrunning.Operation)(nil), // 61: google.longrunning.Operation + (*longrunning.ListOperationsResponse)(nil), // 62: google.longrunning.ListOperationsResponse + (*empty.Empty)(nil), // 63: google.protobuf.Empty +} +var file_oracle_pkg_agents_oracle_dbdaemon_proto_depIdxs = []int32{ + 52, // 0: agents.oracle.ReadDirResponse.currPath:type_name -> agents.oracle.ReadDirResponse.FileInfo + 52, // 1: agents.oracle.ReadDirResponse.subPaths:type_name -> agents.oracle.ReadDirResponse.FileInfo + 8, // 2: agents.oracle.RunSQLPlusCMDRequest.local:type_name -> 
agents.oracle.LocalConnection + 18, // 3: agents.oracle.RunRMANAsyncRequest.sync_request:type_name -> agents.oracle.RunRMANRequest + 19, // 4: agents.oracle.RunRMANAsyncRequest.lro_input:type_name -> agents.oracle.LROInput + 0, // 5: agents.oracle.GetDatabaseTypeResponse.database_type:type_name -> agents.oracle.GetDatabaseTypeResponse.DatabaseType + 31, // 6: agents.oracle.CreateCDBAsyncRequest.sync_request:type_name -> agents.oracle.CreateCDBRequest + 19, // 7: agents.oracle.CreateCDBAsyncRequest.lro_input:type_name -> agents.oracle.LROInput + 38, // 8: agents.oracle.PhysicalRestoreAsyncRequest.sync_request:type_name -> agents.oracle.PhysicalRestoreRequest + 19, // 9: agents.oracle.PhysicalRestoreAsyncRequest.lro_input:type_name -> agents.oracle.LROInput + 40, // 10: agents.oracle.DataPumpImportAsyncRequest.sync_request:type_name -> agents.oracle.DataPumpImportRequest + 19, // 11: agents.oracle.DataPumpImportAsyncRequest.lro_input:type_name -> agents.oracle.LROInput + 43, // 12: agents.oracle.DataPumpExportAsyncRequest.sync_request:type_name -> agents.oracle.DataPumpExportRequest + 19, // 13: agents.oracle.DataPumpExportAsyncRequest.lro_input:type_name -> agents.oracle.LROInput + 53, // 14: agents.oracle.ReadDirResponse.FileInfo.modTime:type_name -> google.protobuf.Timestamp + 1, // 15: agents.oracle.DatabaseDaemon.CreateDir:input_type -> agents.oracle.CreateDirRequest + 3, // 16: agents.oracle.DatabaseDaemon.ReadDir:input_type -> agents.oracle.ReadDirRequest + 5, // 17: agents.oracle.DatabaseDaemon.DeleteDir:input_type -> agents.oracle.DeleteDirRequest + 54, // 18: agents.oracle.DatabaseDaemon.BounceDatabase:input_type -> agents.oracle.BounceDatabaseRequest + 55, // 19: agents.oracle.DatabaseDaemon.BounceListener:input_type -> agents.oracle.BounceListenerRequest + 10, // 20: agents.oracle.DatabaseDaemon.CheckDatabaseState:input_type -> agents.oracle.CheckDatabaseStateRequest + 9, // 21: agents.oracle.DatabaseDaemon.RunSQLPlus:input_type -> agents.oracle.RunSQLPlusCMDRequest + 9, // 22: agents.oracle.DatabaseDaemon.RunSQLPlusFormatted:input_type -> agents.oracle.RunSQLPlusCMDRequest + 16, // 23: agents.oracle.DatabaseDaemon.KnownPDBs:input_type -> agents.oracle.KnownPDBsRequest + 18, // 24: agents.oracle.DatabaseDaemon.RunRMAN:input_type -> agents.oracle.RunRMANRequest + 20, // 25: agents.oracle.DatabaseDaemon.RunRMANAsync:input_type -> agents.oracle.RunRMANAsyncRequest + 22, // 26: agents.oracle.DatabaseDaemon.NID:input_type -> agents.oracle.NIDRequest + 24, // 27: agents.oracle.DatabaseDaemon.GetDatabaseType:input_type -> agents.oracle.GetDatabaseTypeRequest + 26, // 28: agents.oracle.DatabaseDaemon.GetDatabaseName:input_type -> agents.oracle.GetDatabaseNameRequest + 12, // 29: agents.oracle.DatabaseDaemon.CreatePasswordFile:input_type -> agents.oracle.CreatePasswordFileRequest + 14, // 30: agents.oracle.DatabaseDaemon.CreateReplicaInitOraFile:input_type -> agents.oracle.CreateReplicaInitOraFileRequest + 28, // 31: agents.oracle.DatabaseDaemon.SetListenerRegistration:input_type -> agents.oracle.SetListenerRegistrationRequest + 29, // 32: agents.oracle.DatabaseDaemon.BootstrapStandby:input_type -> agents.oracle.BootstrapStandbyRequest + 31, // 33: agents.oracle.DatabaseDaemon.CreateCDB:input_type -> agents.oracle.CreateCDBRequest + 32, // 34: agents.oracle.DatabaseDaemon.CreateCDBAsync:input_type -> agents.oracle.CreateCDBAsyncRequest + 34, // 35: agents.oracle.DatabaseDaemon.CreateListener:input_type -> agents.oracle.CreateListenerRequest + 36, // 36: 
agents.oracle.DatabaseDaemon.FileExists:input_type -> agents.oracle.FileExistsRequest + 39, // 37: agents.oracle.DatabaseDaemon.PhysicalRestoreAsync:input_type -> agents.oracle.PhysicalRestoreAsyncRequest + 41, // 38: agents.oracle.DatabaseDaemon.DataPumpImportAsync:input_type -> agents.oracle.DataPumpImportAsyncRequest + 44, // 39: agents.oracle.DatabaseDaemon.DataPumpExportAsync:input_type -> agents.oracle.DataPumpExportAsyncRequest + 56, // 40: agents.oracle.DatabaseDaemon.ListOperations:input_type -> google.longrunning.ListOperationsRequest + 57, // 41: agents.oracle.DatabaseDaemon.GetOperation:input_type -> google.longrunning.GetOperationRequest + 58, // 42: agents.oracle.DatabaseDaemon.DeleteOperation:input_type -> google.longrunning.DeleteOperationRequest + 46, // 43: agents.oracle.DatabaseDaemon.RecoverConfigFile:input_type -> agents.oracle.RecoverConfigFileRequest + 48, // 44: agents.oracle.DatabaseDaemon.DownloadDirectoryFromGCS:input_type -> agents.oracle.DownloadDirectoryFromGCSRequest + 50, // 45: agents.oracle.DatabaseDaemon.FetchServiceImageMetaData:input_type -> agents.oracle.FetchServiceImageMetaDataRequest + 2, // 46: agents.oracle.DatabaseDaemon.CreateDir:output_type -> agents.oracle.CreateDirResponse + 4, // 47: agents.oracle.DatabaseDaemon.ReadDir:output_type -> agents.oracle.ReadDirResponse + 6, // 48: agents.oracle.DatabaseDaemon.DeleteDir:output_type -> agents.oracle.DeleteDirResponse + 59, // 49: agents.oracle.DatabaseDaemon.BounceDatabase:output_type -> agents.oracle.BounceDatabaseResponse + 60, // 50: agents.oracle.DatabaseDaemon.BounceListener:output_type -> agents.oracle.BounceListenerResponse + 11, // 51: agents.oracle.DatabaseDaemon.CheckDatabaseState:output_type -> agents.oracle.CheckDatabaseStateResponse + 7, // 52: agents.oracle.DatabaseDaemon.RunSQLPlus:output_type -> agents.oracle.RunCMDResponse + 7, // 53: agents.oracle.DatabaseDaemon.RunSQLPlusFormatted:output_type -> agents.oracle.RunCMDResponse + 17, // 54: agents.oracle.DatabaseDaemon.KnownPDBs:output_type -> agents.oracle.KnownPDBsResponse + 21, // 55: agents.oracle.DatabaseDaemon.RunRMAN:output_type -> agents.oracle.RunRMANResponse + 61, // 56: agents.oracle.DatabaseDaemon.RunRMANAsync:output_type -> google.longrunning.Operation + 23, // 57: agents.oracle.DatabaseDaemon.NID:output_type -> agents.oracle.NIDResponse + 25, // 58: agents.oracle.DatabaseDaemon.GetDatabaseType:output_type -> agents.oracle.GetDatabaseTypeResponse + 27, // 59: agents.oracle.DatabaseDaemon.GetDatabaseName:output_type -> agents.oracle.GetDatabaseNameResponse + 13, // 60: agents.oracle.DatabaseDaemon.CreatePasswordFile:output_type -> agents.oracle.CreatePasswordFileResponse + 15, // 61: agents.oracle.DatabaseDaemon.CreateReplicaInitOraFile:output_type -> agents.oracle.CreateReplicaInitOraFileResponse + 60, // 62: agents.oracle.DatabaseDaemon.SetListenerRegistration:output_type -> agents.oracle.BounceListenerResponse + 30, // 63: agents.oracle.DatabaseDaemon.BootstrapStandby:output_type -> agents.oracle.BootstrapStandbyResponse + 33, // 64: agents.oracle.DatabaseDaemon.CreateCDB:output_type -> agents.oracle.CreateCDBResponse + 61, // 65: agents.oracle.DatabaseDaemon.CreateCDBAsync:output_type -> google.longrunning.Operation + 35, // 66: agents.oracle.DatabaseDaemon.CreateListener:output_type -> agents.oracle.CreateListenerResponse + 37, // 67: agents.oracle.DatabaseDaemon.FileExists:output_type -> agents.oracle.FileExistsResponse + 61, // 68: agents.oracle.DatabaseDaemon.PhysicalRestoreAsync:output_type -> 
google.longrunning.Operation + 61, // 69: agents.oracle.DatabaseDaemon.DataPumpImportAsync:output_type -> google.longrunning.Operation + 61, // 70: agents.oracle.DatabaseDaemon.DataPumpExportAsync:output_type -> google.longrunning.Operation + 62, // 71: agents.oracle.DatabaseDaemon.ListOperations:output_type -> google.longrunning.ListOperationsResponse + 61, // 72: agents.oracle.DatabaseDaemon.GetOperation:output_type -> google.longrunning.Operation + 63, // 73: agents.oracle.DatabaseDaemon.DeleteOperation:output_type -> google.protobuf.Empty + 47, // 74: agents.oracle.DatabaseDaemon.RecoverConfigFile:output_type -> agents.oracle.RecoverConfigFileResponse + 49, // 75: agents.oracle.DatabaseDaemon.DownloadDirectoryFromGCS:output_type -> agents.oracle.DownloadDirectoryFromGCSResponse + 51, // 76: agents.oracle.DatabaseDaemon.FetchServiceImageMetaData:output_type -> agents.oracle.FetchServiceImageMetaDataResponse + 46, // [46:77] is the sub-list for method output_type + 15, // [15:46] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_oracle_pkg_agents_oracle_dbdaemon_proto_init() } +func file_oracle_pkg_agents_oracle_dbdaemon_proto_init() { + if File_oracle_pkg_agents_oracle_dbdaemon_proto != nil { + return + } + file_oracle_pkg_agents_oracle_oracle_proto_init() + if !protoimpl.UnsafeEnabled { + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunCMDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} 
{ + switch v := v.(*LocalConnection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunSQLPlusCMDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckDatabaseStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckDatabaseStateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreatePasswordFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreatePasswordFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateReplicaInitOraFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateReplicaInitOraFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KnownPDBsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KnownPDBsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunRMANRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LROInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunRMANAsyncRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunRMANResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDatabaseTypeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDatabaseTypeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDatabaseNameRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDatabaseNameResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetListenerRegistrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapStandbyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCDBRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateCDBAsyncRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[32].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*CreateCDBResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateListenerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateListenerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileExistsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileExistsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhysicalRestoreRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhysicalRestoreAsyncRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpImportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpImportAsyncRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpImportResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpExportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpExportAsyncRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataPumpExportResponse); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverConfigFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverConfigFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadDirectoryFromGCSRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadDirectoryFromGCSResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchServiceImageMetaDataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchServiceImageMetaDataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDirResponse_FileInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*RunSQLPlusCMDRequest_Local)(nil), + (*RunSQLPlusCMDRequest_Dsn)(nil), + (*RunSQLPlusCMDRequest_DatabaseName)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDesc, + NumEnums: 1, + NumMessages: 52, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_oracle_pkg_agents_oracle_dbdaemon_proto_goTypes, + DependencyIndexes: file_oracle_pkg_agents_oracle_dbdaemon_proto_depIdxs, + EnumInfos: file_oracle_pkg_agents_oracle_dbdaemon_proto_enumTypes, + MessageInfos: file_oracle_pkg_agents_oracle_dbdaemon_proto_msgTypes, + }.Build() + File_oracle_pkg_agents_oracle_dbdaemon_proto = out.File + file_oracle_pkg_agents_oracle_dbdaemon_proto_rawDesc = nil + file_oracle_pkg_agents_oracle_dbdaemon_proto_goTypes = nil + file_oracle_pkg_agents_oracle_dbdaemon_proto_depIdxs = nil +} diff --git a/oracle/pkg/agents/oracle/dbdaemon.proto b/oracle/pkg/agents/oracle/dbdaemon.proto new file mode 100644 index 0000000..7153831 --- /dev/null +++ b/oracle/pkg/agents/oracle/dbdaemon.proto @@ -0,0 +1,459 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Database Daemon is used for privileged database ops, e.g. running sqlplus
+// or rman. It is intended to be used by the agents running on the database
+// sidecar container (via a Unix domain socket).
+syntax = "proto3";
+
+package agents.oracle;
+
+import "oracle/pkg/agents/oracle/oracle.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/empty.proto";
+import "google/longrunning/operations.proto";
+
+option go_package = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle;oracle";
+
+// DatabaseDaemon defines the API for a daemon running together with
+// a database in the same container.
+service DatabaseDaemon {
+  // CreateDir RPC call to create a directory named path, along with any
+  // necessary parents.
+  rpc CreateDir(CreateDirRequest) returns (CreateDirResponse);
+
+  // ReadDir RPC call to read the directory named by path and return FileInfos
+  // for the path and its children.
+  rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
+
+  // DeleteDir RPC call to remove path.
+  rpc DeleteDir(DeleteDirRequest) returns (DeleteDirResponse);
+
+  // BounceDatabase RPC call to start/stop a database.
+  rpc BounceDatabase(BounceDatabaseRequest) returns (BounceDatabaseResponse);
+
+  // BounceListener RPC call to start/stop a listener.
+  rpc BounceListener(BounceListenerRequest) returns (BounceListenerResponse);
+
+  // CheckDatabaseState RPC call verifies that the database is running.
+  rpc CheckDatabaseState(CheckDatabaseStateRequest)
+      returns (CheckDatabaseStateResponse);
+
+  // RunSQLPlus RPC call executes Oracle's sqlplus utility.
+  rpc RunSQLPlus(RunSQLPlusCMDRequest) returns (RunCMDResponse);
+
+  // RunSQLPlusFormatted RPC is similar to RunSQLPlus, but for queries.
+  rpc RunSQLPlusFormatted(RunSQLPlusCMDRequest) returns (RunCMDResponse);
+
+  // KnownPDBs RPC call returns a list of known PDBs.
+  rpc KnownPDBs(KnownPDBsRequest) returns (KnownPDBsResponse);
+
+  // RunRMAN RPC call executes Oracle's rman utility.
+  rpc RunRMAN(RunRMANRequest) returns (RunRMANResponse);
+
+  // RunRMANAsync RPC call executes Oracle's rman utility asynchronously.
+  rpc RunRMANAsync(RunRMANAsyncRequest) returns (google.longrunning.Operation);
+
+  // NID changes a database id and/or database name.
+  rpc NID(NIDRequest) returns (NIDResponse);
+
+  // GetDatabaseType returns the database type
+  // (e.g. ORACLE_12_2_ENTERPRISE_NONCDB).
+  rpc GetDatabaseType(GetDatabaseTypeRequest) returns (GetDatabaseTypeResponse);
+
+  // GetDatabaseName returns the database name.
+  rpc GetDatabaseName(GetDatabaseNameRequest) returns (GetDatabaseNameResponse);
+
+  // CreatePasswordFile creates a password file for the database.
+  rpc CreatePasswordFile(CreatePasswordFileRequest)
+      returns (CreatePasswordFileResponse);
+
+  // CreateReplicaInitOraFile creates an init.ora file from the template and
+  // the provided parameters.
+  rpc CreateReplicaInitOraFile(CreateReplicaInitOraFileRequest)
+      returns (CreateReplicaInitOraFileResponse);
+
+  // SetListenerRegistration sets a static listener registration and restarts
+  // the listener.
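+  // (It returns BounceListenerResponse because the listener is bounced as
+  // part of applying the new registration.)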
+  rpc SetListenerRegistration(SetListenerRegistrationRequest)
+      returns (BounceListenerResponse);
+
+  // BootstrapStandby performs bootstrap tasks that have to be done by dbdaemon.
+  rpc BootstrapStandby(BootstrapStandbyRequest)
+      returns (BootstrapStandbyResponse);
+
+  // CreateCDB creates a database instance.
+  rpc CreateCDB(CreateCDBRequest) returns (CreateCDBResponse);
+
+  // CreateCDBAsync creates a database instance asynchronously.
+  rpc CreateCDBAsync(CreateCDBAsyncRequest)
+      returns (google.longrunning.Operation);
+
+  // CreateListener creates a database listener.
+  rpc CreateListener(CreateListenerRequest) returns (CreateListenerResponse);
+
+  // FileExists runs a simple check to confirm whether a requested file
+  // exists in a database container or not.
+  // An example of where FileExists is used is a check on
+  // the provisioning_successful file, but any file (not a dir) can be
+  // checked via this RPC call.
+  rpc FileExists(FileExistsRequest) returns (FileExistsResponse);
+
+  // PhysicalRestoreAsync runs RMAN and SQL queries in sequence to restore
+  // a database from an RMAN backup.
+  rpc PhysicalRestoreAsync(PhysicalRestoreAsyncRequest)
+      returns (google.longrunning.Operation);
+
+  // DataPumpImportAsync imports data from a .dmp file to an existing PDB.
+  rpc DataPumpImportAsync(DataPumpImportAsyncRequest)
+      returns (google.longrunning.Operation);
+
+  // DataPumpExportAsync exports data to a .dmp file using expdp.
+  rpc DataPumpExportAsync(DataPumpExportAsyncRequest)
+      returns (google.longrunning.Operation);
+
+  // ListOperations lists operations that match the specified filter in the
+  // request.
+  rpc ListOperations(google.longrunning.ListOperationsRequest)
+      returns (google.longrunning.ListOperationsResponse);
+
+  // GetOperation gets the latest state of a long-running operation. Clients can
+  // use this method to poll the operation result.
+  rpc GetOperation(google.longrunning.GetOperationRequest)
+      returns (google.longrunning.Operation);
+
+  // DeleteOperation deletes a long-running operation. This method indicates
+  // that the client is no longer interested in the operation result. It does
+  // not cancel the operation.
+  rpc DeleteOperation(google.longrunning.DeleteOperationRequest)
+      returns (google.protobuf.Empty);
+
+  // RecoverConfigFile creates a binary pfile from the backed-up spfile.
+  rpc RecoverConfigFile(RecoverConfigFileRequest)
+      returns (RecoverConfigFileResponse);
+
+  // DownloadDirectoryFromGCS downloads a directory from a GCS bucket to a
+  // local path.
+  rpc DownloadDirectoryFromGCS(DownloadDirectoryFromGCSRequest)
+      returns (DownloadDirectoryFromGCSResponse);
+
+  // FetchServiceImageMetaData returns the service image metadata.
+  rpc FetchServiceImageMetaData(FetchServiceImageMetaDataRequest)
+      returns (FetchServiceImageMetaDataResponse) {}
+}
+
+message CreateDirRequest {
+  // path is a directory name.
+  string path = 1;
+  // perm holds the permission bits (before umask) that are used for all
+  // directories CreateDir creates.
+  uint32 perm = 2;
+}
+
+message CreateDirResponse {}
+
+message ReadDirRequest {
+  // path is a directory name.
+  string path = 1;
+  // Set recursive to true to collect metadata for all files and directories
+  // in the file tree rooted at path; set it to false to collect only the
+  // first-level files and directories.
+  bool recursive = 2;
+}
+
+message ReadDirResponse {
+  // FileInfo describes a file and is returned by Stat.
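+  // Its fields mirror Go's os.FileInfo accessors (name, size, mode, modTime,
+  // isDir), with the entry's absolute path added.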
+  message FileInfo {
+    string name = 1;
+    int64 size = 2;
+    uint32 mode = 3;
+    google.protobuf.Timestamp modTime = 4;
+    bool isDir = 5;
+    string absPath = 6;
+  }
+  FileInfo currPath = 1;
+  repeated FileInfo subPaths = 2;
+}
+
+message DeleteDirRequest {
+  // path is a directory name to be deleted.
+  string path = 1;
+  // Set force to false to remove a file or an (empty) directory; if force is
+  // set to true, DeleteDir removes path and any children it contains.
+  bool force = 2;
+}
+
+message DeleteDirResponse {}
+
+message RunCMDResponse {
+  repeated string msg = 1;
+}
+
+message LocalConnection {}
+
+message RunSQLPlusCMDRequest {
+  repeated string commands = 1;
+
+  // Optional TnsAdmin location for custom SQL env settings.
+  string tns_admin = 2;
+
+  bool suppress = 3;
+
+  // Connection target.
+  oneof connectInfo {
+    // Connect to the local database.
+    LocalConnection local = 4;
+
+    // dsn string used to connect to an external database. This is to support
+    // connecting to an external server from the DB container. Formats
+    // supported are listed here
+    // https://github.com/godror/godror/blob/main/README.md#connect
+    string dsn = 5;
+
+    // Connect to the local database by name, explicitly setting the
+    // ORACLE_SID. This is required to support an ES replica.
+    string database_name = 6;
+  }
+
+  // Quiet mode, suppresses all output.
+  bool quiet = 7;
+}
+
+message CheckDatabaseStateRequest {
+  string database_name = 1;
+  bool is_cdb = 2;
+  string db_domain = 3;
+}
+
+message CheckDatabaseStateResponse {}
+
+message CreatePasswordFileRequest {
+  string database_name = 1;
+  string sys_password = 2;
+  string dir = 3;
+}
+
+message CreatePasswordFileResponse {}
+
+message CreateReplicaInitOraFileRequest {
+  string em_host = 1;
+  int32 em_port = 2;
+  string em_db_name = 3;
+  string em_db_unique_name = 4;
+  string em_db_domain = 5;
+  string log_file_dir_list = 6;
+  string data_file_dir_list = 7;
+  string em_compatibility = 8;
+  string init_ora_dir = 9;
+  string init_ora_file_name = 10;
+}
+
+message CreateReplicaInitOraFileResponse {
+  string init_ora_file_content = 1;
+}
+
+// KnownPDBsRequest is a message used for getting
+// a list of known PDBs in a CDB.
+message KnownPDBsRequest {
+  // By default a SEED PDB is not included, but a caller can override it here.
+  bool include_seed = 1;
+  // By default the state of a PDB is ignored, but a caller may request a list
+  // of PDBs only in the OPEN state (as opposed to just MOUNTED).
+  bool only_open = 2;
+}
+
+// KnownPDBsResponse is a message returning a list of known PDBs.
+message KnownPDBsResponse {
+  repeated string known_pdbs = 1;
+}
+
+message RunRMANRequest {
+  // Scripts to be executed by RMAN in sequence.
+  repeated string scripts = 1;
+  // Sets TNS_ADMIN to override the location of the network configuration.
+  string tns_admin = 2;
+  bool suppress = 3;
+  // target is the primary database to connect to. This is usually
+  // the source database in clone operations. This would be the
+  // ES primary for the ES setup.
+  string target = 4;
+  // auxiliary is the secondary database to connect to.
+  // This is the ES replica database in the ES setup.
+  string auxiliary = 5;
+  // gcs_path is the destination GCS bucket for the backup.
+  string gcs_path = 6;
+  // local_path is the destination directory for the backup.
+  string local_path = 7;
+  // RMAN command to run; currently backup and restore are supported.
+  string cmd = 8;
+}
+
+// LROInput is a common part of input requests for all Async operations.
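+// A typical flow: the caller issues an Async RPC (optionally naming the
+// operation), polls GetOperation until it is done, and finally calls
+// DeleteOperation. A minimal sketch in Go, assuming client is a
+// DatabaseDaemonClient and the RMAN script shown is illustrative:
+//
+//   op, err := client.RunRMANAsync(ctx, &RunRMANAsyncRequest{
+//     SyncRequest: &RunRMANRequest{Scripts: []string{"backup database;"}},
+//     LroInput:    &LROInput{OperationId: "rman-backup-1"},
+//   })
+//   // Poll client.GetOperation(ctx, &longrunning.GetOperationRequest{Name:
+//   // op.Name}) until the operation reports Done, then call
+//   // client.DeleteOperation to clean up.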
+message LROInput {
+  // Optional identifier of the requested operation.
+  // If not provided, a random id will be generated.
+  string operation_id = 1;
+}
+
+message RunRMANAsyncRequest {
+  RunRMANRequest sync_request = 1;
+  LROInput lro_input = 2;
+}
+
+message RunRMANResponse {
+  // Output of each script executed by RMAN.
+  repeated string output = 1;
+}
+
+message NIDRequest {
+  // ORACLE_SID env value.
+  string sid = 1;
+  // Optional new database name if a rename of the database is also required.
+  string database_name = 2;
+}
+
+message NIDResponse {}
+
+message GetDatabaseTypeRequest {}
+
+message GetDatabaseTypeResponse {
+  enum DatabaseType {
+    UNKNOWN_DATABASE_TYPE = 0;
+    ORACLE_12_2_ENTERPRISE = 1;
+    ORACLE_12_2_ENTERPRISE_NONCDB = 2;
+  }
+  DatabaseType database_type = 1;
+}
+
+message GetDatabaseNameRequest {}
+
+message GetDatabaseNameResponse {
+  string database_name = 1;
+}
+
+message SetListenerRegistrationRequest {
+  // global_database_name is the database unique name qualified with the
+  // domain name.
+  string global_database_name = 1;
+  // database_name is the name of the database. This is the same between the
+  // ES primary and the replica.
+  string database_name = 2;
+}
+
+message BootstrapStandbyRequest {
+  string cdb_name = 1;
+}
+
+message BootstrapStandbyResponse {}
+
+message CreateCDBRequest {
+  string oracle_home = 1;
+  string database_name = 2;
+  string db_unique_name = 3;
+  string character_set = 4;
+  int32 memory_percent = 5;
+  repeated string additional_params = 6;
+  string version = 7;
+  string db_domain = 8;
+}
+
+message CreateCDBAsyncRequest {
+  CreateCDBRequest sync_request = 1;
+  LROInput lro_input = 2;
+}
+
+message CreateCDBResponse {}
+
+message CreateListenerRequest {
+  string database_name = 1;
+  int32 port = 2;
+  string protocol = 3;
+  string oracle_home = 4;
+  string db_domain = 5;
+}
+
+message CreateListenerResponse {}
+
+message FileExistsRequest {
+  string name = 1;
+}
+
+message FileExistsResponse {
+  bool exists = 1;
+}
+
+message PhysicalRestoreRequest {
+  string restore_statement = 1;
+  string latest_recoverable_scn_query = 2;
+  string recover_statement_template = 3;
+}
+
+message PhysicalRestoreAsyncRequest {
+  PhysicalRestoreRequest sync_request = 1;
+  LROInput lro_input = 2;
+}
+
+message DataPumpImportRequest {
+  string pdb_name = 1;
+  string db_domain = 2;
+  repeated string command_params = 3;
+  // GCS path to the input dump file.
+  string gcs_path = 4;
+  // GCS path to the output log file.
+  string gcs_log_path = 5;
+}
+
+message DataPumpImportAsyncRequest {
+  DataPumpImportRequest sync_request = 1;
+  LROInput lro_input = 2;
+}
+
+message DataPumpImportResponse {}
+
+message DataPumpExportRequest {
+  string pdb_name = 1;
+  string db_domain = 2;
+  string object_type = 3;
+  string objects = 4;
+  repeated string command_params = 5;
+  string gcs_path = 6;
+  string gcs_log_path = 7;
+  string flashback_time = 8;
+}
+
+message DataPumpExportAsyncRequest {
+  DataPumpExportRequest sync_request = 1;
+  LROInput lro_input = 2;
+}
+
+message DataPumpExportResponse {}
+
+message RecoverConfigFileRequest {
+  string cdbName = 1;
+}
+message RecoverConfigFileResponse {}
+
+message DownloadDirectoryFromGCSRequest {
+  string gcs_path = 1;
+  string local_path = 2;
+}
+message DownloadDirectoryFromGCSResponse {}
+
+message FetchServiceImageMetaDataRequest {}
+
+message FetchServiceImageMetaDataResponse {
+  string version = 1;
+  string cdb_name = 2;
+  string oracle_home = 3;
+}
\ No newline at end of file
diff --git a/oracle/pkg/agents/oracle/dbdaemon_grpc.pb.go
b/oracle/pkg/agents/oracle/dbdaemon_grpc.pb.go new file mode 100644 index 0000000..8de03a9 --- /dev/null +++ b/oracle/pkg/agents/oracle/dbdaemon_grpc.pb.go @@ -0,0 +1,1273 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package oracle + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + longrunning "google.golang.org/genproto/googleapis/longrunning" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// DatabaseDaemonClient is the client API for DatabaseDaemon service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DatabaseDaemonClient interface { + // CreateDir RPC call to create a directory named path, along with any + // necessary parents. + CreateDir(ctx context.Context, in *CreateDirRequest, opts ...grpc.CallOption) (*CreateDirResponse, error) + // ReadDir RPC call to read the directory named by path and returns Fileinfos + // for the path and children. + ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) + // DeleteDir RPC to call remove path. + DeleteDir(ctx context.Context, in *DeleteDirRequest, opts ...grpc.CallOption) (*DeleteDirResponse, error) + // BounceDatabase RPC call to start/stop a database. + BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error) + // BounceListener RPC call to start/stop a listener. + BounceListener(ctx context.Context, in *BounceListenerRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error) + // CheckDatabaseState RPC call verifies the database is running. + CheckDatabaseState(ctx context.Context, in *CheckDatabaseStateRequest, opts ...grpc.CallOption) (*CheckDatabaseStateResponse, error) + // RunSQLPlus RPC call executes Oracle's sqlplus utility. + RunSQLPlus(ctx context.Context, in *RunSQLPlusCMDRequest, opts ...grpc.CallOption) (*RunCMDResponse, error) + // RunSQLPlusFormatted RPC is similar to RunSQLPlus, but for queries. + RunSQLPlusFormatted(ctx context.Context, in *RunSQLPlusCMDRequest, opts ...grpc.CallOption) (*RunCMDResponse, error) + // KnownPDBs RPC call returns a list of known PDBs. + KnownPDBs(ctx context.Context, in *KnownPDBsRequest, opts ...grpc.CallOption) (*KnownPDBsResponse, error) + // RunRMAN RPC call executes Oracle's rman utility. + RunRMAN(ctx context.Context, in *RunRMANRequest, opts ...grpc.CallOption) (*RunRMANResponse, error) + // RunRMANAsync RPC call executes Oracle's rman utility asynchronously. + RunRMANAsync(ctx context.Context, in *RunRMANAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // NID changes a database id and/or database name. + NID(ctx context.Context, in *NIDRequest, opts ...grpc.CallOption) (*NIDResponse, error) + // GetDatabaseType returns database type(eg. ORACLE_12_2_ENTERPRISE_NONCDB) + GetDatabaseType(ctx context.Context, in *GetDatabaseTypeRequest, opts ...grpc.CallOption) (*GetDatabaseTypeResponse, error) + // GetDatabaseName returns database name. 
+ GetDatabaseName(ctx context.Context, in *GetDatabaseNameRequest, opts ...grpc.CallOption) (*GetDatabaseNameResponse, error) + // CreatePasswordFile creates a password file for the database. + CreatePasswordFile(ctx context.Context, in *CreatePasswordFileRequest, opts ...grpc.CallOption) (*CreatePasswordFileResponse, error) + // CreateReplicaInitOraFile creates init.ora file using the template and the + // provided parameters. + CreateReplicaInitOraFile(ctx context.Context, in *CreateReplicaInitOraFileRequest, opts ...grpc.CallOption) (*CreateReplicaInitOraFileResponse, error) + // SetListenerRegistration sets a static listener registration and restarts + // the listener. + SetListenerRegistration(ctx context.Context, in *SetListenerRegistrationRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error) + // BootstrapStandby performs bootstrap tasks that have to be done by dbdaemon. + BootstrapStandby(ctx context.Context, in *BootstrapStandbyRequest, opts ...grpc.CallOption) (*BootstrapStandbyResponse, error) + // CreateCDB creates a database instance. + CreateCDB(ctx context.Context, in *CreateCDBRequest, opts ...grpc.CallOption) (*CreateCDBResponse, error) + // CreateCDBAsync creates a database instance asynchronously. + CreateCDBAsync(ctx context.Context, in *CreateCDBAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // CreateListener creates a database listener. + CreateListener(ctx context.Context, in *CreateListenerRequest, opts ...grpc.CallOption) (*CreateListenerResponse, error) + // FileExists runs a simple check to confirm whether a requested file + // exists in a database container or not. + // An example of where FileExists is used is a check on + // the provisioning_successful file, but any file (nor a dir) can be + // checked via this RPC call. + FileExists(ctx context.Context, in *FileExistsRequest, opts ...grpc.CallOption) (*FileExistsResponse, error) + // PhysicalRestoreAsync runs RMAN and SQL queries in sequence to restore + // a database from an RMAN backup. + PhysicalRestoreAsync(ctx context.Context, in *PhysicalRestoreAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // DataPumpImportAsync imports data from a .dmp file to an existing PDB. + DataPumpImportAsync(ctx context.Context, in *DataPumpImportAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // DataPumpExportAsync exports data to a .dmp file using expdp + DataPumpExportAsync(ctx context.Context, in *DataPumpExportAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // ListOperations lists operations that match the specified filter in the + // request. + ListOperations(ctx context.Context, in *longrunning.ListOperationsRequest, opts ...grpc.CallOption) (*longrunning.ListOperationsResponse, error) + // GetOperation gets the latest state of a long-running operation. Clients can + // use this method to poll the operation result. + GetOperation(ctx context.Context, in *longrunning.GetOperationRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + // DeleteOperation deletes a long-running operation. This method indicates + // that the client is no longer interested in the operation result. It does + // not cancel the operation. 
+ DeleteOperation(ctx context.Context, in *longrunning.DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // RecoverConfigFile creates a binary pfile from the backed up spfile + RecoverConfigFile(ctx context.Context, in *RecoverConfigFileRequest, opts ...grpc.CallOption) (*RecoverConfigFileResponse, error) + // DownloadDirectoryFromGCS downloads a directory from GCS bucket to local + // path. + DownloadDirectoryFromGCS(ctx context.Context, in *DownloadDirectoryFromGCSRequest, opts ...grpc.CallOption) (*DownloadDirectoryFromGCSResponse, error) + // FetchServiceImageMetaData returns the service image metadata. + FetchServiceImageMetaData(ctx context.Context, in *FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*FetchServiceImageMetaDataResponse, error) +} + +type databaseDaemonClient struct { + cc grpc.ClientConnInterface +} + +func NewDatabaseDaemonClient(cc grpc.ClientConnInterface) DatabaseDaemonClient { + return &databaseDaemonClient{cc} +} + +func (c *databaseDaemonClient) CreateDir(ctx context.Context, in *CreateDirRequest, opts ...grpc.CallOption) (*CreateDirResponse, error) { + out := new(CreateDirResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreateDir", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { + out := new(ReadDirResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/ReadDir", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) DeleteDir(ctx context.Context, in *DeleteDirRequest, opts ...grpc.CallOption) (*DeleteDirResponse, error) { + out := new(DeleteDirResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/DeleteDir", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error) { + out := new(BounceDatabaseResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/BounceDatabase", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) BounceListener(ctx context.Context, in *BounceListenerRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error) { + out := new(BounceListenerResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/BounceListener", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CheckDatabaseState(ctx context.Context, in *CheckDatabaseStateRequest, opts ...grpc.CallOption) (*CheckDatabaseStateResponse, error) { + out := new(CheckDatabaseStateResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CheckDatabaseState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) RunSQLPlus(ctx context.Context, in *RunSQLPlusCMDRequest, opts ...grpc.CallOption) (*RunCMDResponse, error) { + out := new(RunCMDResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/RunSQLPlus", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) RunSQLPlusFormatted(ctx context.Context, in *RunSQLPlusCMDRequest, opts ...grpc.CallOption) (*RunCMDResponse, error) { + out := new(RunCMDResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/RunSQLPlusFormatted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) KnownPDBs(ctx context.Context, in *KnownPDBsRequest, opts ...grpc.CallOption) (*KnownPDBsResponse, error) { + out := new(KnownPDBsResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/KnownPDBs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) RunRMAN(ctx context.Context, in *RunRMANRequest, opts ...grpc.CallOption) (*RunRMANResponse, error) { + out := new(RunRMANResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/RunRMAN", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) RunRMANAsync(ctx context.Context, in *RunRMANAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/RunRMANAsync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) NID(ctx context.Context, in *NIDRequest, opts ...grpc.CallOption) (*NIDResponse, error) { + out := new(NIDResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/NID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) GetDatabaseType(ctx context.Context, in *GetDatabaseTypeRequest, opts ...grpc.CallOption) (*GetDatabaseTypeResponse, error) { + out := new(GetDatabaseTypeResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/GetDatabaseType", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) GetDatabaseName(ctx context.Context, in *GetDatabaseNameRequest, opts ...grpc.CallOption) (*GetDatabaseNameResponse, error) { + out := new(GetDatabaseNameResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/GetDatabaseName", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CreatePasswordFile(ctx context.Context, in *CreatePasswordFileRequest, opts ...grpc.CallOption) (*CreatePasswordFileResponse, error) { + out := new(CreatePasswordFileResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreatePasswordFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CreateReplicaInitOraFile(ctx context.Context, in *CreateReplicaInitOraFileRequest, opts ...grpc.CallOption) (*CreateReplicaInitOraFileResponse, error) { + out := new(CreateReplicaInitOraFileResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreateReplicaInitOraFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) SetListenerRegistration(ctx context.Context, in *SetListenerRegistrationRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error) { + out := new(BounceListenerResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/SetListenerRegistration", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) BootstrapStandby(ctx context.Context, in *BootstrapStandbyRequest, opts ...grpc.CallOption) (*BootstrapStandbyResponse, error) { + out := new(BootstrapStandbyResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/BootstrapStandby", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CreateCDB(ctx context.Context, in *CreateCDBRequest, opts ...grpc.CallOption) (*CreateCDBResponse, error) { + out := new(CreateCDBResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreateCDB", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CreateCDBAsync(ctx context.Context, in *CreateCDBAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreateCDBAsync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) CreateListener(ctx context.Context, in *CreateListenerRequest, opts ...grpc.CallOption) (*CreateListenerResponse, error) { + out := new(CreateListenerResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/CreateListener", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) FileExists(ctx context.Context, in *FileExistsRequest, opts ...grpc.CallOption) (*FileExistsResponse, error) { + out := new(FileExistsResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/FileExists", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) PhysicalRestoreAsync(ctx context.Context, in *PhysicalRestoreAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/PhysicalRestoreAsync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) DataPumpImportAsync(ctx context.Context, in *DataPumpImportAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/DataPumpImportAsync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) DataPumpExportAsync(ctx context.Context, in *DataPumpExportAsyncRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/DataPumpExportAsync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) ListOperations(ctx context.Context, in *longrunning.ListOperationsRequest, opts ...grpc.CallOption) (*longrunning.ListOperationsResponse, error) { + out := new(longrunning.ListOperationsResponse) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseDaemonClient) GetOperation(ctx context.Context, in *longrunning.GetOperationRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/GetOperation", in, out, opts...) 
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseDaemonClient) DeleteOperation(ctx context.Context, in *longrunning.DeleteOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/DeleteOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseDaemonClient) RecoverConfigFile(ctx context.Context, in *RecoverConfigFileRequest, opts ...grpc.CallOption) (*RecoverConfigFileResponse, error) {
+ out := new(RecoverConfigFileResponse)
+ err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/RecoverConfigFile", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseDaemonClient) DownloadDirectoryFromGCS(ctx context.Context, in *DownloadDirectoryFromGCSRequest, opts ...grpc.CallOption) (*DownloadDirectoryFromGCSResponse, error) {
+ out := new(DownloadDirectoryFromGCSResponse)
+ err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/DownloadDirectoryFromGCS", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseDaemonClient) FetchServiceImageMetaData(ctx context.Context, in *FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*FetchServiceImageMetaDataResponse, error) {
+ out := new(FetchServiceImageMetaDataResponse)
+ err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemon/FetchServiceImageMetaData", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// DatabaseDaemonServer is the server API for DatabaseDaemon service.
+// All implementations must embed UnimplementedDatabaseDaemonServer
+// for forward compatibility
+type DatabaseDaemonServer interface {
+ // CreateDir RPC call to create a directory named path, along with any
+ // necessary parents.
+ CreateDir(context.Context, *CreateDirRequest) (*CreateDirResponse, error)
+ // ReadDir RPC call to read the directory named by path and return FileInfos
+ // for the path and children.
+ ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error)
+ // DeleteDir RPC call to remove path.
+ DeleteDir(context.Context, *DeleteDirRequest) (*DeleteDirResponse, error)
+ // BounceDatabase RPC call to start/stop a database.
+ BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error)
+ // BounceListener RPC call to start/stop a listener.
+ BounceListener(context.Context, *BounceListenerRequest) (*BounceListenerResponse, error)
+ // CheckDatabaseState RPC call verifies the database is running.
+ CheckDatabaseState(context.Context, *CheckDatabaseStateRequest) (*CheckDatabaseStateResponse, error)
+ // RunSQLPlus RPC call executes Oracle's sqlplus utility.
+ RunSQLPlus(context.Context, *RunSQLPlusCMDRequest) (*RunCMDResponse, error)
+ // RunSQLPlusFormatted RPC is similar to RunSQLPlus, but for queries.
+ RunSQLPlusFormatted(context.Context, *RunSQLPlusCMDRequest) (*RunCMDResponse, error)
+ // KnownPDBs RPC call returns a list of known PDBs.
+ KnownPDBs(context.Context, *KnownPDBsRequest) (*KnownPDBsResponse, error)
+ // RunRMAN RPC call executes Oracle's rman utility.
+ RunRMAN(context.Context, *RunRMANRequest) (*RunRMANResponse, error)
+ // RunRMANAsync RPC call executes Oracle's rman utility asynchronously.
+ RunRMANAsync(context.Context, *RunRMANAsyncRequest) (*longrunning.Operation, error)
+ // NID changes a database id and/or database name.
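+ // (Privileged execution is delegated to the database daemon proxy; see the
+ // ProxyRunNID RPC in dbdaemon_proxy.pb.go later in this change.)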
+ NID(context.Context, *NIDRequest) (*NIDResponse, error)
+ // GetDatabaseType returns the database type (e.g. ORACLE_12_2_ENTERPRISE_NONCDB).
+ GetDatabaseType(context.Context, *GetDatabaseTypeRequest) (*GetDatabaseTypeResponse, error)
+ // GetDatabaseName returns database name.
+ GetDatabaseName(context.Context, *GetDatabaseNameRequest) (*GetDatabaseNameResponse, error)
+ // CreatePasswordFile creates a password file for the database.
+ CreatePasswordFile(context.Context, *CreatePasswordFileRequest) (*CreatePasswordFileResponse, error)
+ // CreateReplicaInitOraFile creates an init.ora file using the template and the
+ // provided parameters.
+ CreateReplicaInitOraFile(context.Context, *CreateReplicaInitOraFileRequest) (*CreateReplicaInitOraFileResponse, error)
+ // SetListenerRegistration sets a static listener registration and restarts
+ // the listener.
+ SetListenerRegistration(context.Context, *SetListenerRegistrationRequest) (*BounceListenerResponse, error)
+ // BootstrapStandby performs bootstrap tasks that have to be done by dbdaemon.
+ BootstrapStandby(context.Context, *BootstrapStandbyRequest) (*BootstrapStandbyResponse, error)
+ // CreateCDB creates a database instance.
+ CreateCDB(context.Context, *CreateCDBRequest) (*CreateCDBResponse, error)
+ // CreateCDBAsync creates a database instance asynchronously.
+ CreateCDBAsync(context.Context, *CreateCDBAsyncRequest) (*longrunning.Operation, error)
+ // CreateListener creates a database listener.
+ CreateListener(context.Context, *CreateListenerRequest) (*CreateListenerResponse, error)
+ // FileExists runs a simple check to confirm whether a requested file
+ // exists in a database container or not.
+ // An example of where FileExists is used is a check on
+ // the provisioning_successful file, but any file (though not a dir) can be
+ // checked via this RPC call.
+ FileExists(context.Context, *FileExistsRequest) (*FileExistsResponse, error)
+ // PhysicalRestoreAsync runs RMAN and SQL queries in sequence to restore
+ // a database from an RMAN backup.
+ PhysicalRestoreAsync(context.Context, *PhysicalRestoreAsyncRequest) (*longrunning.Operation, error)
+ // DataPumpImportAsync imports data from a .dmp file to an existing PDB.
+ DataPumpImportAsync(context.Context, *DataPumpImportAsyncRequest) (*longrunning.Operation, error)
+ // DataPumpExportAsync exports data to a .dmp file using expdp.
+ DataPumpExportAsync(context.Context, *DataPumpExportAsyncRequest) (*longrunning.Operation, error)
+ // ListOperations lists operations that match the specified filter in the
+ // request.
+ ListOperations(context.Context, *longrunning.ListOperationsRequest) (*longrunning.ListOperationsResponse, error)
+ // GetOperation gets the latest state of a long-running operation. Clients can
+ // use this method to poll the operation result.
+ GetOperation(context.Context, *longrunning.GetOperationRequest) (*longrunning.Operation, error)
+ // DeleteOperation deletes a long-running operation. This method indicates
+ // that the client is no longer interested in the operation result. It does
+ // not cancel the operation.
+ DeleteOperation(context.Context, *longrunning.DeleteOperationRequest) (*empty.Empty, error)
+ // RecoverConfigFile creates a binary pfile from the backed-up spfile.
+ RecoverConfigFile(context.Context, *RecoverConfigFileRequest) (*RecoverConfigFileResponse, error)
+ // DownloadDirectoryFromGCS downloads a directory from a GCS bucket to a
+ // local path.
+ DownloadDirectoryFromGCS(context.Context, *DownloadDirectoryFromGCSRequest) (*DownloadDirectoryFromGCSResponse, error) + // FetchServiceImageMetaData returns the service image metadata. + FetchServiceImageMetaData(context.Context, *FetchServiceImageMetaDataRequest) (*FetchServiceImageMetaDataResponse, error) + mustEmbedUnimplementedDatabaseDaemonServer() +} + +// UnimplementedDatabaseDaemonServer must be embedded to have forward compatible implementations. +type UnimplementedDatabaseDaemonServer struct { +} + +func (UnimplementedDatabaseDaemonServer) CreateDir(context.Context, *CreateDirRequest) (*CreateDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDir not implemented") +} +func (UnimplementedDatabaseDaemonServer) ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") +} +func (UnimplementedDatabaseDaemonServer) DeleteDir(context.Context, *DeleteDirRequest) (*DeleteDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteDir not implemented") +} +func (UnimplementedDatabaseDaemonServer) BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BounceDatabase not implemented") +} +func (UnimplementedDatabaseDaemonServer) BounceListener(context.Context, *BounceListenerRequest) (*BounceListenerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BounceListener not implemented") +} +func (UnimplementedDatabaseDaemonServer) CheckDatabaseState(context.Context, *CheckDatabaseStateRequest) (*CheckDatabaseStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckDatabaseState not implemented") +} +func (UnimplementedDatabaseDaemonServer) RunSQLPlus(context.Context, *RunSQLPlusCMDRequest) (*RunCMDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunSQLPlus not implemented") +} +func (UnimplementedDatabaseDaemonServer) RunSQLPlusFormatted(context.Context, *RunSQLPlusCMDRequest) (*RunCMDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunSQLPlusFormatted not implemented") +} +func (UnimplementedDatabaseDaemonServer) KnownPDBs(context.Context, *KnownPDBsRequest) (*KnownPDBsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method KnownPDBs not implemented") +} +func (UnimplementedDatabaseDaemonServer) RunRMAN(context.Context, *RunRMANRequest) (*RunRMANResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunRMAN not implemented") +} +func (UnimplementedDatabaseDaemonServer) RunRMANAsync(context.Context, *RunRMANAsyncRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunRMANAsync not implemented") +} +func (UnimplementedDatabaseDaemonServer) NID(context.Context, *NIDRequest) (*NIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NID not implemented") +} +func (UnimplementedDatabaseDaemonServer) GetDatabaseType(context.Context, *GetDatabaseTypeRequest) (*GetDatabaseTypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDatabaseType not implemented") +} +func (UnimplementedDatabaseDaemonServer) GetDatabaseName(context.Context, *GetDatabaseNameRequest) (*GetDatabaseNameResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDatabaseName not implemented") +} +func 
(UnimplementedDatabaseDaemonServer) CreatePasswordFile(context.Context, *CreatePasswordFileRequest) (*CreatePasswordFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreatePasswordFile not implemented") +} +func (UnimplementedDatabaseDaemonServer) CreateReplicaInitOraFile(context.Context, *CreateReplicaInitOraFileRequest) (*CreateReplicaInitOraFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateReplicaInitOraFile not implemented") +} +func (UnimplementedDatabaseDaemonServer) SetListenerRegistration(context.Context, *SetListenerRegistrationRequest) (*BounceListenerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetListenerRegistration not implemented") +} +func (UnimplementedDatabaseDaemonServer) BootstrapStandby(context.Context, *BootstrapStandbyRequest) (*BootstrapStandbyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BootstrapStandby not implemented") +} +func (UnimplementedDatabaseDaemonServer) CreateCDB(context.Context, *CreateCDBRequest) (*CreateCDBResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCDB not implemented") +} +func (UnimplementedDatabaseDaemonServer) CreateCDBAsync(context.Context, *CreateCDBAsyncRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCDBAsync not implemented") +} +func (UnimplementedDatabaseDaemonServer) CreateListener(context.Context, *CreateListenerRequest) (*CreateListenerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateListener not implemented") +} +func (UnimplementedDatabaseDaemonServer) FileExists(context.Context, *FileExistsRequest) (*FileExistsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FileExists not implemented") +} +func (UnimplementedDatabaseDaemonServer) PhysicalRestoreAsync(context.Context, *PhysicalRestoreAsyncRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method PhysicalRestoreAsync not implemented") +} +func (UnimplementedDatabaseDaemonServer) DataPumpImportAsync(context.Context, *DataPumpImportAsyncRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataPumpImportAsync not implemented") +} +func (UnimplementedDatabaseDaemonServer) DataPumpExportAsync(context.Context, *DataPumpExportAsyncRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataPumpExportAsync not implemented") +} +func (UnimplementedDatabaseDaemonServer) ListOperations(context.Context, *longrunning.ListOperationsRequest) (*longrunning.ListOperationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListOperations not implemented") +} +func (UnimplementedDatabaseDaemonServer) GetOperation(context.Context, *longrunning.GetOperationRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOperation not implemented") +} +func (UnimplementedDatabaseDaemonServer) DeleteOperation(context.Context, *longrunning.DeleteOperationRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteOperation not implemented") +} +func (UnimplementedDatabaseDaemonServer) RecoverConfigFile(context.Context, *RecoverConfigFileRequest) (*RecoverConfigFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecoverConfigFile not implemented") +} +func 
(UnimplementedDatabaseDaemonServer) DownloadDirectoryFromGCS(context.Context, *DownloadDirectoryFromGCSRequest) (*DownloadDirectoryFromGCSResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DownloadDirectoryFromGCS not implemented") +} +func (UnimplementedDatabaseDaemonServer) FetchServiceImageMetaData(context.Context, *FetchServiceImageMetaDataRequest) (*FetchServiceImageMetaDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchServiceImageMetaData not implemented") +} +func (UnimplementedDatabaseDaemonServer) mustEmbedUnimplementedDatabaseDaemonServer() {} + +// UnsafeDatabaseDaemonServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DatabaseDaemonServer will +// result in compilation errors. +type UnsafeDatabaseDaemonServer interface { + mustEmbedUnimplementedDatabaseDaemonServer() +} + +func RegisterDatabaseDaemonServer(s grpc.ServiceRegistrar, srv DatabaseDaemonServer) { + s.RegisterService(&DatabaseDaemon_ServiceDesc, srv) +} + +func _DatabaseDaemon_CreateDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreateDir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreateDir", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreateDir(ctx, req.(*CreateDirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).ReadDir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/ReadDir", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).ReadDir(ctx, req.(*ReadDirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_DeleteDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).DeleteDir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/DeleteDir", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).DeleteDir(ctx, req.(*DeleteDirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_BounceDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BounceDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).BounceDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/BounceDatabase", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).BounceDatabase(ctx, req.(*BounceDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_BounceListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BounceListenerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).BounceListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/BounceListener", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).BounceListener(ctx, req.(*BounceListenerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CheckDatabaseState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckDatabaseStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CheckDatabaseState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CheckDatabaseState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CheckDatabaseState(ctx, req.(*CheckDatabaseStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_RunSQLPlus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunSQLPlusCMDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).RunSQLPlus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/RunSQLPlus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).RunSQLPlus(ctx, req.(*RunSQLPlusCMDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_RunSQLPlusFormatted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunSQLPlusCMDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).RunSQLPlusFormatted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/RunSQLPlusFormatted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).RunSQLPlusFormatted(ctx, req.(*RunSQLPlusCMDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_KnownPDBs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KnownPDBsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).KnownPDBs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/KnownPDBs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(DatabaseDaemonServer).KnownPDBs(ctx, req.(*KnownPDBsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_RunRMAN_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunRMANRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).RunRMAN(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/RunRMAN", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).RunRMAN(ctx, req.(*RunRMANRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_RunRMANAsync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunRMANAsyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).RunRMANAsync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/RunRMANAsync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).RunRMANAsync(ctx, req.(*RunRMANAsyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_NID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).NID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/NID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).NID(ctx, req.(*NIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_GetDatabaseType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseTypeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).GetDatabaseType(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/GetDatabaseType", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).GetDatabaseType(ctx, req.(*GetDatabaseTypeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_GetDatabaseName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseNameRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).GetDatabaseName(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/GetDatabaseName", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).GetDatabaseName(ctx, req.(*GetDatabaseNameRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CreatePasswordFile_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreatePasswordFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreatePasswordFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreatePasswordFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreatePasswordFile(ctx, req.(*CreatePasswordFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CreateReplicaInitOraFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateReplicaInitOraFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreateReplicaInitOraFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreateReplicaInitOraFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreateReplicaInitOraFile(ctx, req.(*CreateReplicaInitOraFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_SetListenerRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetListenerRegistrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).SetListenerRegistration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/SetListenerRegistration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).SetListenerRegistration(ctx, req.(*SetListenerRegistrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_BootstrapStandby_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BootstrapStandbyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).BootstrapStandby(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/BootstrapStandby", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).BootstrapStandby(ctx, req.(*BootstrapStandbyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CreateCDB_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCDBRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreateCDB(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreateCDB", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreateCDB(ctx, req.(*CreateCDBRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CreateCDBAsync_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCDBAsyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreateCDBAsync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreateCDBAsync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreateCDBAsync(ctx, req.(*CreateCDBAsyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_CreateListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateListenerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).CreateListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/CreateListener", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).CreateListener(ctx, req.(*CreateListenerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_FileExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FileExistsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).FileExists(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/FileExists", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).FileExists(ctx, req.(*FileExistsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_PhysicalRestoreAsync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PhysicalRestoreAsyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).PhysicalRestoreAsync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/PhysicalRestoreAsync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).PhysicalRestoreAsync(ctx, req.(*PhysicalRestoreAsyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_DataPumpImportAsync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataPumpImportAsyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).DataPumpImportAsync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/DataPumpImportAsync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).DataPumpImportAsync(ctx, req.(*DataPumpImportAsyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_DataPumpExportAsync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataPumpExportAsyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).DataPumpExportAsync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/DataPumpExportAsync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).DataPumpExportAsync(ctx, req.(*DataPumpExportAsyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).ListOperations(ctx, req.(*longrunning.ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).GetOperation(ctx, req.(*longrunning.GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(longrunning.DeleteOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).DeleteOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/DeleteOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).DeleteOperation(ctx, req.(*longrunning.DeleteOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_RecoverConfigFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecoverConfigFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).RecoverConfigFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/RecoverConfigFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).RecoverConfigFile(ctx, req.(*RecoverConfigFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_DownloadDirectoryFromGCS_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DownloadDirectoryFromGCSRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).DownloadDirectoryFromGCS(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/DownloadDirectoryFromGCS", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).DownloadDirectoryFromGCS(ctx, req.(*DownloadDirectoryFromGCSRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemon_FetchServiceImageMetaData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FetchServiceImageMetaDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonServer).FetchServiceImageMetaData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemon/FetchServiceImageMetaData", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonServer).FetchServiceImageMetaData(ctx, req.(*FetchServiceImageMetaDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DatabaseDaemon_ServiceDesc is the grpc.ServiceDesc for DatabaseDaemon service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DatabaseDaemon_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agents.oracle.DatabaseDaemon", + HandlerType: (*DatabaseDaemonServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDir", + Handler: _DatabaseDaemon_CreateDir_Handler, + }, + { + MethodName: "ReadDir", + Handler: _DatabaseDaemon_ReadDir_Handler, + }, + { + MethodName: "DeleteDir", + Handler: _DatabaseDaemon_DeleteDir_Handler, + }, + { + MethodName: "BounceDatabase", + Handler: _DatabaseDaemon_BounceDatabase_Handler, + }, + { + MethodName: "BounceListener", + Handler: _DatabaseDaemon_BounceListener_Handler, + }, + { + MethodName: "CheckDatabaseState", + Handler: _DatabaseDaemon_CheckDatabaseState_Handler, + }, + { + MethodName: "RunSQLPlus", + Handler: _DatabaseDaemon_RunSQLPlus_Handler, + }, + { + MethodName: "RunSQLPlusFormatted", + Handler: _DatabaseDaemon_RunSQLPlusFormatted_Handler, + }, + { + MethodName: "KnownPDBs", + Handler: _DatabaseDaemon_KnownPDBs_Handler, + }, + { + MethodName: "RunRMAN", + Handler: _DatabaseDaemon_RunRMAN_Handler, + }, + { + MethodName: "RunRMANAsync", + Handler: _DatabaseDaemon_RunRMANAsync_Handler, + }, + { + MethodName: "NID", + Handler: _DatabaseDaemon_NID_Handler, + }, + { + MethodName: "GetDatabaseType", + Handler: _DatabaseDaemon_GetDatabaseType_Handler, + }, + { + MethodName: "GetDatabaseName", + Handler: _DatabaseDaemon_GetDatabaseName_Handler, + }, + { + MethodName: "CreatePasswordFile", + Handler: _DatabaseDaemon_CreatePasswordFile_Handler, + }, + { + MethodName: "CreateReplicaInitOraFile", + Handler: _DatabaseDaemon_CreateReplicaInitOraFile_Handler, + }, + { + MethodName: "SetListenerRegistration", + Handler: _DatabaseDaemon_SetListenerRegistration_Handler, + }, + { + MethodName: "BootstrapStandby", + Handler: _DatabaseDaemon_BootstrapStandby_Handler, + }, + { + MethodName: "CreateCDB", + Handler: _DatabaseDaemon_CreateCDB_Handler, + }, + { + MethodName: "CreateCDBAsync", + Handler: 
_DatabaseDaemon_CreateCDBAsync_Handler, + }, + { + MethodName: "CreateListener", + Handler: _DatabaseDaemon_CreateListener_Handler, + }, + { + MethodName: "FileExists", + Handler: _DatabaseDaemon_FileExists_Handler, + }, + { + MethodName: "PhysicalRestoreAsync", + Handler: _DatabaseDaemon_PhysicalRestoreAsync_Handler, + }, + { + MethodName: "DataPumpImportAsync", + Handler: _DatabaseDaemon_DataPumpImportAsync_Handler, + }, + { + MethodName: "DataPumpExportAsync", + Handler: _DatabaseDaemon_DataPumpExportAsync_Handler, + }, + { + MethodName: "ListOperations", + Handler: _DatabaseDaemon_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _DatabaseDaemon_GetOperation_Handler, + }, + { + MethodName: "DeleteOperation", + Handler: _DatabaseDaemon_DeleteOperation_Handler, + }, + { + MethodName: "RecoverConfigFile", + Handler: _DatabaseDaemon_RecoverConfigFile_Handler, + }, + { + MethodName: "DownloadDirectoryFromGCS", + Handler: _DatabaseDaemon_DownloadDirectoryFromGCS_Handler, + }, + { + MethodName: "FetchServiceImageMetaData", + Handler: _DatabaseDaemon_FetchServiceImageMetaData_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "oracle/pkg/agents/oracle/dbdaemon.proto", +} diff --git a/oracle/pkg/agents/oracle/dbdaemon_proxy.pb.go b/oracle/pkg/agents/oracle/dbdaemon_proxy.pb.go new file mode 100644 index 0000000..3e428b8 --- /dev/null +++ b/oracle/pkg/agents/oracle/dbdaemon_proxy.pb.go @@ -0,0 +1,557 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Database Daemon proxy is used for privileged database ops, +// e.g. bouncing a database and the listeners. It is intended to be used by the +// agents running locally on the same database container. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.4 +// source: oracle/pkg/agents/oracle/dbdaemon_proxy.proto + +package oracle + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
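+ // (EnforceVersion is an unsigned integer type, so a negative constant in
+ // either guard fails to compile; protoimpl version skew is caught at build
+ // time rather than at runtime.)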
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SetEnvRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OracleHome string `protobuf:"bytes,1,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + CdbName string `protobuf:"bytes,2,opt,name=cdb_name,json=cdbName,proto3" json:"cdb_name,omitempty"` + SpfilePath string `protobuf:"bytes,3,opt,name=spfile_path,json=spfilePath,proto3" json:"spfile_path,omitempty"` +} + +func (x *SetEnvRequest) Reset() { + *x = SetEnvRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetEnvRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetEnvRequest) ProtoMessage() {} + +func (x *SetEnvRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetEnvRequest.ProtoReflect.Descriptor instead. +func (*SetEnvRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{0} +} + +func (x *SetEnvRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *SetEnvRequest) GetCdbName() string { + if x != nil { + return x.CdbName + } + return "" +} + +func (x *SetEnvRequest) GetSpfilePath() string { + if x != nil { + return x.SpfilePath + } + return "" +} + +type SetEnvResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetEnvResponse) Reset() { + *x = SetEnvResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetEnvResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetEnvResponse) ProtoMessage() {} + +func (x *SetEnvResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetEnvResponse.ProtoReflect.Descriptor instead. 
+func (*SetEnvResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{1} +} + +type ProxyRunNIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceDbName string `protobuf:"bytes,1,opt,name=source_db_name,json=sourceDbName,proto3" json:"source_db_name,omitempty"` + DestDbName string `protobuf:"bytes,2,opt,name=dest_db_name,json=destDbName,proto3" json:"dest_db_name,omitempty"` + Params []string `protobuf:"bytes,3,rep,name=params,proto3" json:"params,omitempty"` +} + +func (x *ProxyRunNIDRequest) Reset() { + *x = ProxyRunNIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyRunNIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyRunNIDRequest) ProtoMessage() {} + +func (x *ProxyRunNIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyRunNIDRequest.ProtoReflect.Descriptor instead. +func (*ProxyRunNIDRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{2} +} + +func (x *ProxyRunNIDRequest) GetSourceDbName() string { + if x != nil { + return x.SourceDbName + } + return "" +} + +func (x *ProxyRunNIDRequest) GetDestDbName() string { + if x != nil { + return x.DestDbName + } + return "" +} + +func (x *ProxyRunNIDRequest) GetParams() []string { + if x != nil { + return x.Params + } + return nil +} + +type ProxyRunNIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProxyRunNIDResponse) Reset() { + *x = ProxyRunNIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyRunNIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyRunNIDResponse) ProtoMessage() {} + +func (x *ProxyRunNIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyRunNIDResponse.ProtoReflect.Descriptor instead. 
+func (*ProxyRunNIDResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{3} +} + +type ProxyRunDbcaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OracleHome string `protobuf:"bytes,1,opt,name=oracle_home,json=oracleHome,proto3" json:"oracle_home,omitempty"` + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + Params []string `protobuf:"bytes,3,rep,name=params,proto3" json:"params,omitempty"` +} + +func (x *ProxyRunDbcaRequest) Reset() { + *x = ProxyRunDbcaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyRunDbcaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyRunDbcaRequest) ProtoMessage() {} + +func (x *ProxyRunDbcaRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyRunDbcaRequest.ProtoReflect.Descriptor instead. +func (*ProxyRunDbcaRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{4} +} + +func (x *ProxyRunDbcaRequest) GetOracleHome() string { + if x != nil { + return x.OracleHome + } + return "" +} + +func (x *ProxyRunDbcaRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *ProxyRunDbcaRequest) GetParams() []string { + if x != nil { + return x.Params + } + return nil +} + +type ProxyRunDbcaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProxyRunDbcaResponse) Reset() { + *x = ProxyRunDbcaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyRunDbcaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyRunDbcaResponse) ProtoMessage() {} + +func (x *ProxyRunDbcaResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyRunDbcaResponse.ProtoReflect.Descriptor instead. 
+func (*ProxyRunDbcaResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP(), []int{5} +} + +var File_oracle_pkg_agents_oracle_dbdaemon_proxy_proto protoreflect.FileDescriptor + +var file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x64, 0x62, 0x64, 0x61, 0x65, + 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x1a, 0x25, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, + 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x64, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x64, 0x62, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x70, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x70, 0x66, 0x69, 0x6c, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x74, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, + 0x6e, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x73, 0x74, 0x44, 0x62, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x73, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x44, 0x62, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x61, + 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x48, 0x6f, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x52, 0x75, 0x6e, 0x44, 0x62, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0xc9, 0x03, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x61, 0x65, 0x6d, + 0x6f, 
0x6e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x5d, 0x0a, 0x0e, 0x42, 0x6f, 0x75, 0x6e, 0x63, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, + 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, + 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, + 0x6e, 0x44, 0x62, 0x63, 0x61, 0x12, 0x22, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x44, 0x62, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, + 0x75, 0x6e, 0x44, 0x62, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, + 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x4e, 0x49, 0x44, 0x12, 0x21, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x75, 0x6e, 0x4e, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x12, 0x1c, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x53, + 0x65, 0x74, 0x45, 0x6e, 0x76, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x53, 0x65, 0x74, + 0x45, 0x6e, 0x76, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x58, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x65, 0x6c, + 0x63, 0x61, 0x72, 0x72, 0x6f, 0x2d, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2d, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x3b, 0x6f, + 0x72, 0x61, 0x63, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescOnce sync.Once + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescData = file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDesc +) + +func 
file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescGZIP() []byte { + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescOnce.Do(func() { + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescData = protoimpl.X.CompressGZIP(file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescData) + }) + return file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDescData +} + +var file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_goTypes = []interface{}{ + (*SetEnvRequest)(nil), // 0: agents.oracle.SetEnvRequest + (*SetEnvResponse)(nil), // 1: agents.oracle.SetEnvResponse + (*ProxyRunNIDRequest)(nil), // 2: agents.oracle.ProxyRunNIDRequest + (*ProxyRunNIDResponse)(nil), // 3: agents.oracle.ProxyRunNIDResponse + (*ProxyRunDbcaRequest)(nil), // 4: agents.oracle.ProxyRunDbcaRequest + (*ProxyRunDbcaResponse)(nil), // 5: agents.oracle.ProxyRunDbcaResponse + (*BounceDatabaseRequest)(nil), // 6: agents.oracle.BounceDatabaseRequest + (*BounceListenerRequest)(nil), // 7: agents.oracle.BounceListenerRequest + (*BounceDatabaseResponse)(nil), // 8: agents.oracle.BounceDatabaseResponse + (*BounceListenerResponse)(nil), // 9: agents.oracle.BounceListenerResponse +} +var file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_depIdxs = []int32{ + 6, // 0: agents.oracle.DatabaseDaemonProxy.BounceDatabase:input_type -> agents.oracle.BounceDatabaseRequest + 7, // 1: agents.oracle.DatabaseDaemonProxy.BounceListener:input_type -> agents.oracle.BounceListenerRequest + 4, // 2: agents.oracle.DatabaseDaemonProxy.ProxyRunDbca:input_type -> agents.oracle.ProxyRunDbcaRequest + 2, // 3: agents.oracle.DatabaseDaemonProxy.ProxyRunNID:input_type -> agents.oracle.ProxyRunNIDRequest + 0, // 4: agents.oracle.DatabaseDaemonProxy.SetEnv:input_type -> agents.oracle.SetEnvRequest + 8, // 5: agents.oracle.DatabaseDaemonProxy.BounceDatabase:output_type -> agents.oracle.BounceDatabaseResponse + 9, // 6: agents.oracle.DatabaseDaemonProxy.BounceListener:output_type -> agents.oracle.BounceListenerResponse + 5, // 7: agents.oracle.DatabaseDaemonProxy.ProxyRunDbca:output_type -> agents.oracle.ProxyRunDbcaResponse + 3, // 8: agents.oracle.DatabaseDaemonProxy.ProxyRunNID:output_type -> agents.oracle.ProxyRunNIDResponse + 1, // 9: agents.oracle.DatabaseDaemonProxy.SetEnv:output_type -> agents.oracle.SetEnvResponse + 5, // [5:10] is the sub-list for method output_type + 0, // [0:5] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_init() } +func file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_init() { + if File_oracle_pkg_agents_oracle_dbdaemon_proxy_proto != nil { + return + } + file_oracle_pkg_agents_oracle_oracle_proto_init() + if !protoimpl.UnsafeEnabled { + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetEnvRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetEnvResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyRunNIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyRunNIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyRunDbcaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyRunDbcaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_goTypes, + DependencyIndexes: file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_depIdxs, + MessageInfos: file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_msgTypes, + }.Build() + File_oracle_pkg_agents_oracle_dbdaemon_proxy_proto = out.File + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_rawDesc = nil + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_goTypes = nil + file_oracle_pkg_agents_oracle_dbdaemon_proxy_proto_depIdxs = nil +} diff --git a/oracle/pkg/agents/oracle/dbdaemon_proxy.proto b/oracle/pkg/agents/oracle/dbdaemon_proxy.proto new file mode 100644 index 0000000..b3f8eea --- /dev/null +++ b/oracle/pkg/agents/oracle/dbdaemon_proxy.proto @@ -0,0 +1,63 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Database Daemon proxy is used for privileged database ops, +// e.g. bouncing a database and the listeners. It is intended to be used by the +// agents running locally on the same database container. +syntax = "proto3"; + +package agents.oracle; + +import "oracle/pkg/agents/oracle/oracle.proto"; + +option go_package = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle;oracle"; + +// DatabaseDaemonProxy defines the API for a daemon proxy running together with +// a database in the same container. +service DatabaseDaemonProxy { + // BounceDatabase RPC call to start/stop a database. + rpc BounceDatabase(BounceDatabaseRequest) returns (BounceDatabaseResponse); + // BounceListener RPC call to start/stop a listener. 
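+  // The listener is selected by name; see BounceListenerRequest in
+  // oracle.proto for the full set of request fields.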
+  rpc BounceListener(BounceListenerRequest) returns (BounceListenerResponse);
+  // ProxyRunDbca executes the dbca command with the given parameters.
+  rpc ProxyRunDbca(ProxyRunDbcaRequest) returns (ProxyRunDbcaResponse);
+  // ProxyRunNID RPC call executes database rename operations.
+  rpc ProxyRunNID(ProxyRunNIDRequest) returns (ProxyRunNIDResponse);
+  // SetEnv RPC call moves/relinks Oracle config files within the oracledb container.
+  rpc SetEnv(SetEnvRequest) returns (SetEnvResponse);
+}
+
+message SetEnvRequest {
+  string oracle_home = 1;
+  string cdb_name = 2;
+  string spfile_path = 3;
+}
+
+message SetEnvResponse {}
+
+message ProxyRunNIDRequest {
+  string source_db_name = 1;
+  string dest_db_name = 2;
+  repeated string params = 3;
+}
+
+message ProxyRunNIDResponse {}
+
+message ProxyRunDbcaRequest {
+  string oracle_home = 1;
+  string database_name = 2;
+  repeated string params = 3;
+}
+
+message ProxyRunDbcaResponse {}
diff --git a/oracle/pkg/agents/oracle/dbdaemon_proxy_grpc.pb.go b/oracle/pkg/agents/oracle/dbdaemon_proxy_grpc.pb.go
new file mode 100644
index 0000000..c3421bf
--- /dev/null
+++ b/oracle/pkg/agents/oracle/dbdaemon_proxy_grpc.pb.go
@@ -0,0 +1,255 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+
+package oracle
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// DatabaseDaemonProxyClient is the client API for DatabaseDaemonProxy service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type DatabaseDaemonProxyClient interface {
+	// BounceDatabase RPC call to start/stop a database.
+	BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error)
+	// BounceListener RPC call to start/stop a listener.
+	BounceListener(ctx context.Context, in *BounceListenerRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error)
+	// ProxyRunDbca executes the dbca command with the given parameters.
+	ProxyRunDbca(ctx context.Context, in *ProxyRunDbcaRequest, opts ...grpc.CallOption) (*ProxyRunDbcaResponse, error)
+	// ProxyRunNID RPC call executes database rename operations.
+	ProxyRunNID(ctx context.Context, in *ProxyRunNIDRequest, opts ...grpc.CallOption) (*ProxyRunNIDResponse, error)
+	// SetEnv RPC call moves/relinks Oracle config files within the oracledb container.
+	SetEnv(ctx context.Context, in *SetEnvRequest, opts ...grpc.CallOption) (*SetEnvResponse, error)
+}
+
+type databaseDaemonProxyClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewDatabaseDaemonProxyClient(cc grpc.ClientConnInterface) DatabaseDaemonProxyClient {
+	return &databaseDaemonProxyClient{cc}
+}
+
+func (c *databaseDaemonProxyClient) BounceDatabase(ctx context.Context, in *BounceDatabaseRequest, opts ...grpc.CallOption) (*BounceDatabaseResponse, error) {
+	out := new(BounceDatabaseResponse)
+	err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemonProxy/BounceDatabase", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *databaseDaemonProxyClient) BounceListener(ctx context.Context, in *BounceListenerRequest, opts ...grpc.CallOption) (*BounceListenerResponse, error) {
+	out := new(BounceListenerResponse)
+	err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemonProxy/BounceListener", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *databaseDaemonProxyClient) ProxyRunDbca(ctx context.Context, in *ProxyRunDbcaRequest, opts ...grpc.CallOption) (*ProxyRunDbcaResponse, error) {
+	out := new(ProxyRunDbcaResponse)
+	err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemonProxy/ProxyRunDbca", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *databaseDaemonProxyClient) ProxyRunNID(ctx context.Context, in *ProxyRunNIDRequest, opts ...grpc.CallOption) (*ProxyRunNIDResponse, error) {
+	out := new(ProxyRunNIDResponse)
+	err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemonProxy/ProxyRunNID", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *databaseDaemonProxyClient) SetEnv(ctx context.Context, in *SetEnvRequest, opts ...grpc.CallOption) (*SetEnvResponse, error) {
+	out := new(SetEnvResponse)
+	err := c.cc.Invoke(ctx, "/agents.oracle.DatabaseDaemonProxy/SetEnv", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// DatabaseDaemonProxyServer is the server API for DatabaseDaemonProxy service.
+// All implementations must embed UnimplementedDatabaseDaemonProxyServer
+// for forward compatibility
+type DatabaseDaemonProxyServer interface {
+	// BounceDatabase RPC call to start/stop a database.
+	BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error)
+	// BounceListener RPC call to start/stop a listener.
+	BounceListener(context.Context, *BounceListenerRequest) (*BounceListenerResponse, error)
+	// ProxyRunDbca executes the dbca command with the given parameters.
+	ProxyRunDbca(context.Context, *ProxyRunDbcaRequest) (*ProxyRunDbcaResponse, error)
+	// ProxyRunNID RPC call executes database rename operations.
+	ProxyRunNID(context.Context, *ProxyRunNIDRequest) (*ProxyRunNIDResponse, error)
+	// SetEnv RPC call moves/relinks Oracle config files within the oracledb container.
+	SetEnv(context.Context, *SetEnvRequest) (*SetEnvResponse, error)
+	mustEmbedUnimplementedDatabaseDaemonProxyServer()
+}
+
+// UnimplementedDatabaseDaemonProxyServer must be embedded to have forward compatible implementations.
+type UnimplementedDatabaseDaemonProxyServer struct { +} + +func (UnimplementedDatabaseDaemonProxyServer) BounceDatabase(context.Context, *BounceDatabaseRequest) (*BounceDatabaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BounceDatabase not implemented") +} +func (UnimplementedDatabaseDaemonProxyServer) BounceListener(context.Context, *BounceListenerRequest) (*BounceListenerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BounceListener not implemented") +} +func (UnimplementedDatabaseDaemonProxyServer) ProxyRunDbca(context.Context, *ProxyRunDbcaRequest) (*ProxyRunDbcaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProxyRunDbca not implemented") +} +func (UnimplementedDatabaseDaemonProxyServer) ProxyRunNID(context.Context, *ProxyRunNIDRequest) (*ProxyRunNIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProxyRunNID not implemented") +} +func (UnimplementedDatabaseDaemonProxyServer) SetEnv(context.Context, *SetEnvRequest) (*SetEnvResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetEnv not implemented") +} +func (UnimplementedDatabaseDaemonProxyServer) mustEmbedUnimplementedDatabaseDaemonProxyServer() {} + +// UnsafeDatabaseDaemonProxyServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DatabaseDaemonProxyServer will +// result in compilation errors. +type UnsafeDatabaseDaemonProxyServer interface { + mustEmbedUnimplementedDatabaseDaemonProxyServer() +} + +func RegisterDatabaseDaemonProxyServer(s grpc.ServiceRegistrar, srv DatabaseDaemonProxyServer) { + s.RegisterService(&DatabaseDaemonProxy_ServiceDesc, srv) +} + +func _DatabaseDaemonProxy_BounceDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BounceDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonProxyServer).BounceDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemonProxy/BounceDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonProxyServer).BounceDatabase(ctx, req.(*BounceDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemonProxy_BounceListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BounceListenerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonProxyServer).BounceListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemonProxy/BounceListener", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonProxyServer).BounceListener(ctx, req.(*BounceListenerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemonProxy_ProxyRunDbca_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProxyRunDbcaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonProxyServer).ProxyRunDbca(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemonProxy/ProxyRunDbca", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonProxyServer).ProxyRunDbca(ctx, req.(*ProxyRunDbcaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemonProxy_ProxyRunNID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProxyRunNIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonProxyServer).ProxyRunNID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemonProxy/ProxyRunNID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonProxyServer).ProxyRunNID(ctx, req.(*ProxyRunNIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseDaemonProxy_SetEnv_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetEnvRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseDaemonProxyServer).SetEnv(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/agents.oracle.DatabaseDaemonProxy/SetEnv", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseDaemonProxyServer).SetEnv(ctx, req.(*SetEnvRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DatabaseDaemonProxy_ServiceDesc is the grpc.ServiceDesc for DatabaseDaemonProxy service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DatabaseDaemonProxy_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agents.oracle.DatabaseDaemonProxy", + HandlerType: (*DatabaseDaemonProxyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BounceDatabase", + Handler: _DatabaseDaemonProxy_BounceDatabase_Handler, + }, + { + MethodName: "BounceListener", + Handler: _DatabaseDaemonProxy_BounceListener_Handler, + }, + { + MethodName: "ProxyRunDbca", + Handler: _DatabaseDaemonProxy_ProxyRunDbca_Handler, + }, + { + MethodName: "ProxyRunNID", + Handler: _DatabaseDaemonProxy_ProxyRunNID_Handler, + }, + { + MethodName: "SetEnv", + Handler: _DatabaseDaemonProxy_SetEnv_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "oracle/pkg/agents/oracle/dbdaemon_proxy.proto", +} diff --git a/oracle/pkg/agents/oracle/oracle.pb.go b/oracle/pkg/agents/oracle/oracle.pb.go new file mode 100644 index 0000000..b253131 --- /dev/null +++ b/oracle/pkg/agents/oracle/oracle.pb.go @@ -0,0 +1,729 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.4 +// source: oracle/pkg/agents/oracle/oracle.proto + +package oracle + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// DatabaseState represents different states of a database. +type DatabaseState int32 + +const ( + // Not set. + DatabaseState_DATABASE_STATE_UNSPECIFIED DatabaseState = 0 + // Being created. + DatabaseState_CREATING DatabaseState = 1 + // Has been created and is fully usable. + DatabaseState_READY DatabaseState = 2 + // Being stopped. + DatabaseState_STOPPING DatabaseState = 3 + // Stopped. + DatabaseState_STOPPED DatabaseState = 4 + // Being updated. + DatabaseState_UPDATING DatabaseState = 5 + // Being deleted. + DatabaseState_DELETING DatabaseState = 6 + // Being repaired and may be unusable. + DatabaseState_DATABASE_STATE_ERROR DatabaseState = 7 +) + +// Enum value maps for DatabaseState. +var ( + DatabaseState_name = map[int32]string{ + 0: "DATABASE_STATE_UNSPECIFIED", + 1: "CREATING", + 2: "READY", + 3: "STOPPING", + 4: "STOPPED", + 5: "UPDATING", + 6: "DELETING", + 7: "DATABASE_STATE_ERROR", + } + DatabaseState_value = map[string]int32{ + "DATABASE_STATE_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, + "STOPPING": 3, + "STOPPED": 4, + "UPDATING": 5, + "DELETING": 6, + "DATABASE_STATE_ERROR": 7, + } +) + +func (x DatabaseState) Enum() *DatabaseState { + p := new(DatabaseState) + *p = x + return p +} + +func (x DatabaseState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DatabaseState) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[0].Descriptor() +} + +func (DatabaseState) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[0] +} + +func (x DatabaseState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DatabaseState.Descriptor instead. +func (DatabaseState) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{0} +} + +// ListenerState defines whether a listener is up, down or in error state. +type ListenerState int32 + +const ( + ListenerState_LISTENER_STATE_UNSPECIFIED ListenerState = 0 + ListenerState_UP ListenerState = 1 + ListenerState_DOWN ListenerState = 2 + // enum values use C++ scoping rules, meaning that enum values + // are siblings of their type, not children of it. + // Therefore an ERROR must be unique across all enums in this proto. + ListenerState_LISTENER_STATE_ERROR ListenerState = 3 +) + +// Enum value maps for ListenerState. 
+var ( + ListenerState_name = map[int32]string{ + 0: "LISTENER_STATE_UNSPECIFIED", + 1: "UP", + 2: "DOWN", + 3: "LISTENER_STATE_ERROR", + } + ListenerState_value = map[string]int32{ + "LISTENER_STATE_UNSPECIFIED": 0, + "UP": 1, + "DOWN": 2, + "LISTENER_STATE_ERROR": 3, + } +) + +func (x ListenerState) Enum() *ListenerState { + p := new(ListenerState) + *p = x + return p +} + +func (x ListenerState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ListenerState) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[1].Descriptor() +} + +func (ListenerState) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[1] +} + +func (x ListenerState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ListenerState.Descriptor instead. +func (ListenerState) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{1} +} + +// Allowed database bounce operations. +type BounceDatabaseRequest_Operation int32 + +const ( + // UNKNOWN is an unacceptable database operation + // to be rejected by the DatabaseDaemon. + BounceDatabaseRequest_UNKNOWN BounceDatabaseRequest_Operation = 0 + // STARTUP is an operation that starts a database + // bringing it up to a state defined by OPTION + // (default OPTION is open). + BounceDatabaseRequest_STARTUP BounceDatabaseRequest_Operation = 1 + // SHUTDOWN is an operation that stops a database. + // The way a database is stopped using SHUTDOWN + // operation is determined by OPTION + // (default OPTION is immediate). + BounceDatabaseRequest_SHUTDOWN BounceDatabaseRequest_Operation = 2 +) + +// Enum value maps for BounceDatabaseRequest_Operation. +var ( + BounceDatabaseRequest_Operation_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STARTUP", + 2: "SHUTDOWN", + } + BounceDatabaseRequest_Operation_value = map[string]int32{ + "UNKNOWN": 0, + "STARTUP": 1, + "SHUTDOWN": 2, + } +) + +func (x BounceDatabaseRequest_Operation) Enum() *BounceDatabaseRequest_Operation { + p := new(BounceDatabaseRequest_Operation) + *p = x + return p +} + +func (x BounceDatabaseRequest_Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BounceDatabaseRequest_Operation) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[2].Descriptor() +} + +func (BounceDatabaseRequest_Operation) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[2] +} + +func (x BounceDatabaseRequest_Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BounceDatabaseRequest_Operation.Descriptor instead. +func (BounceDatabaseRequest_Operation) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{0, 0} +} + +// Allowed listener bounce operations. +type BounceListenerRequest_Operation int32 + +const ( + // UNKNOWN is an unacceptable listener operation + // to be rejected by the DatabaseDaemon. + BounceListenerRequest_UNKNOWN BounceListenerRequest_Operation = 0 + // START is an operation that starts a listener. + BounceListenerRequest_START BounceListenerRequest_Operation = 1 + // STOP is an operation that stops a listener. 
+ BounceListenerRequest_STOP BounceListenerRequest_Operation = 2 +) + +// Enum value maps for BounceListenerRequest_Operation. +var ( + BounceListenerRequest_Operation_name = map[int32]string{ + 0: "UNKNOWN", + 1: "START", + 2: "STOP", + } + BounceListenerRequest_Operation_value = map[string]int32{ + "UNKNOWN": 0, + "START": 1, + "STOP": 2, + } +) + +func (x BounceListenerRequest_Operation) Enum() *BounceListenerRequest_Operation { + p := new(BounceListenerRequest_Operation) + *p = x + return p +} + +func (x BounceListenerRequest_Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BounceListenerRequest_Operation) Descriptor() protoreflect.EnumDescriptor { + return file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[3].Descriptor() +} + +func (BounceListenerRequest_Operation) Type() protoreflect.EnumType { + return &file_oracle_pkg_agents_oracle_oracle_proto_enumTypes[3] +} + +func (x BounceListenerRequest_Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BounceListenerRequest_Operation.Descriptor instead. +func (BounceListenerRequest_Operation) EnumDescriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{2, 0} +} + +type BounceDatabaseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + // operation: startup / shutdown + Operation BounceDatabaseRequest_Operation `protobuf:"varint,2,opt,name=operation,proto3,enum=agents.oracle.BounceDatabaseRequest_Operation" json:"operation,omitempty"` + // option: nomount/mount/open or immediate/transactional/abort + Option string `protobuf:"bytes,3,opt,name=option,proto3" json:"option,omitempty"` + // avoid_config_backup: by default we backup the config except for scenarios + // when it isn't possible (like bootstrapping) + AvoidConfigBackup bool `protobuf:"varint,4,opt,name=avoid_config_backup,json=avoidConfigBackup,proto3" json:"avoid_config_backup,omitempty"` +} + +func (x *BounceDatabaseRequest) Reset() { + *x = BounceDatabaseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BounceDatabaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BounceDatabaseRequest) ProtoMessage() {} + +func (x *BounceDatabaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BounceDatabaseRequest.ProtoReflect.Descriptor instead. 
+func (*BounceDatabaseRequest) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{0} +} + +func (x *BounceDatabaseRequest) GetDatabaseName() string { + if x != nil { + return x.DatabaseName + } + return "" +} + +func (x *BounceDatabaseRequest) GetOperation() BounceDatabaseRequest_Operation { + if x != nil { + return x.Operation + } + return BounceDatabaseRequest_UNKNOWN +} + +func (x *BounceDatabaseRequest) GetOption() string { + if x != nil { + return x.Option + } + return "" +} + +func (x *BounceDatabaseRequest) GetAvoidConfigBackup() bool { + if x != nil { + return x.AvoidConfigBackup + } + return false +} + +// BounceDatabaseResponse is a message containing +// a response from a BounceDatabase request. +// database_state is set to ERROR if an expected error +// occurs (e.g. a security verification check fails). +// Otherwise database_state is set to nil and an +// unexpected error is returned via error. +type BounceDatabaseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Possible states: READY, STOPPED, ERROR. + DatabaseState DatabaseState `protobuf:"varint,1,opt,name=database_state,json=databaseState,proto3,enum=agents.oracle.DatabaseState" json:"database_state,omitempty"` + ErrorMsg []string `protobuf:"bytes,2,rep,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"` +} + +func (x *BounceDatabaseResponse) Reset() { + *x = BounceDatabaseResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BounceDatabaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BounceDatabaseResponse) ProtoMessage() {} + +func (x *BounceDatabaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BounceDatabaseResponse.ProtoReflect.Descriptor instead. +func (*BounceDatabaseResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{1} +} + +func (x *BounceDatabaseResponse) GetDatabaseState() DatabaseState { + if x != nil { + return x.DatabaseState + } + return DatabaseState_DATABASE_STATE_UNSPECIFIED +} + +func (x *BounceDatabaseResponse) GetErrorMsg() []string { + if x != nil { + return x.ErrorMsg + } + return nil +} + +type BounceListenerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ListenerName string `protobuf:"bytes,1,opt,name=listener_name,json=listenerName,proto3" json:"listener_name,omitempty"` + // TNS_ADMIN is the user specified path to Oracle configuration files. 
+	TnsAdmin string `protobuf:"bytes,2,opt,name=tns_admin,json=tnsAdmin,proto3" json:"tns_admin,omitempty"`
+	// operation: start / stop
+	Operation BounceListenerRequest_Operation `protobuf:"varint,3,opt,name=operation,proto3,enum=agents.oracle.BounceListenerRequest_Operation" json:"operation,omitempty"`
+}
+
+func (x *BounceListenerRequest) Reset() {
+	*x = BounceListenerRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BounceListenerRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BounceListenerRequest) ProtoMessage() {}
+
+func (x *BounceListenerRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BounceListenerRequest.ProtoReflect.Descriptor instead.
+func (*BounceListenerRequest) Descriptor() ([]byte, []int) {
+	return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *BounceListenerRequest) GetListenerName() string {
+	if x != nil {
+		return x.ListenerName
+	}
+	return ""
+}
+
+func (x *BounceListenerRequest) GetTnsAdmin() string {
+	if x != nil {
+		return x.TnsAdmin
+	}
+	return ""
+}
+
+func (x *BounceListenerRequest) GetOperation() BounceListenerRequest_Operation {
+	if x != nil {
+		return x.Operation
+	}
+	return BounceListenerRequest_UNKNOWN
+}
+
+// BounceListenerResponse is a message containing
+// a response from a BounceListener request.
+// listener_state is set to ERROR if an expected error
+// occurs (e.g. a security verification check fails).
+// Otherwise listener_state is set to nil and an
+// unexpected error is returned via error.
+type BounceListenerResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Possible states: UP, DOWN, ERROR.
+	ListenerState ListenerState `protobuf:"varint,1,opt,name=listener_state,json=listenerState,proto3,enum=agents.oracle.ListenerState" json:"listener_state,omitempty"`
+	ErrorMsg      []string      `protobuf:"bytes,2,rep,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
+}
+
+func (x *BounceListenerResponse) Reset() {
+	*x = BounceListenerResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BounceListenerResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BounceListenerResponse) ProtoMessage() {}
+
+func (x *BounceListenerResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BounceListenerResponse.ProtoReflect.Descriptor instead.
+func (*BounceListenerResponse) Descriptor() ([]byte, []int) { + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP(), []int{3} +} + +func (x *BounceListenerResponse) GetListenerState() ListenerState { + if x != nil { + return x.ListenerState + } + return ListenerState_LISTENER_STATE_UNSPECIFIED +} + +func (x *BounceListenerResponse) GetErrorMsg() []string { + if x != nil { + return x.ErrorMsg + } + return nil +} + +var File_oracle_pkg_agents_oracle_oracle_proto protoreflect.FileDescriptor + +var file_oracle_pkg_agents_oracle_oracle_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x15, 0x42, 0x6f, 0x75, 0x6e, 0x63, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x33, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x41, 0x52, 0x54, 0x55, 0x50, + 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x55, 0x54, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x22, 0x7a, 0x0a, 0x16, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0e, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, + 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x22, 0xd6, 0x01, 0x0a, + 0x15, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x6e, 0x73, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x6e, 0x73, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x42, 0x6f, 0x75, 0x6e, + 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x54, 0x4f, 0x50, 0x10, 0x02, 0x22, 0x7a, 0x0a, 0x16, 0x42, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x43, 0x0a, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, + 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, + 0x67, 0x2a, 0x99, 0x01, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, + 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, + 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x50, 0x44, 0x41, 0x54, + 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x49, 0x4e, + 0x47, 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x07, 0x2a, 0x5b, 0x0a, + 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, + 0x0a, 0x1a, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x06, + 0x0a, 0x02, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x42, 0x58, 0x5a, 0x56, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 
0x72, 0x6d, 0x2f, 0x65, 0x6c, 0x63, + 0x61, 0x72, 0x72, 0x6f, 0x2d, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2d, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6f, 0x72, 0x61, 0x63, 0x6c, 0x65, 0x3b, 0x6f, 0x72, + 0x61, 0x63, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oracle_pkg_agents_oracle_oracle_proto_rawDescOnce sync.Once + file_oracle_pkg_agents_oracle_oracle_proto_rawDescData = file_oracle_pkg_agents_oracle_oracle_proto_rawDesc +) + +func file_oracle_pkg_agents_oracle_oracle_proto_rawDescGZIP() []byte { + file_oracle_pkg_agents_oracle_oracle_proto_rawDescOnce.Do(func() { + file_oracle_pkg_agents_oracle_oracle_proto_rawDescData = protoimpl.X.CompressGZIP(file_oracle_pkg_agents_oracle_oracle_proto_rawDescData) + }) + return file_oracle_pkg_agents_oracle_oracle_proto_rawDescData +} + +var file_oracle_pkg_agents_oracle_oracle_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_oracle_pkg_agents_oracle_oracle_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_oracle_pkg_agents_oracle_oracle_proto_goTypes = []interface{}{ + (DatabaseState)(0), // 0: agents.oracle.DatabaseState + (ListenerState)(0), // 1: agents.oracle.ListenerState + (BounceDatabaseRequest_Operation)(0), // 2: agents.oracle.BounceDatabaseRequest.Operation + (BounceListenerRequest_Operation)(0), // 3: agents.oracle.BounceListenerRequest.Operation + (*BounceDatabaseRequest)(nil), // 4: agents.oracle.BounceDatabaseRequest + (*BounceDatabaseResponse)(nil), // 5: agents.oracle.BounceDatabaseResponse + (*BounceListenerRequest)(nil), // 6: agents.oracle.BounceListenerRequest + (*BounceListenerResponse)(nil), // 7: agents.oracle.BounceListenerResponse +} +var file_oracle_pkg_agents_oracle_oracle_proto_depIdxs = []int32{ + 2, // 0: agents.oracle.BounceDatabaseRequest.operation:type_name -> agents.oracle.BounceDatabaseRequest.Operation + 0, // 1: agents.oracle.BounceDatabaseResponse.database_state:type_name -> agents.oracle.DatabaseState + 3, // 2: agents.oracle.BounceListenerRequest.operation:type_name -> agents.oracle.BounceListenerRequest.Operation + 1, // 3: agents.oracle.BounceListenerResponse.listener_state:type_name -> agents.oracle.ListenerState + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_oracle_pkg_agents_oracle_oracle_proto_init() } +func file_oracle_pkg_agents_oracle_oracle_proto_init() { + if File_oracle_pkg_agents_oracle_oracle_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceDatabaseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceDatabaseResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceListenerRequest); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oracle_pkg_agents_oracle_oracle_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BounceListenerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oracle_pkg_agents_oracle_oracle_proto_rawDesc, + NumEnums: 4, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_oracle_pkg_agents_oracle_oracle_proto_goTypes, + DependencyIndexes: file_oracle_pkg_agents_oracle_oracle_proto_depIdxs, + EnumInfos: file_oracle_pkg_agents_oracle_oracle_proto_enumTypes, + MessageInfos: file_oracle_pkg_agents_oracle_oracle_proto_msgTypes, + }.Build() + File_oracle_pkg_agents_oracle_oracle_proto = out.File + file_oracle_pkg_agents_oracle_oracle_proto_rawDesc = nil + file_oracle_pkg_agents_oracle_oracle_proto_goTypes = nil + file_oracle_pkg_agents_oracle_oracle_proto_depIdxs = nil +} diff --git a/oracle/pkg/agents/oracle/oracle.proto b/oracle/pkg/agents/oracle/oracle.proto new file mode 100644 index 0000000..2e8a0bd --- /dev/null +++ b/oracle/pkg/agents/oracle/oracle.proto @@ -0,0 +1,122 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package agents.oracle; + +option go_package = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle;oracle"; + +// DatabaseState represents different states of a database. +enum DatabaseState { + // Not set. + DATABASE_STATE_UNSPECIFIED = 0; + // Being created. + CREATING = 1; + // Has been created and is fully usable. + READY = 2; + // Being stopped. + STOPPING = 3; + // Stopped. + STOPPED = 4; + // Being updated. + UPDATING = 5; + // Being deleted. + DELETING = 6; + // Being repaired and may be unusable. + DATABASE_STATE_ERROR = 7; +} + +// ListenerState defines whether a listener is up, down or in error state. +enum ListenerState { + LISTENER_STATE_UNSPECIFIED = 0; + UP = 1; + DOWN = 2; + // enum values use C++ scoping rules, meaning that enum values + // are siblings of their type, not children of it. + // Therefore an ERROR must be unique across all enums in this proto. + LISTENER_STATE_ERROR = 3; +} + +message BounceDatabaseRequest { + // Allowed database bounce operations. + enum Operation { + // UNKNOWN is an unacceptable database operation + // to be rejected by the DatabaseDaemon. + UNKNOWN = 0; + // STARTUP is an operation that starts a database + // bringing it up to a state defined by OPTION + // (default OPTION is open). + STARTUP = 1; + // SHUTDOWN is an operation that stops a database. + // The way a database is stopped using SHUTDOWN + // operation is determined by OPTION + // (default OPTION is immediate). 
+    SHUTDOWN = 2;
+  }
+
+  string database_name = 1;
+  // operation: startup / shutdown
+  Operation operation = 2;
+  // option: nomount/mount/open or immediate/transactional/abort
+  string option = 3;
+  // avoid_config_backup: by default we backup the config except for scenarios
+  // when it isn't possible (like bootstrapping)
+  bool avoid_config_backup = 4;
+}
+
+// BounceDatabaseResponse is a message containing
+// a response from a BounceDatabase request.
+// database_state is set to ERROR if an expected error
+// occurs (e.g. a security verification check fails).
+// Otherwise database_state is set to nil and an
+// unexpected error is returned via error.
+message BounceDatabaseResponse {
+  // Possible states: READY, STOPPED, ERROR.
+  DatabaseState database_state = 1;
+  repeated string error_msg = 2;
+}
+
+message BounceListenerRequest {
+  // Allowed listener bounce operations.
+  enum Operation {
+    // UNKNOWN is an unacceptable listener operation
+    // to be rejected by the DatabaseDaemon.
+    UNKNOWN = 0;
+    // START is an operation that starts a listener.
+    START = 1;
+    // STOP is an operation that stops a listener.
+    STOP = 2;
+  }
+
+  // The name of the listener process.
+  string listener_name = 1;
+  // TNS_ADMIN is the user specified path to Oracle configuration files.
+  string tns_admin = 2;
+  // operation: start / stop
+  Operation operation = 3;
+}
+
+// BounceListenerResponse is a message containing
+// a response from a BounceListener request.
+// listener_state is set to ERROR if an expected error
+// occurs (e.g. a security verification check fails).
+// Otherwise listener_state is set to nil and an
+// unexpected error is returned via error.
+message BounceListenerResponse {
+  // Possible states: UP, DOWN, ERROR.
+  ListenerState listener_state = 1;
+  repeated string error_msg = 2;
+}
diff --git a/oracle/pkg/agents/security/BUILD.bazel b/oracle/pkg/agents/security/BUILD.bazel
new file mode 100644
index 0000000..130a52d
--- /dev/null
+++ b/oracle/pkg/agents/security/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "security",
+    srcs = ["security.go"],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/security",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//oracle/pkg/agents/common",
+        "//oracle/pkg/agents/consts",
+        "//oracle/pkg/agents/oracle",
+        "@com_github_docker_docker//api/types",
+        "@com_github_docker_docker//client",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
diff --git a/oracle/pkg/agents/security/security.go b/oracle/pkg/agents/security/security.go
new file mode 100644
index 0000000..698c63f
--- /dev/null
+++ b/oracle/pkg/agents/security/security.go
@@ -0,0 +1,219 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package security contains common methods regarding encryption and passwords.
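+//
+// It provides helpers to generate Oracle-compatible random passwords, to
+// rotate a database user's password through the Database Daemon's SQL*Plus
+// RPC (returning a connection string built with connect.EZ), and to poll
+// the oracle_db container for health over the Docker API.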
+package security
+
+import (
+	"context"
+	"crypto/rand"
+	"database/sql"
+	"fmt"
+	"math/big"
+	"time"
+	"unicode"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	"google.golang.org/grpc"
+	"k8s.io/klog/v2"
+
+	connect "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+)
+
+const (
+	passLength   = 10
+	alterUserSQL = "alter user %s identified by %s"
+)
+
+type dockerClient interface {
+	client.ContainerAPIClient
+	Close() error
+}
+
+type runSQLOnClient interface {
+	RunSQLPlus(context.Context, *dbdpb.RunSQLPlusCMDRequest, ...grpc.CallOption) (*dbdpb.RunCMDResponse, error)
+}
+
+type runSQLOnServer interface {
+	RunSQLPlus(context.Context, *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error)
+}
+
+// Security provides login and encryption methods.
+type Security struct {
+	dockerClient dockerClient
+	sqlOpen      func(string, string) (*sql.DB, error)
+	pollInterval time.Duration
+	dbdConn      *grpc.ClientConn
+	dbdClient    runSQLOnClient
+}
+
+// Close closes any Security resources and connections.
+func (s *Security) Close() error {
+	if s.dockerClient != nil {
+		if err := s.dockerClient.Close(); err != nil {
+			if s.dbdConn != nil {
+				if err2 := s.dbdConn.Close(); err2 != nil {
+					return err2
+				}
+			}
+			return err
+		}
+	}
+
+	if s.dbdConn != nil {
+		return s.dbdConn.Close()
+	}
+
+	return nil
+}
+
+// RandOraclePassword returns a random password containing letters and numbers.
+// It is the caller's responsibility to handle the error.
+func RandOraclePassword() (string, error) {
+	const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+	const numbers = "0123456789"
+	const alphanumeric = chars + numbers
+	result := make([]byte, passLength-1)
+
+	hasNumeric := false
+
+	for i := 0; i < passLength-1; i++ {
+		aRand, err := randInt(len(alphanumeric))
+		if err != nil {
+			return "", err
+		}
+		ch := alphanumeric[aRand]
+		if unicode.IsNumber(rune(ch)) {
+			hasNumeric = true
+		}
+		result[i] = ch
+	}
+
+	// We need at least one number in the password or Oracle will reject it.
+	if !hasNumeric {
+		nRand, err := randInt(len(numbers))
+		if err != nil {
+			return "", err
+		}
+		iRand, err := randInt(passLength - 1)
+		if err != nil {
+			return "", err
+		}
+		result[iRand] = numbers[nRand]
+	}
+
+	cRand, err := randInt(len(chars))
+	if err != nil {
+		return "", err
+	}
+
+	// Construct a password that starts with a letter.
+	return string(chars[cRand]) + string(result), nil
+}
+
+// SetupUserPwConnStringByClient sets the password for the given user to
+// a randomized password using the given client and returns the connection string.
+func SetupUserPwConnStringByClient(ctx context.Context, onClient runSQLOnClient, username, db, DBDomain string) (string, error) {
+	passwd, err := RandOraclePassword()
+	if err != nil {
+		return "", err
+	}
+	applySQL := []string{fmt.Sprintf(alterUserSQL, username, passwd)}
+	if _, err := onClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: applySQL, Suppress: true}); err != nil {
+		return "", err
+	}
+	return connect.EZ(username, passwd, consts.Localhost, fmt.Sprint(consts.SecureListenerPort), db, DBDomain, false), nil
+}
+
+// SetupUserPwConnStringOnServer sets the password for the given user to
+// a randomized password on the DB server and returns the connection string.
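+//
+// A minimal usage sketch (illustrative only: dbdServer stands for any
+// runSQLOnServer implementation, and the user/DB names are placeholders):
+//
+//	connStr, err := security.SetupUserPwConnStringOnServer(ctx, dbdServer, "SCOTT", "MYDB", "example.com")
+//	if err != nil {
+//		return err
+//	}
+//	// connStr now embeds the freshly generated password.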
+func SetupUserPwConnStringOnServer(ctx context.Context, onServer runSQLOnServer, username, db, DBDomain string) (string, error) { + passwd, err := RandOraclePassword() + if err != nil { + return "", err + } + applySQL := []string{fmt.Sprintf(alterUserSQL, username, passwd)} + if _, err := onServer.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: applySQL, Suppress: true}); err != nil { + return "", err + } + return connect.EZ(username, passwd, consts.Localhost, fmt.Sprint(consts.SecureListenerPort), db, DBDomain, false), nil +} + +// SetupConnStringOnServer generates and sets a random password for the given user +// on the DB server and returns +// the connection string without user/password part and the generated password. +func SetupConnStringOnServer(ctx context.Context, onServer runSQLOnServer, username, db, DBDomain string) (string, string, error) { + passwd, err := RandOraclePassword() + if err != nil { + return "", "", err + } + applySQL := []string{fmt.Sprintf(alterUserSQL, username, passwd)} + if _, err := onServer.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: applySQL, Suppress: true}); err != nil { + return "", "", err + } + return connect.EZ("", "", consts.Localhost, fmt.Sprint(consts.SecureListenerPort), db, DBDomain, false), passwd, nil +} + +// SetupUserPwConnString sets the password for the given user to a randomized password and returns the connection string. +func (s *Security) SetupUserPwConnString(ctx context.Context, username, db, DBDomain string) (string, error) { + return SetupUserPwConnStringByClient(ctx, s.dbdClient, username, db, DBDomain) +} + +// waitForHealthyOracleDBContainer waits until the oracle_db container is healthy or until context times out. +func (s *Security) waitForHealthyOracleDBContainer(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + isHealthy, err := s.IsContainerHealthy(ctx, consts.OracleDBContainerName) + if err != nil { + klog.ErrorS(err, "unable to connect to container, will retry", "container", consts.OracleDBContainerName) + } else if isHealthy { + return nil + } + } + time.Sleep(s.pollInterval) + } +} + +// IsContainerHealthy checks the health status of a named container. 
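+//
+// A minimal usage sketch:
+//
+//	healthy, err := s.IsContainerHealthy(ctx, consts.OracleDBContainerName)
+//	if err == nil && healthy {
+//		// safe to proceed
+//	}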
+func (s *Security) IsContainerHealthy(ctx context.Context, name string) (bool, error) { + cs, err := s.dockerClient.ContainerInspect(ctx, name) + if err != nil { + return false, err + } + + if cs.ContainerJSONBase != nil && cs.State != nil && cs.State.Health != nil { + healthStatus := cs.State.Health.Status + klog.InfoS("container health status", "container", cs.Name, "healthStatus", healthStatus) + if healthStatus == types.Healthy { + return true, nil + } + } else { + klog.InfoS("container has no available health status", "container", name) + } + return false, nil +} + +func randInt(maxInt int) (int64, error) { + n, err := rand.Int(rand.Reader, big.NewInt(int64(maxInt))) + if err != nil { + return 0, err + } + return n.Int64(), nil +} diff --git a/oracle/pkg/database/common/BUILD.bazel b/oracle/pkg/database/common/BUILD.bazel new file mode 100644 index 0000000..c51d48c --- /dev/null +++ b/oracle/pkg/database/common/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "common", + srcs = ["common.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/common", + visibility = ["//visibility:public"], + deps = ["//oracle/pkg/agents/consts"], +) diff --git a/oracle/pkg/database/common/common.go b/oracle/pkg/database/common/common.go new file mode 100644 index 0000000..ef0969e --- /dev/null +++ b/oracle/pkg/database/common/common.go @@ -0,0 +1,39 @@ +package common + +import ( + "fmt" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" +) + +// GetSourceOracleDataDirectory returns the Oracle data directory on the volume where Oracle is installed. +func GetSourceOracleDataDirectory(oracleVersion string) string { + if oracleVersion == consts.Oracle18c { + return consts.SourceOracleXeDataDirectory + } + return consts.SourceOracleDataDirectory +} + +// GetSourceOracleBase returns the value of ORACLE_BASE where Oracle is installed. +func GetSourceOracleBase(oracleVersion string) string { + if oracleVersion == consts.Oracle18c { + return consts.SourceOracleXeBase + } + return consts.SourceOracleBase +} + +// GetSourceOracleHome returns the value of ORACLE_HOME where Oracle is installed. +func GetSourceOracleHome(oracleVersion string) string { + if oracleVersion == consts.Oracle18c { + return fmt.Sprintf(consts.SourceOracleXeHome, oracleVersion) + } + return fmt.Sprintf(consts.SourceOracleHome, oracleVersion) +} + +// GetSourceOracleInventory returns the source OraInventory path. 
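+//
+// For example, GetSourceOracleInventory(consts.Oracle18c) returns the XE
+// inventory path; any other version string returns consts.SourceOracleInventory.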
+func GetSourceOracleInventory(oracleVersion string) string { + if oracleVersion == consts.Oracle18c { + return consts.SourceOracleXeInventory + } + return consts.SourceOracleInventory +} diff --git a/oracle/pkg/database/dbdaemon/BUILD.bazel b/oracle/pkg/database/dbdaemon/BUILD.bazel new file mode 100644 index 0000000..e9bc884 --- /dev/null +++ b/oracle/pkg/database/dbdaemon/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "dbdaemon", + srcs = [ + "dbdaemon_server.go", + "utils.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/dbdaemon", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/common", + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "//oracle/pkg/agents/security", + "//oracle/pkg/database/lib/lro", + "//oracle/pkg/database/provision", + "@com_github_godror_godror//:godror", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_pkg_errors//:errors", + "@com_google_cloud_go_storage//:storage", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_k8s_klog_v2//:klog", + "@org_golang_google_api//iterator", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_protobuf//proto", + ], +) + +go_test( + name = "dbdaemon_test", + srcs = [ + "dbdaemon_server_test.go", + "utils_test.go", + ], + embed = [":dbdaemon"], + deps = [ + "//oracle/pkg/agents/oracle", + "@com_github_godror_godror//:godror", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//test/bufconn", + ], +) diff --git a/oracle/pkg/database/dbdaemon/dbdaemon_server.go b/oracle/pkg/database/dbdaemon/dbdaemon_server.go new file mode 100644 index 0000000..8bcf10d --- /dev/null +++ b/oracle/pkg/database/dbdaemon/dbdaemon_server.go @@ -0,0 +1,1799 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbdaemon implements a gRPC service for +// running privileged database ops, e.g. sqlplus, rman. 
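+//
+// The RPCs here either shell out to ORACLE_HOME tools (orapwd, rman, dbca,
+// impdp/expdp) or connect locally through the godror driver.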
+package dbdaemon + +import ( + "bufio" + "context" + "database/sql" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + + "github.com/godror/godror" // Register database/sql driver + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/empty" + "github.com/pkg/errors" + lropb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/security" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/lib/lro" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision" +) + +const ( + listenerDir = "/u02/app/oracle/oraconfig/network" +) + +var ( + oraDataDir = "/u02/app/oracle/oradata" + + maxWalkFiles = 10000 +) + +// oracleDatabase defines the sql.DB APIs, which will be used in this package +type oracleDatabase interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + Ping() error + Close() error +} + +type dbdaemon interface { + shutdownDatabase(context.Context, godror.ShutdownMode) error + startupDatabase(context.Context, godror.StartupMode, string) error + setDatabaseUpgradeMode(ctx context.Context) error + openPDBs(ctx context.Context) error + runSQL(context.Context, []string, bool, bool, oracleDatabase) ([]string, error) + runQuery(context.Context, []string, oracleDatabase) ([]string, error) +} + +// DB is a wrapper around database/sql.DB database handle. +// In unit tests it gets mocked with the FakeDB. +type DB struct { +} + +// Server holds a database config. +type Server struct { + *dbdpb.UnimplementedDatabaseDaemonServer + hostName string + database dbdaemon + databaseSid *syncState + databaseHome string + pdbConnStr string + osUtil osUtil + dbdClient dbdpb.DatabaseDaemonProxyClient + dbdClientClose func() error + lroServer *lro.Server + syncJobs *syncJobs + gcsUtil gcsUtil +} + +// Remove pdbConnStr from String(), as that may contain the pdb user/password +// Remove UnimplementedDatabaseDaemonServer field to improve logs for better readability +func (s Server) String() string { + pdbConnStr := s.pdbConnStr + if pdbConnStr != "" { + pdbConnStr = "" + } + return fmt.Sprintf("{hostName=%q, database=%+v, databaseSid=%+v, databaseHome=%q, pdbConnStr=%q}", s.hostName, s.database, s.databaseSid, s.databaseHome, pdbConnStr) +} + +type syncState struct { + sync.RWMutex + val string +} + +type syncJobs struct { + // pdbLoadMutex is a mutex for operations running + // under consts.PDBLoaderUser user, currently those are DataPump import/export. + // pdbLoadMutex is used to ensure only one of such operations is running at a time. + pdbLoadMutex sync.Mutex + + // Mutex used for maintenance operations (currently for patching) + maintenanceMutex sync.RWMutex +} + +// Call this function to get any buffered DMBS_OUTPUT. sqlplus* calls this +// after every command issued. 
Typically any output you expect to see from
+// sqlplus* will be returned via DBMS_OUTPUT.
+func dbmsOutputGetLines(ctx context.Context, db oracleDatabase) ([]string, error) {
+ lines := make([]string, 0, 1024)
+ status := 0
+ // 0 is success, until it fails there may be more lines buffered.
+ for status == 0 {
+ var line string
+ if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.GET_LINE(:line, :status); END;",
+ sql.Named("line", sql.Out{Dest: &line}),
+ sql.Named("status", sql.Out{Dest: &status, In: true})); err != nil {
+ return nil, err
+ }
+ if status == 0 {
+ lines = append(lines, line)
+ }
+ }
+ return lines, nil
+}
+
+// shutdownDatabase performs a database shutdown in a requested mode.
+// It always connects to the local database.
+// Set ORACLE_HOME and ORACLE_SID in the env to control the target database.
+// A caller may decide to ignore ORA-1034 and just log a warning
+// if a database has already been shut down (or raise an error if appropriate).
+func (d *DB) shutdownDatabase(ctx context.Context, mode godror.ShutdownMode) error {
+ // Consider allowing PRELIM mode connections for SHUTDOWN ABORT mode.
+ // This is useful when the server has maxed out on connections.
+ db, err := sql.Open("godror", "oracle://?sysdba=1")
+ if err != nil {
+ klog.ErrorS(err, "dbdaemon/shutdownDatabase: failed to connect to a database")
+ return err
+ }
+ defer db.Close()
+
+ oraDB, err := godror.DriverConn(ctx, db)
+ if err != nil {
+ return err
+ }
+ if err := oraDB.Shutdown(mode); err != nil {
+ return err
+ }
+ // The shutdown process is over after the first Shutdown call in ABORT
+ // mode.
+ if mode == godror.ShutdownAbort {
+ return err
+ }
+
+ _, err = db.Exec("alter database close normal")
+ if err != nil && strings.Contains(err.Error(), "ORA-01507:") {
+ klog.InfoS("dbdaemon/shutdownDatabase: database is already closed", "err", err)
+ err = nil
+ }
+ if err != nil {
+ return err
+ }
+
+ _, err = db.Exec("alter database dismount")
+ if err != nil && strings.Contains(err.Error(), "ORA-01507:") {
+ klog.InfoS("dbdaemon/shutdownDatabase: database is already dismounted", "err", err)
+ err = nil
+ }
+ if err != nil {
+ return err
+ }
+
+ return oraDB.Shutdown(godror.ShutdownFinal)
+}
+
+// startupDatabase performs a database startup in a requested mode.
+// godror.StartupMode controls FORCE/RESTRICT options.
+// databaseState string controls NOMOUNT/MOUNT/OPEN options.
+// Setting a pfile to use on startup is currently unsupported.
+// It always connects to the local database.
+// Set ORACLE_HOME and ORACLE_SID in the env to control the target database.
+func (d *DB) startupDatabase(ctx context.Context, mode godror.StartupMode, state string) error {
+ // To start up a shut-down database, open a prelim connection.
+ db, err := sql.Open("godror", "oracle://?sysdba=1&prelim=1")
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ oraDB, err := godror.DriverConn(ctx, db)
+ if err != nil {
+ return err
+ }
+ if err := oraDB.Startup(mode); err != nil {
+ return err
+ }
+ if strings.ToLower(state) == "nomount" {
+ return nil
+ }
+
+ // To finish mounting/opening, open a normal connection.
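+ // (A prelim connection can only start the instance; it cannot run
+ // SQL such as ALTER DATABASE, so a regular sysdba connection is
+ // opened for the mount/open steps below.)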
+ db2, err := sql.Open("godror", "oracle://?sysdba=1")
+ if err != nil {
+ return err
+ }
+ defer db2.Close()
+
+ if _, err := db2.Exec("alter database mount"); err != nil {
+ return err
+ }
+ if strings.ToLower(state) == "mount" {
+ return nil
+ }
+ _, err = db2.Exec("alter database open")
+ return err
+}
+
+// Turn a freshly started NOMOUNT database into upgrade (MIGRATE) mode:
+// open the CDB in upgrade mode, then open all PDBs in upgrade mode.
+// Executes the following steps:
+// SQL> alter database mount
+// SQL> alter database open upgrade
+// SQL> alter pluggable database all open upgrade
+func (d *DB) setDatabaseUpgradeMode(ctx context.Context) error {
+ db, err := sql.Open("godror", "oracle://?sysdba=1")
+ if err != nil {
+ return fmt.Errorf("dbdaemon/setDatabaseUpgradeMode failed to open DB connection: %w", err)
+ }
+ defer db.Close()
+
+ // SQL> alter database mount -- this will turn CDB$ROOT, PDB$SEED and all PDBs into 'MOUNTED' state
+ if _, err := db.Exec("alter database mount"); err != nil {
+ return err
+ }
+
+ // SQL> alter database open upgrade -- this will turn CDB$ROOT, PDB$SEED into 'MIGRATE' state
+ if _, err := db.Exec("alter database open upgrade"); err != nil {
+ return err
+ }
+
+ // SQL> alter pluggable database all open upgrade
+ if _, err := db.Exec("alter pluggable database all open upgrade"); err != nil {
+ return err
+ }
+
+ // At this point CDB$ROOT, PDB$SEED and all PDBs should be in 'MIGRATE' state
+ // Check that all container states = 'MIGRATE'
+
+ rows, err := db.Query("SELECT name,open_mode FROM v$containers")
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name, openMode string
+ if err := rows.Scan(&name, &openMode); err != nil {
+ return err
+ }
+ klog.InfoS("dbdaemon/setDatabaseUpgradeMode: container mode", "name", name, "openMode", openMode)
+ if openMode != "MIGRATE" {
+ return fmt.Errorf("failed to turn container %v into MIGRATE mode, current mode is %v", name, openMode)
+ }
+ }
+ return rows.Err()
+}
+
+// Open all PDBs
+func (d *DB) openPDBs(ctx context.Context) error {
+ db, err := sql.Open("godror", "oracle://?sysdba=1")
+ if err != nil {
+ return fmt.Errorf("dbdaemon/openPDBs: failed to open DB connection: %w", err)
+ }
+ defer db.Close()
+
+ // SQL> alter pluggable database all open
+ if _, err := db.Exec("alter pluggable database all open"); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CreatePasswordFile is a Database Daemon method to create a password file.
+func (s *Server) CreatePasswordFile(ctx context.Context, req *dbdpb.CreatePasswordFileRequest) (*dbdpb.CreatePasswordFileResponse, error) {
+ if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil {
+ return nil, fmt.Errorf("failed to set env variable: %v", err)
+ }
+
+ if req.GetDatabaseName() == "" {
+ return nil, fmt.Errorf("missing database name for req: %v", req)
+ }
+ if req.GetSysPassword() == "" {
+ return nil, fmt.Errorf("missing password for req: %v", req)
+ }
+
+ passwordFile := fmt.Sprintf("%s/orapw%s", req.Dir, strings.ToUpper(req.DatabaseName))
+
+ params := []string{fmt.Sprintf("file=%s", passwordFile)}
+ params = append(params, fmt.Sprintf("password=%s", req.SysPassword))
+ params = append(params, "ignorecase=n")
+
+ if err := os.Remove(passwordFile); err != nil {
+ klog.Warningf("failed to remove %v: %v", passwordFile, err)
+ }
+
+ if err := s.osUtil.runCommand(orapwd(s.databaseHome), params); err != nil {
+ return nil, fmt.Errorf("orapwd cmd failed: %v", err)
+ }
+ return &dbdpb.CreatePasswordFileResponse{}, nil
+}
+
+// CreateReplicaInitOraFile creates an init.ora file using the template and the provided parameters.
+func (s *Server) CreateReplicaInitOraFile(ctx context.Context, req *dbdpb.CreateReplicaInitOraFileRequest) (*dbdpb.CreateReplicaInitOraFileResponse, error) {
+ klog.InfoS("dbdaemon/CreateReplicaInitOraFile: not implemented in current release", "req", req)
+ return &dbdpb.CreateReplicaInitOraFileResponse{InitOraFileContent: ""}, nil
+}
+
+// SetListenerRegistration is a Database Daemon method to create a static listener registration.
+func (s *Server) SetListenerRegistration(ctx context.Context, req *dbdpb.SetListenerRegistrationRequest) (*dbdpb.BounceListenerResponse, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+// physicalRestore runs
+// 1. RMAN restore command
+// 2. SQL to get latest SCN
+// 3. RMAN recover command, created by applying the SCN value
+// to the recover statement template passed as a parameter.
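+// For instance (illustrative only), a recover statement template might be
+// "RUN { RECOVER DATABASE UNTIL SCN %d; }", with the queried SCN
+// substituted in before the RMAN call.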
+func (s *Server) physicalRestore(ctx context.Context, req *dbdpb.PhysicalRestoreRequest) (*empty.Empty, error) { + errorPrefix := "dbdaemon/physicalRestore: " + + if _, err := s.RunRMAN(ctx, &dbdpb.RunRMANRequest{Scripts: []string{req.GetRestoreStatement()}}); err != nil { + return nil, fmt.Errorf(errorPrefix+"failed to restore a database: %v", err) + } + + scnResp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{req.GetLatestRecoverableScnQuery()}}) + if err != nil || len(scnResp.GetMsg()) < 1 { + return nil, fmt.Errorf(errorPrefix+"failed to query archive log SCNs, results: %v, err: %v", scnResp, err) + } + + row := make(map[string]string) + if err := json.Unmarshal([]byte(scnResp.GetMsg()[0]), &row); err != nil { + return nil, err + } + + scn, ok := row["SCN"] + if !ok { + return nil, fmt.Errorf(errorPrefix + "failed to find column SCN in the archive log query") + } + + latestSCN, err := strconv.ParseInt(scn, 10, 64) + if err != nil { + return nil, fmt.Errorf(errorPrefix+"failed to parse the SCN query (%v) to find int64: %v", scn, err) + } + + recoverStmt := fmt.Sprintf(req.GetRecoverStatementTemplate(), latestSCN) + klog.InfoS(errorPrefix+"final recovery request", "recoverStmt", recoverStmt) + + recoverReq := &dbdpb.RunRMANRequest{Scripts: []string{recoverStmt}} + if _, err := s.RunRMAN(ctx, recoverReq); err != nil { + return nil, fmt.Errorf(errorPrefix+"failed to recover a database: %v", err) + } + + // always remove rman staging dir for restore from GCS + if err := os.RemoveAll(consts.RMANStagingDir); err != nil { + klog.Warningf("physicalRestore: can't cleanup staging dir from local disk.") + } + return &empty.Empty{}, nil +} + +// PhysicalRestoreAsync turns physicalRestore into an async call. +func (s *Server) PhysicalRestoreAsync(ctx context.Context, req *dbdpb.PhysicalRestoreAsyncRequest) (*lropb.Operation, error) { + job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "PhysicalRestore", s.lroServer, + func(ctx context.Context) (proto.Message, error) { + return s.physicalRestore(ctx, req.SyncRequest) + }) + + if err != nil { + klog.ErrorS(err, "dbdaemon/PhysicalRestoreAsync failed to create an LRO job", "request", req) + return nil, err + } + + return &lropb.Operation{Name: job.ID(), Done: false}, nil +} + +// dataPumpImport runs impdp Oracle tool against existing PDB which +// imports data from a data pump .dmp file. 
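+//
+// The .dmp file is first downloaded from req.GcsPath (for example, a
+// hypothetical "gs://some-bucket/exports/scott.dmp") into the PDB dump
+// directory before impdp runs.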
+func (s *Server) dataPumpImport(ctx context.Context, req *dbdpb.DataPumpImportRequest) (*dbdpb.DataPumpImportResponse, error) {
+ s.syncJobs.pdbLoadMutex.Lock()
+ defer s.syncJobs.pdbLoadMutex.Unlock()
+
+ importFilename := "import.dmp"
+ logFilename := "import.log"
+
+ pdbPath := fmt.Sprintf(consts.PDBPathPrefix, consts.DataMount, s.databaseSid.val, strings.ToUpper(req.PdbName))
+ dumpDir := filepath.Join(pdbPath, consts.DpdumpDir.Linux)
+ klog.InfoS("dbdaemon/dataPumpImport", "dumpDir", dumpDir)
+
+ dmpReader, err := s.gcsUtil.download(ctx, req.GcsPath)
+ if err != nil {
+ return nil, fmt.Errorf("dbdaemon/dataPumpImport: initiating GCS download failed: %v", err)
+ }
+ defer dmpReader.Close()
+
+ importFileFullPath := filepath.Join(dumpDir, importFilename)
+ if err := s.osUtil.createFile(importFileFullPath, dmpReader); err != nil {
+ return nil, fmt.Errorf("dbdaemon/dataPumpImport: download from GCS failed: %v", err)
+ }
+ klog.Infof("dbdaemon/dataPumpImport: downloaded import dmp file from %s to %s", req.GcsPath, importFileFullPath)
+ defer func() {
+ if err := s.osUtil.removeFile(importFileFullPath); err != nil {
+ klog.Warning(fmt.Sprintf("dbdaemon/dataPumpImport: failed to remove import dmp file after import: %v", err))
+ }
+ }()
+
+ impdpTarget, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.PDBLoaderUser, req.PdbName, req.DbDomain)
+ if err != nil {
+ return nil, fmt.Errorf("dbdaemon/dataPumpImport: failed to alter user %s: %v", consts.PDBLoaderUser, err)
+ }
+
+ params := []string{impdpTarget}
+ params = append(params, req.CommandParams...)
+ params = append(params, fmt.Sprintf("directory=%s", consts.DpdumpDir.Oracle))
+ params = append(params, "dumpfile="+importFilename)
+ params = append(params, "logfile="+logFilename)
+
+ if err := s.runCommand(impdp(s.databaseHome), params); err != nil {
+ // On error code 5 (EX_SUCC_ERR), the process completed and reached the
+ // end, but some data in the DMP might have been skipped (foreign
+ // schemas, already imported tables, or failed schema imports
+ // because the DMP didn't include CREATE USER statements).
+ if !s.osUtil.isReturnCodeEqual(err, 5) {
+ return nil, fmt.Errorf("data pump import failed, err = %v", err)
+ }
+
+ klog.Warning("dbdaemon/dataPumpImport: completed with EX_SUCC_ERR")
+ }
+
+ if len(req.GcsLogPath) > 0 {
+ logFullPath := filepath.Join(dumpDir, logFilename)
+
+ if err := s.gcsUtil.uploadFile(ctx, req.GcsLogPath, logFullPath, contentTypePlainText); err != nil {
+ return nil, fmt.Errorf("dbdaemon/dataPumpImport: import completed successfully, failed to upload import log to GCS: %v", err)
+ }
+
+ klog.Infof("dbdaemon/dataPumpImport: uploaded import log to %s", req.GcsLogPath)
+ }
+
+ return &dbdpb.DataPumpImportResponse{}, nil
+}
+
+// DataPumpImportAsync turns dataPumpImport into an async call.
+func (s *Server) DataPumpImportAsync(ctx context.Context, req *dbdpb.DataPumpImportAsyncRequest) (*lropb.Operation, error) {
+ job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "DataPumpImport", s.lroServer,
+ func(ctx context.Context) (proto.Message, error) {
+ return s.dataPumpImport(ctx, req.SyncRequest)
+ })
+
+ if err != nil {
+ klog.ErrorS(err, "dbdaemon/DataPumpImportAsync failed to create an LRO job", "request", req)
+ return nil, err
+ }
+
+ return &lropb.Operation{Name: job.ID(), Done: false}, nil
+}
+
+// dataPumpExport runs the expdp Oracle tool to export data to a data pump .dmp file.
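+//
+// Illustrative request values (hypothetical): ObjectType "TABLES",
+// Objects "SCOTT.EMP", GcsPath "gs://some-bucket/exports/emp.dmp".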
+func (s *Server) dataPumpExport(ctx context.Context, req *dbdpb.DataPumpExportRequest) (*dbdpb.DataPumpExportResponse, error) { + s.syncJobs.pdbLoadMutex.Lock() + defer s.syncJobs.pdbLoadMutex.Unlock() + + dmpObjectType := "SCHEMAS" + exportName := fmt.Sprintf("export_%s", time.Now().Format("20060102150405")) + dmpFile := exportName + ".dmp" + dmpLogFile := exportName + ".log" + parFile := exportName + ".par" + + if len(req.ObjectType) != 0 { + dmpObjectType = req.ObjectType + } + + pdbPath := fmt.Sprintf(consts.PDBPathPrefix, consts.DataMount, s.databaseSid.val, strings.ToUpper(req.PdbName)) + dmpPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, dmpFile) // full path + parPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, parFile) + + klog.InfoS("dbdaemon/dataPumpExport", "dmpPath", dmpPath) + + // Remove the dmp file from os if it already exists because oracle will not dump to existing files. + // expdp will log below errors: + // ORA-39000: bad dump file specification + // ORA-31641: unable to create dump file "/u02/app/oracle/oradata/TEST/PDB1/dmp/exportTable.dmp" + // ORA-27038: created file already exists + if err := os.Remove(dmpPath); err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("dataPumpExport failed: can't remove existing dmp file %s", dmpPath) + } + + expdpTarget, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.PDBLoaderUser, req.PdbName, req.DbDomain) + if err != nil { + return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to alter user %s", consts.PDBLoaderUser) + } + + var params []string + params = append(params, fmt.Sprintf("%s=%s", dmpObjectType, req.Objects)) + params = append(params, fmt.Sprintf("DIRECTORY=%s", consts.DpdumpDir.Oracle)) + params = append(params, fmt.Sprintf("DUMPFILE=%s", dmpFile)) + params = append(params, fmt.Sprintf("LOGFILE=%s", dmpLogFile)) + params = append(params, req.CommandParams...) + if len(req.FlashbackTime) != 0 { + params = append(params, fmt.Sprintf("FLASHBACK_TIME=%q", req.FlashbackTime)) + } + + // To avoid having to supply additional quotation marks on the command line, Oracle recommends the use of parameter files. 
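+ // An illustrative parameter file written here (values hypothetical):
+ //
+ //	TABLES=SCOTT.EMP
+ //	DIRECTORY=DMP_DIR
+ //	DUMPFILE=export_20060102150405.dmp
+ //	LOGFILE=export_20060102150405.log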
+ if err = writeParFile(parPath, params); err != nil { + return nil, fmt.Errorf("data pump export failed, err = %v", err) + } + + cmdParams := []string{expdpTarget} + cmdParams = append(cmdParams, fmt.Sprintf("parfile=%s", parPath)) + if err := s.runCommand(expdp(s.databaseHome), cmdParams); err != nil { + if s.osUtil.isReturnCodeEqual(err, 5) { // see dataPumpImport for an explanation of error code 5 + return nil, fmt.Errorf("data pump export failed, err = %v", err) + } + klog.Warning("dbdaemon/dataPumpExport: completed with EX_SUCC_ERR") + } + klog.Infof("dbdaemon/dataPumpExport: export to %s completed successfully", dmpPath) + + if err := s.gcsUtil.uploadFile(ctx, req.GcsPath, dmpPath, contentTypePlainText); err != nil { + return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to upload dmp file to %s: %v", req.GcsPath, err) + } + klog.Infof("dbdaemon/dataPumpExport: uploaded dmp file to %s", req.GcsPath) + + if len(req.GcsLogPath) > 0 { + logPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, dmpLogFile) + + if err := s.gcsUtil.uploadFile(ctx, req.GcsLogPath, logPath, contentTypePlainText); err != nil { + return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to upload log file to %s: %v", req.GcsLogPath, err) + } + klog.Infof("dbdaemon/dataPumpExport: uploaded log file to %s", req.GcsLogPath) + } + + return &dbdpb.DataPumpExportResponse{}, nil +} + +// writeParFile writes data pump export parameter file in parPath. +func writeParFile(parPath string, params []string) error { + f, err := os.Create(parPath) + if err != nil { + return err + } + defer func() { + if err := f.Close(); err != nil { + klog.Warningf("failed to close %v: %v", f, err) + } + }() + for _, param := range params { + if _, err := f.WriteString(param + "\n"); err != nil { + return err + } + } + return nil +} + +// DataPumpExportAsync turns dataPumpExport into an async call. +func (s *Server) DataPumpExportAsync(ctx context.Context, req *dbdpb.DataPumpExportAsyncRequest) (*lropb.Operation, error) { + job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "DataPumpExport", s.lroServer, + func(ctx context.Context) (proto.Message, error) { + return s.dataPumpExport(ctx, req.SyncRequest) + }) + + if err != nil { + klog.ErrorS(err, "dbdaemon/DataPumpExportAsync failed to create an LRO job", "request", req) + return nil, err + } + return &lropb.Operation{Name: job.ID(), Done: false}, nil +} + +// ListOperations returns a paged list of currently managed long running operations. +func (s *Server) ListOperations(ctx context.Context, req *lropb.ListOperationsRequest) (*lropb.ListOperationsResponse, error) { + return s.lroServer.ListOperations(ctx, req) +} + +// GetOperation returns details of a requested long running operation. +func (s *Server) GetOperation(ctx context.Context, req *lropb.GetOperationRequest) (*lropb.Operation, error) { + return s.lroServer.GetOperation(ctx, req) +} + +// DeleteOperation deletes a long running operation by its id. +func (s *Server) DeleteOperation(ctx context.Context, req *lropb.DeleteOperationRequest) (*empty.Empty, error) { + return s.lroServer.DeleteOperation(ctx, req) +} + +func (s *Server) runCommand(bin string, params []string) error { + // Sets env to bounce a database|listener. 
+ if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil { + return fmt.Errorf("failed to set env variable: %v", err) + } + if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil { + return fmt.Errorf("failed to set env variable: %v", err) + } + + return s.osUtil.runCommand(bin, params) +} + +var newDB = func(driverName, dataSourceName string) (oracleDatabase, error) { + return sql.Open(driverName, dataSourceName) +} + +// open returns a connection to the given database URL, +// When `prelim` is true, open will make a second connection attempt +// if the first connection fails. +// +// The caller is responsible for closing the returned connection. +// +// open method is created to break down runSQLPlusHelper and make the code +// testable, thus it returns interface oracleDatabase. +func open(ctx context.Context, dbURL string, prelim bool) (oracleDatabase, error) { + // "/ as sysdba" + db, err := newDB("godror", dbURL) + if err == nil { + // Force a connection with Ping. + err = db.Ping() + if err != nil { + // Connection pool opened but ping failed, close this pool. + if err := db.Close(); err != nil { + klog.Warningf("failed to close db connection: %v", err) + } + } + } + + if err != nil { + klog.ErrorS(err, "dbdaemon/open: newDB failed", "prelim", prelim) + if prelim { + // If a prelim connection is requested (e.g. for creating + // an spfile, also enable DBMS_OUTPUT. + db, err = newDB("godror", dbURL+"&prelim=1") + } + } + + if err != nil { + klog.ErrorS(err, "dbdaemon/open: newDB failed", "prelim", prelim) + return nil, err + } + + return db, nil +} + +func (d *DB) runSQL(ctx context.Context, sqls []string, prelim, suppress bool, db oracleDatabase) ([]string, error) { + sqlForLogging := strings.Join(sqls, ";") + if suppress { + sqlForLogging = "suppressed" + } + + // This will fail on prelim connections, so ignore errors in that case + if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.ENABLE(); END;"); err != nil && !prelim { + klog.ErrorS(err, "dbdaemon/runSQL: failed to enable dbms_output", "sql", sqlForLogging) + return nil, err + } + + klog.InfoS("dbdaemon/runSQL: running SQL statements", "sql", sqlForLogging) + + output := []string{} + for _, sql := range sqls { + if _, err := db.ExecContext(ctx, sql); err != nil { + klog.ErrorS(err, "dbdaemon/runSQL: failed to execute", "sql", sqlForLogging) + return nil, err + } + out, err := dbmsOutputGetLines(ctx, db) + if err != nil && !prelim { + klog.ErrorS(err, "dbdaemon/runSQL: failed to get DBMS_OUTPUT", "sql", sqlForLogging) + return nil, err + } + output = append(output, out...) + } + + return output, nil +} + +func (d *DB) runQuery(ctx context.Context, sqls []string, db oracleDatabase) ([]string, error) { + klog.InfoS("dbdaemon/runQuery: running sql", "sql", sqls) + sqlLen := len(sqls) + for i := 0; i < sqlLen-1; i++ { + if _, err := db.ExecContext(ctx, sqls[i]); err != nil { + return nil, err + } + } + rows, err := db.QueryContext(ctx, sqls[sqlLen-1]) + if err != nil { + klog.ErrorS(err, "dbdaemon/runQuery: failed to query a database", "sql", sqls[sqlLen-1]) + return nil, err + } + defer rows.Close() + + colNames, err := rows.Columns() + if err != nil { + klog.ErrorS(err, "dbdaemon/runQuery: failed to get column names for query", "sql", sqls[sqlLen-1]) + return nil, err + } + + var output []string + for rows.Next() { + // Store as strings, database/sql will handle conversion to + // string type for us in Rows.Scan. 
+ data := make([]string, len(colNames)) + dataPtr := make([]interface{}, len(colNames)) + for i := range colNames { + dataPtr[i] = &data[i] + } + if err := rows.Scan(dataPtr...); err != nil { + klog.ErrorS(err, "dbdaemon/runQuery: failed to read a row") + return nil, err + } + + // Convert row to JSON map + dataMap := map[string]string{} + for i, colName := range colNames { + dataMap[colName] = data[i] + } + j, err := json.Marshal(dataMap) + if err != nil { + klog.ErrorS(err, "dbdaemon/runQuery: failed to marshal a data map", "dataMap", dataMap) + return nil, err + } + output = append(output, string(j)) + } + return output, nil +} + +func (s *Server) runSQLPlusHelper(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest, formattedSQL bool) (*dbdpb.RunCMDResponse, error) { + + if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + if req.GetTnsAdmin() != "" { + if err := os.Setenv("TNS_ADMIN", req.GetTnsAdmin()); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + defer func() { + if err := os.Unsetenv("TNS_ADMIN"); err != nil { + klog.Warningf("failed to unset env variable: %v", err) + } + }() + } + + sqls := req.GetCommands() + if len(sqls) < 1 { + return nil, fmt.Errorf("dbdaemon/RunSQLPlus requires a sql statement to run, provided: %d", len(sqls)) + } + + // formattedSQL = query, hence it is not an op that needs a prelim conn. + // Only enable prelim for known prelim queries, CREATE SPFILE and CREATE PFILE. + var prelim bool + if !formattedSQL && (strings.HasPrefix(strings.ToLower(sqls[0]), "create spfile") || + strings.HasPrefix(strings.ToLower(sqls[0]), "create pfile")) { + prelim = true + } + + // This default connect string requires the ORACLE_SID env variable to be set. + connectString := "oracle://?sysdba=1" + + switch req.ConnectInfo.(type) { + case *dbdpb.RunSQLPlusCMDRequest_Dsn: + connectString = req.GetDsn() + case *dbdpb.RunSQLPlusCMDRequest_DatabaseName: + if err := os.Setenv("ORACLE_SID", req.GetDatabaseName()); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + case *dbdpb.RunSQLPlusCMDRequest_Local: + if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + default: + // For backward compatibility if connect_info field isn't defined in the request + // we fallback to the Local option. + if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + } + + klog.InfoS("dbdaemon/runSQLPlusHelper: updated env ", "sid", s.databaseSid.val) + db, err := open(ctx, connectString, prelim) + if err != nil { + return nil, fmt.Errorf("dbdaemon/RunSQLPlus failed to open a database connection: %v", err) + } + defer func() { + if err := db.Close(); err != nil { + klog.Warningf("failed to close db connection: %v", err) + } + }() + + var o []string + if formattedSQL { + o, err = s.database.runQuery(ctx, sqls, db) + } else { + o, err = s.database.runSQL(ctx, sqls, prelim, req.GetSuppress(), db) + } + if err != nil { + klog.ErrorS(err, "dbdaemon/RunSQLPlus: error in execution", "formattedSQL", formattedSQL, "ORACLE_SID", s.databaseSid.val) + return nil, err + } + + klog.InfoS("dbdaemon/RunSQLPlus", "output", strings.Join(o, "\n")) + return &dbdpb.RunCMDResponse{Msg: o}, nil +} + +// RunSQLPlus executes oracle's sqlplus and returns output. 
+// This function only returns DBMS_OUTPUT and not any row data. +// To read from SELECTs use RunSQLPlusFormatted. +func (s *Server) RunSQLPlus(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + if req.GetSuppress() { + klog.InfoS("dbdaemon/RunSQLPlus", "req", "suppressed", "serverObj", s) + } else { + klog.InfoS("dbdaemon/RunSQLPlus", "req", req, "serverObj", s) + } + + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // Only add lock in top level API to avoid deadlock. + s.databaseSid.Lock() + defer s.databaseSid.Unlock() + return s.runSQLPlusHelper(ctx, req, false) +} + +// RunSQLPlusFormatted executes a SQL command and returns the row results. +// If instead you want DBMS_OUTPUT please issue RunSQLPlus +func (s *Server) RunSQLPlusFormatted(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + if req.GetSuppress() { + klog.InfoS("dbdaemon/RunSQLPlusFormatted", "req", "suppressed", "serverObj", s) + } else { + klog.InfoS("dbdaemon/RunSQLPlusFormatted", "req", req, "serverObj", s) + } + sqls := req.GetCommands() + klog.InfoS("dbdaemon/RunSQLPlusFormatted: executing formatted SQL commands", "sql", sqls) + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // Only add lock in top level API to avoid deadlock. + s.databaseSid.Lock() + defer s.databaseSid.Unlock() + + return s.runSQLPlusHelper(ctx, req, true) +} + +// KnownPDBs runs a database query returning a list of PDBs known +// to a database. By default it doesn't include a seed PDB. +// It also by default doesn't pay attention to a state of a PDB. +// A caller can overwrite both of the above settings with the flags. +func (s *Server) KnownPDBs(ctx context.Context, req *dbdpb.KnownPDBsRequest) (*dbdpb.KnownPDBsResponse, error) { + klog.InfoS("dbdaemon/KnownPDBs", "req", req, "serverObj", s) + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // Only add lock in top level API to avoid deadlock. 
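+ // (A read lock suffices: KnownPDBs only reads databaseSid, unlike
+ // NID, which takes the write lock to change it.)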
+ s.databaseSid.RLock()
+ defer s.databaseSid.RUnlock()
+ knownPDBs, err := s.knownPDBs(ctx, req.GetIncludeSeed(), req.GetOnlyOpen())
+ if err != nil {
+ return nil, err
+ }
+ return &dbdpb.KnownPDBsResponse{KnownPdbs: knownPDBs}, nil
+}
+
+func (s *Server) knownPDBs(ctx context.Context, includeSeed, onlyOpen bool) ([]string, error) {
+ sql := consts.ListPDBsSQL
+
+ if !includeSeed {
+ where := "and name != 'PDB$SEED'"
+ sql = fmt.Sprintf("%s %s", sql, where)
+ }
+
+ if onlyOpen {
+ where := "and open_mode = 'READ WRITE'"
+ sql = fmt.Sprintf("%s %s", sql, where)
+ }
+
+ resp, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{sql}}, true)
+ if err != nil {
+ return nil, err
+ }
+ klog.InfoS("dbdaemon/knownPDBs", "resp", resp)
+
+ var knownPDBs []string
+ for _, msg := range resp.Msg {
+ row := make(map[string]string)
+ if err := json.Unmarshal([]byte(msg), &row); err != nil {
+ klog.ErrorS(err, "dbdaemon/knownPDBs: failed to unmarshal PDB query resultset")
+ return nil, err
+ }
+ if name, ok := row["NAME"]; ok {
+ knownPDBs = append(knownPDBs, name)
+ }
+ }
+ klog.InfoS("dbdaemon/knownPDBs", "knownPDBs", knownPDBs)
+
+ return knownPDBs, nil
+}
+
+func (s *Server) isKnownPDB(ctx context.Context, name string, includeSeed, onlyOpen bool) (bool, []string) {
+ knownPDBs, err := s.knownPDBs(ctx, includeSeed, onlyOpen)
+ if err != nil {
+ return false, nil
+ }
+
+ for _, pdb := range knownPDBs {
+ if pdb == strings.ToUpper(name) {
+ return true, knownPDBs
+ }
+ }
+ return false, knownPDBs
+}
+
+// CheckDatabaseState pings a database to check its status.
+// This method has been tested for checking a CDB state.
+func (s *Server) CheckDatabaseState(ctx context.Context, req *dbdpb.CheckDatabaseStateRequest) (*dbdpb.CheckDatabaseStateResponse, error) {
+ klog.InfoS("dbdaemon/CheckDatabaseState", "req", req, "serverObj", s)
+ reqDatabaseName := req.GetDatabaseName()
+ if reqDatabaseName == "" {
+ return nil, fmt.Errorf("a database check is requested, but a mandatory database name parameter is not provided (server: %v)", s)
+ }
+
+ var dbURL string
+ if req.GetIsCdb() {
+ // Local connection, set env variables.
+ if err := os.Setenv("ORACLE_SID", req.GetDatabaseName()); err != nil {
+ return nil, err
+ }
+ if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil {
+ return nil, err
+ }
+ // Even for a CDB check, use a TNS connection to verify listener health.
+ cs, pass, err := security.SetupConnStringOnServer(ctx, s, consts.SecurityUser, req.GetDatabaseName(), req.GetDbDomain())
+ if err != nil {
+ return nil, fmt.Errorf("dbdaemon/CheckDatabaseState: failed to alter user %s: %v", consts.SecurityUser, err)
+ }
+ dbURL = fmt.Sprintf("user=%q password=%q connectString=%q standaloneConnection=true",
+ consts.SecurityUser, pass, cs)
+ } else {
+ // A PDB that a Database Daemon is requested to operate on
+ // must be part of the Server object (set based on the metadata).
+ // ("part of" allows for future support of multiple PDBs per CDB.)
+ if known, knownPDBs := s.isKnownPDB(ctx, reqDatabaseName, false, false); !known {
+ return nil, fmt.Errorf("%q is not in the known PDB list: %v", reqDatabaseName, knownPDBs)
+ }
+
+ // Set the security user's password if it has not been set yet.
+ if s.pdbConnStr == "" { + cs, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.SecurityUser, reqDatabaseName, req.GetDbDomain()) + if err != nil { + return nil, fmt.Errorf("dbdaemon/CheckDatabaseState: failed to alter user %s", consts.SecurityUser) + } + s.pdbConnStr = cs + } + // Use new PDB connection string to check PDB status. + dbURL = s.pdbConnStr + } + + db, err := sql.Open("godror", dbURL) + if err != nil { + klog.ErrorS(err, "dbdaemon/CheckDatabaseState: failed to open a database") + return nil, err + } + defer db.Close() + + if err := db.PingContext(ctx); err != nil { + klog.ErrorS(err, "dbdaemon/CheckDatabaseState: database not running") + return nil, fmt.Errorf("cannot connect to database %s: %v", reqDatabaseName, err) + } + return &dbdpb.CheckDatabaseStateResponse{}, nil +} + +// RunRMAN will run the script to execute RMAN and create a physical backup in the target directory, then back it up to GCS if requested +func (s *Server) RunRMAN(ctx context.Context, req *dbdpb.RunRMANRequest) (*dbdpb.RunRMANResponse, error) { + // Required for local connections (when no SID is specified on connect string). + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // Only add lock in top level API to avoid deadlock. + if req.GetSuppress() { + klog.Info("RunRMAN", "request", "suppressed") + } else { + klog.Info("RunRMAN", "request", req) + } + + s.databaseSid.RLock() + defer s.databaseSid.RUnlock() + if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + if req.GetTnsAdmin() != "" { + if err := os.Setenv("TNS_ADMIN", req.GetTnsAdmin()); err != nil { + return nil, fmt.Errorf("failed to set env variable: %v", err) + } + defer func() { + if err := os.Unsetenv("TNS_ADMIN"); err != nil { + klog.Warningf("failed to unset env variable: %v", err) + } + }() + } + + scripts := req.GetScripts() + if len(scripts) < 1 { + return nil, fmt.Errorf("RunRMAN requires at least 1 script to run, provided: %d", len(scripts)) + } + var res []string + for _, script := range scripts { + target := "/" + if req.GetTarget() != "" { + target = req.GetTarget() + } + args := []string{fmt.Sprintf("target=%s", target)} + + if req.GetAuxiliary() != "" { + args = append(args, fmt.Sprintf("auxiliary=%s", req.Auxiliary)) + } + + args = append(args, "@/dev/stdin") + + cmd := exec.Command(rman(s.databaseHome), args...) + cmd.Stdin = strings.NewReader(script) + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("RunRMAN failed,\nscript: %q\nFailed with: %v\nErr: %v", script, string(out), err) + } + res = append(res, string(out)) + + if req.GetGcsPath() != "" && req.GetCmd() == consts.RMANBackup { + if err = s.uploadDirectoryContentsToGCS(ctx, consts.RMANStagingDir, req.GetGcsPath()); err != nil { + klog.ErrorS(err, "GCS Upload error:") + return nil, err + } + } + } + + return &dbdpb.RunRMANResponse{Output: res}, nil +} + +// RunRMANAsync turns RunRMAN into an async call. 
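+//
+// A polling sketch for the returned operation (illustrative, not prescriptive):
+//
+//	op, _ := s.RunRMANAsync(ctx, req) // op.Name is the LRO job ID
+//	op, _ = s.GetOperation(ctx, &lropb.GetOperationRequest{Name: op.Name})
+//	done := op.Done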
+func (s *Server) RunRMANAsync(ctx context.Context, req *dbdpb.RunRMANAsyncRequest) (*lropb.Operation, error) { + job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "RMAN", s.lroServer, + func(ctx context.Context) (proto.Message, error) { + return s.RunRMAN(ctx, req.SyncRequest) + }) + + if err != nil { + klog.ErrorS(err, "dbdaemon/RunRMANAsync failed to create an LRO job", "request", req) + return nil, err + } + + return &lropb.Operation{Name: job.ID(), Done: false}, nil +} + +func (s *Server) uploadDirectoryContentsToGCS(ctx context.Context, backupDir, gcsPath string) error { + klog.InfoS("RunRMAN: uploadDirectoryContentsToGCS", "backupdir", backupDir, "gcsPath", gcsPath) + err := filepath.Walk(backupDir, func(fpath string, info os.FileInfo, errInner error) error { + klog.InfoS("RunRMAN: walking...", "fpath", fpath, "info", info, "errInner", errInner) + if errInner != nil { + return errInner + } + if info.IsDir() { + return nil + } + + relPath, err := filepath.Rel(backupDir, fpath) + if err != nil { + return errors.Errorf("filepath.Rel(%s, %s) returned err: %s", backupDir, fpath, err) + } + gcsTarget, err := url.Parse(gcsPath) + if err != nil { + return errors.Errorf("invalid GcsPath err: %v", err) + } + gcsTarget.Path = path.Join(gcsTarget.Path, relPath) + klog.InfoS("gcs", "target", gcsTarget) + start := time.Now() + err = s.gcsUtil.uploadFile(ctx, gcsTarget.String(), fpath, contentTypePlainText) + if err != nil { + return err + } + end := time.Now() + rate := float64(info.Size()) / (end.Sub(start).Seconds()) + klog.Infof("Uploaded %s (%f MB/s)\n", gcsTarget.String(), rate/1024/1024) + + return nil + }) + + if err := os.RemoveAll(consts.RMANStagingDir); err != nil { + klog.Warningf("uploadDirectoryContentsToGCS: can't cleanup staging dir from local disk.") + } + return err +} + +// NID changes a database id and/or database name. +func (s *Server) NID(ctx context.Context, req *dbdpb.NIDRequest) (*dbdpb.NIDResponse, error) { + params := []string{"target=/"} + if req.GetSid() == "" { + return nil, fmt.Errorf("dbdaemon/NID: missing sid for req: %v", req) + } + if err := os.Setenv("ORACLE_HOME", s.databaseHome); err != nil { + return nil, fmt.Errorf("dbdaemon/NID: set env ORACLE_HOME failed: %v", err) + } + if err := os.Setenv("ORACLE_SID", req.GetSid()); err != nil { + return nil, fmt.Errorf("dbdaemon/NID: set env ORACLE_SID failed: %v", err) + } + + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // When renaming the DB, DB is not ready to run cmds or SQLs, it seems to be ok to block all other APIs for now. + s.databaseSid.Lock() + defer s.databaseSid.Unlock() + if req.GetDatabaseName() != "" { + s.databaseSid.val = req.GetDatabaseName() + params = append(params, fmt.Sprintf("dbname=%s", req.GetDatabaseName())) + } + + params = append(params, "logfile=/home/oracle/nid.log") + + _, err := s.dbdClient.ProxyRunNID(ctx, &dbdpb.ProxyRunNIDRequest{Params: params, DestDbName: req.GetDatabaseName()}) + if err != nil { + return nil, fmt.Errorf("nid failed: %v", err) + } + + klog.InfoS("dbdaemon/NID: done", "req", req) + return &dbdpb.NIDResponse{}, nil +} + +// GetDatabaseType returns database type, eg. 
ORACLE_12_2_ENTERPRISE_NONCDB +func (s *Server) GetDatabaseType(ctx context.Context, req *dbdpb.GetDatabaseTypeRequest) (*dbdpb.GetDatabaseTypeResponse, error) { + f, err := os.Open(consts.OraTab) + if err != nil { + return nil, fmt.Errorf("GetDatabaseType: failed to open %q", consts.OraTab) + } + defer func() { + if err := f.Close(); err != nil { + klog.Warningf("failed to close %v: %v", f, err) + } + }() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + // The content of oratab is expected to be of the form: + // # comments + // :DatabaseHome: + // # DATABASETYPE:ORACLE_12_2_ENTERPRISE_NONCDB + if !strings.HasPrefix(line, "# DATABASETYPE") { + continue + } + fragment := strings.Split(line, ":") + if len(fragment) != 2 { + return nil, fmt.Errorf("GetDatabaseType: failed to parse %q for database type(number of fields is %d, not 2)", consts.OraTab, len(fragment)) + } + + switch fragment[1] { + case "ORACLE_12_2_ENTERPRISE": + return &dbdpb.GetDatabaseTypeResponse{ + DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE, + }, nil + case "ORACLE_12_2_ENTERPRISE_NONCDB": + return &dbdpb.GetDatabaseTypeResponse{ + DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE_NONCDB, + }, nil + default: + return nil, fmt.Errorf("GetDatabaseType: failed to get valid database type from %q", consts.OraTab) + } + } + + // For backward compatibility, return ORACLE_12_2_ENTERPRISE by default + return &dbdpb.GetDatabaseTypeResponse{ + DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE, + }, nil +} + +// GetDatabaseName returns database name. +func (s *Server) GetDatabaseName(ctx context.Context, req *dbdpb.GetDatabaseNameRequest) (*dbdpb.GetDatabaseNameResponse, error) { + //databaseSid value will be set in dbdserver's constructor and NID API with write lock. + //databaseSid is expected to be valid in dbdserver's life cycle. + s.databaseSid.RLock() + defer s.databaseSid.RUnlock() + + return &dbdpb.GetDatabaseNameResponse{DatabaseName: s.databaseSid.val}, nil +} + +// BounceDatabase starts/stops request specified database. +func (s *Server) BounceDatabase(ctx context.Context, req *dbdpb.BounceDatabaseRequest) (*dbdpb.BounceDatabaseResponse, error) { + klog.InfoS("BounceDatabase request delegated to proxy", "req", req) + database, err := s.dbdClient.BounceDatabase(ctx, req) + if err != nil { + msg := "dbdaemon/BounceDatabase: error while bouncing database" + klog.InfoS(msg, "err", err) + return nil, fmt.Errorf("%s: %v", msg, err) + } + if req.Operation == dbdpb.BounceDatabaseRequest_STARTUP && !req.GetAvoidConfigBackup() { + if err := s.BackupConfigFile(ctx, s.databaseSid.val); err != nil { + msg := "dbdaemon/BounceDatabase: error while backing up config file: err" + klog.InfoS(msg, "err", err) + return nil, fmt.Errorf("%s: %v", msg, err) + } + klog.InfoS("dbdaemon/BounceDatabase start operation: config file backup successful") + } + return database, err +} + +// BounceListener starts/stops request specified listener. +func (s *Server) BounceListener(ctx context.Context, req *dbdpb.BounceListenerRequest) (*dbdpb.BounceListenerResponse, error) { + klog.InfoS("BounceListener request delegated to proxy", "req", req) + return s.dbdClient.BounceListener(ctx, req) +} + +func (s *Server) close() { + if err := s.dbdClientClose(); err != nil { + klog.Warningf("failed to close dbdaemon client: %v", err) + } +} + +// BootstrapStandby perform bootstrap tasks for standby instance. 
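+//
+// It ensures an spfile exists (creating one from memory if the instance
+// was started without one), points the proxy at it via SetEnv, and then
+// marks the instance as provisioned.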
+func (s *Server) BootstrapStandby(ctx context.Context, req *dbdpb.BootstrapStandbyRequest) (*dbdpb.BootstrapStandbyResponse, error) { + klog.InfoS("dbdaemon/BootstrapStandby", "req", req) + cdbName := req.GetCdbName() + spfile := filepath.Join(fmt.Sprintf(consts.ConfigDir, consts.DataMount, cdbName), fmt.Sprintf("spfile%s.ora", cdbName)) + + resp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{"select value from v$parameter where name='spfile'"}}) + if err != nil || len(resp.GetMsg()) < 1 { + return nil, fmt.Errorf("dbdaemon/BootstrapStandby: failed to check spfile, results: %v, err: %v", resp, err) + } + row := make(map[string]string) + if err := json.Unmarshal([]byte(resp.GetMsg()[0]), &row); err != nil { + return nil, err + } + + value, _ := row["VALUE"] + if value != "" { + spfile = value + } else { + _, err := s.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{fmt.Sprintf("create spfile='%s' from memory", spfile)}, Suppress: false}) + if err != nil { + return nil, fmt.Errorf("dbdaemon/BootstrapStandby: failed to create spfile from memory: %v", err) + } + } + + if _, err = s.dbdClient.SetEnv(ctx, &dbdpb.SetEnvRequest{ + OracleHome: s.databaseHome, + CdbName: req.GetCdbName(), + SpfilePath: spfile, + }); err != nil { + return nil, fmt.Errorf("dbdaemon/BootstrapStandby: proxy failed to SetEnv: %v", err) + } + klog.InfoS("dbdaemon/BootstrapStandby: spfile creation/relocation completed successfully") + + if err := markProvisioned(); err != nil { + return nil, fmt.Errorf("dbdaemon/BootstrapStandby: error while creating provisioning file: %v", err) + } + klog.InfoS("dbdaemon/BootstrapStandby: Provisioning file created successfully") + return &dbdpb.BootstrapStandbyResponse{}, nil +} + +// CreateCDB creates a database instance +func (s *Server) CreateCDB(ctx context.Context, req *dbdpb.CreateCDBRequest) (*dbdpb.CreateCDBResponse, error) { + klog.InfoS("CreateCDB request invoked", "req", req) + + password, err := security.RandOraclePassword() + if err != nil { + return nil, fmt.Errorf("error generating temporary password") + } + characterSet := req.GetCharacterSet() + sid := req.GetDatabaseName() + memoryPercent := req.GetMemoryPercent() + var initParams string + + if sid == "" { + return nil, fmt.Errorf("dbdaemon/CreateCDB: DBname is empty") + } + if characterSet == "" { + characterSet = "AL32UTF8" + } + if memoryPercent == 0 { + memoryPercent = 25 + } + if req.GetAdditionalParams() == nil { + initParams = strings.Join(provision.MapToSlice(provision.GetDefaultInitParams(req.DatabaseName)), ",") + if req.GetDbDomain() != "" { + initParams = fmt.Sprintf("%s,DB_DOMAIN=%s", initParams, req.GetDbDomain()) + } + } else { + + foundDBDomain := false + for _, param := range req.GetAdditionalParams() { + if strings.Contains(strings.ToUpper(param), "DB_DOMAIN=") { + foundDBDomain = true + break + } + } + initParamsArr := req.GetAdditionalParams() + if !foundDBDomain && req.GetDbDomain() != "" { + initParamsArr = append(initParamsArr, fmt.Sprintf("DB_DOMAIN=%s", req.GetDbDomain())) + } + + initParamsMap, err := provision.MergeInitParams(provision.GetDefaultInitParams(req.DatabaseName), initParamsArr) + if err != nil { + return nil, fmt.Errorf("error while merging user defined init params with default values, %v", err) + } + initParamsArr = provision.MapToSlice(initParamsMap) + initParams = strings.Join(initParamsArr, ",") + } + + params := []string{ + "-silent", + "-createDatabase", + "-templateName", "General_Purpose.dbc", + "-gdbName", sid, + 
"-responseFile", "NO_VALUE", + "-createAsContainerDatabase", strconv.FormatBool(true), + "-sid", sid, + "-characterSet", characterSet, + fmt.Sprintf("-memoryPercentage"), strconv.FormatInt(int64(memoryPercent), 10), + "-emConfiguration", "NONE", + "-datafileDestination", oraDataDir, + "-storageType", "FS", + "-initParams", initParams, + "-databaseType", "MULTIPURPOSE", + "-recoveryAreaDestination", "/u03/app/oracle/fast_recovery_area", + "-sysPassword", password, + "-systemPassword", password, + } + + _, err = s.dbdClient.ProxyRunDbca(ctx, &dbdpb.ProxyRunDbcaRequest{OracleHome: s.databaseHome, DatabaseName: req.DatabaseName, Params: params}) + if err != nil { + return nil, fmt.Errorf("error while running dbca command: %v", err) + } + klog.InfoS("CDB created successfully") + + if err := markProvisioned(); err != nil { + return nil, fmt.Errorf("error while creating provisioning file: %v", err) + } + klog.InfoS("Provisioning file created successfully") + + if err := setEnvNew(s, s.databaseHome, req.DatabaseName); err != nil { + return nil, fmt.Errorf("failed to setup environment: %v", err) + } + // hack fix for new PDB listener + if _, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: []string{fmt.Sprintf("alter system set local_listener='(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=REGLSNR_%d)))' scope=both", consts.SecureListenerPort)}, + }, false); err != nil { + klog.Error(err, "set local_listener error") + } + klog.InfoS("Env setup successfully") + return &dbdpb.CreateCDBResponse{}, nil +} + +// CreateCDBAsync turns CreateCDB into an async call. +func (s *Server) CreateCDBAsync(ctx context.Context, req *dbdpb.CreateCDBAsyncRequest) (*lropb.Operation, error) { + job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "CreateCDB", s.lroServer, + func(ctx context.Context) (proto.Message, error) { + return s.CreateCDB(ctx, req.SyncRequest) + }) + + if err != nil { + klog.ErrorS(err, "dbdaemon/CreateCDBAsync failed to create an LRO job", "request", req) + return nil, err + } + + return &lropb.Operation{Name: job.ID(), Done: false}, nil +} + +func setEnvNew(s *Server, home string, dbName string) error { + s.databaseHome = home + s.databaseSid.val = dbName + if err := provision.RelinkConfigFiles(home, dbName); err != nil { + return err + } + return nil +} + +// markProvisioned creates a flag file to indicate that CDB provisioning completed successfully +func markProvisioned() error { + f, err := os.Create(consts.ProvisioningDoneFile) + if err != nil { + return fmt.Errorf("could not create %s file: %v", consts.ProvisioningDoneFile, err) + } + defer func() { + if err := f.Close(); err != nil { + klog.Warningf("failed to close %v: %v", f, err) + } + }() + return nil +} + +// A user running this program should not be root and +// a primary group should be either dba or oinstall. +func oracleUserUIDGID(skipChecking bool) (uint32, uint32, error) { + if skipChecking { + klog.InfoS("oracleUserUIDGID: skipped by request") + return 0, 0, nil + } + u, err := user.Lookup(consts.OraUser) + if err != nil { + return 0, 0, fmt.Errorf("oracleUserUIDGID: could not determine the current user: %v", err) + } + + if u.Username == "root" { + return 0, 0, fmt.Errorf("oracleUserUIDGID: this program is designed to run by the Oracle software installation owner (e.g. oracle), not %q", u.Username) + } + + groups := consts.OraGroup + var gids []string + for _, group := range groups { + g, err := user.LookupGroup(group) + // Not both groups are mandatory, e.g. 
+// oracleUserUIDGID returns the UID and GID of the Oracle software owner.
+// The user running this program should not be root, and its primary group
+// should be either dba or oinstall.
+func oracleUserUIDGID(skipChecking bool) (uint32, uint32, error) {
+	if skipChecking {
+		klog.InfoS("oracleUserUIDGID: skipped by request")
+		return 0, 0, nil
+	}
+	u, err := user.Lookup(consts.OraUser)
+	if err != nil {
+		return 0, 0, fmt.Errorf("oracleUserUIDGID: could not look up user %q: %v", consts.OraUser, err)
+	}
+
+	if u.Username == "root" {
+		return 0, 0, fmt.Errorf("oracleUserUIDGID: this program is designed to run by the Oracle software installation owner (e.g. oracle), not %q", u.Username)
+	}
+
+	groups := consts.OraGroup
+	var gids []string
+	for _, group := range groups {
+		g, err := user.LookupGroup(group)
+		// Not all groups are mandatory, e.g. oinstall may not exist.
+		klog.InfoS("oracleUserUIDGID: group lookup", "group", group, "g", g)
+		if err != nil {
+			continue
+		}
+		gids = append(gids, g.Gid)
+	}
+	for _, g := range gids {
+		if u.Gid == g {
+			usr, err := strconv.ParseUint(u.Uid, 10, 32)
+			if err != nil {
+				return 0, 0, err
+			}
+			grp, err := strconv.ParseUint(u.Gid, 10, 32)
+			if err != nil {
+				return 0, 0, err
+			}
+			return uint32(usr), uint32(grp), nil
+		}
+	}
+	return 0, 0, fmt.Errorf("oracleUserUIDGID: current user's primary group (GID=%q) is not dba|oinstall (GID=%q)", u.Gid, gids)
+}
+
+// CreateListener creates a new listener for the database.
+func (s *Server) CreateListener(ctx context.Context, req *dbdpb.CreateListenerRequest) (*dbdpb.CreateListenerResponse, error) {
+	var domain string
+	if req.GetDbDomain() != "" {
+		domain = fmt.Sprintf(".%s", req.GetDbDomain())
+	}
+	uid, gid, err := oracleUserUIDGID(true)
+	if err != nil {
+		return nil, fmt.Errorf("initDBListeners: get uid gid failed: %v", err)
+	}
+	l := &provision.ListenerInput{
+		DatabaseName: req.DatabaseName,
+		DatabaseBase: consts.OracleBase,
+		DatabaseHome: s.databaseHome,
+		DatabaseHost: s.hostName,
+		DBDomain:     domain,
+	}
+
+	pdbNames, err := s.fetchPDBNames(ctx)
+	if err != nil {
+		return nil, err
+	}
+	l.PluggableDatabaseNames = pdbNames
+
+	lType := consts.SECURE
+	lDir := filepath.Join(listenerDir, lType)
+	listenerFileContent, tnsFileContent, sqlNetContent, err := provision.LoadTemplateListener(l, lType, fmt.Sprint(req.Port), req.Protocol)
+	if err != nil {
+		return &dbdpb.CreateListenerResponse{}, fmt.Errorf("initDBListeners: loading template for listener %q failed: %v", req.DatabaseName, err)
+	}
+
+	if err := provision.MakeDirs(ctx, []string{lDir}, uid, gid); err != nil {
+		return nil, fmt.Errorf("initDBListeners: making a listener directory %q failed: %v", lDir, err)
+	}
+
+	// Prepare listener.ora.
+	if err := ioutil.WriteFile(filepath.Join(lDir, "listener.ora"), []byte(listenerFileContent), 0600); err != nil {
+		return nil, fmt.Errorf("initDBListeners: creating a listener.ora file failed: %v", err)
+	}
+
+	// Prepare sqlnet.ora.
+	if err := ioutil.WriteFile(filepath.Join(lDir, "sqlnet.ora"), []byte(sqlNetContent), 0600); err != nil {
+		return nil, fmt.Errorf("initDBListeners: unable to write sqlnet: %v", err)
+	}
+
+	// Prepare tnsnames.ora.
+	if err := ioutil.WriteFile(filepath.Join(lDir, "tnsnames.ora"), []byte(tnsFileContent), 0600); err != nil {
+		return nil, fmt.Errorf("initDBListeners: creating a tnsnames.ora file failed: %v", err)
+	}
+
+	if _, err := s.BounceListener(ctx, &dbdpb.BounceListenerRequest{
+		Operation:    dbdpb.BounceListenerRequest_STOP,
+		ListenerName: lType,
+		TnsAdmin:     lDir,
+	}); err != nil {
+		klog.ErrorS(err, "Listener stop failed", "name", lType, "lDir", lDir)
+	}
+
+	if _, err := s.BounceListener(ctx, &dbdpb.BounceListenerRequest{
+		Operation:    dbdpb.BounceListenerRequest_START,
+		ListenerName: lType,
+		TnsAdmin:     lDir,
+	}); err != nil {
+		return nil, fmt.Errorf("listener %s startup failed: %s, %v", lType, lDir, err)
+	}
+
+	return &dbdpb.CreateListenerResponse{}, nil
+}
+
+func (s *Server) fetchPDBNames(ctx context.Context) ([]string, error) {
+	sqlResp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{
+		Commands: []string{consts.ListPluggableDatabaseExcludeSeedSQL},
+		Suppress: false,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("fetchPDBNames: query pdb names failed: %v", err)
+	}
+
+	pdbNames := sqlResp.GetMsg()
+	knownPDBs := make([]string, len(pdbNames))
+	for i, msg := range pdbNames {
+		row := make(map[string]string)
+		if err := json.Unmarshal([]byte(msg), &row); err != nil {
+			return knownPDBs, err
+		}
+		if name, ok := row["PDB_NAME"]; ok {
+			knownPDBs[i] = name
+		}
+	}
+	klog.InfoS("fetchPDBNames: found known PDBs", "knownPDBs", knownPDBs)
+	return knownPDBs, nil
+}
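+
+// For reference, RunSQLPlusFormatted (used by fetchPDBNames above) returns one
+// JSON object per result row, keyed by column name, so each message decoded
+// here looks roughly like (illustrative value):
+//
+//   {"PDB_NAME": "PDB1"}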
+
+// FileExists is used to check the existence of a file (e.g. useful for provisioning).
+func (s *Server) FileExists(ctx context.Context, req *dbdpb.FileExistsRequest) (*dbdpb.FileExistsResponse, error) {
+	host, err := os.Hostname()
+	if err != nil {
+		return &dbdpb.FileExistsResponse{}, fmt.Errorf("dbdaemon/FileExists: failed to get host name: %v", err)
+	}
+
+	file := req.GetName()
+
+	// Note: the os.Stat error must not shadow the outer err; otherwise the
+	// os.IsNotExist check below would always inspect the (nil) hostname error.
+	_, err = os.Stat(file)
+	if err == nil {
+		klog.InfoS("dbdaemon/FileExists", "requested file", file, "result", "found")
+		return &dbdpb.FileExistsResponse{Exists: true}, nil
+	}
+
+	if os.IsNotExist(err) {
+		klog.InfoS("dbdaemon/FileExists", "requested file", file, "on host", host, "result", "NOT found")
+		return &dbdpb.FileExistsResponse{Exists: false}, nil
+	}
+
+	// Something is wrong, return the error.
+	klog.Errorf("dbdaemon/FileExists: failed to determine the status of a requested file %q on host %q: %v", file, host, err)
+	return &dbdpb.FileExistsResponse{}, err
+}
+
+// CreateDir RPC call to create a directory named path, along with any necessary parents.
+func (s *Server) CreateDir(ctx context.Context, req *dbdpb.CreateDirRequest) (*dbdpb.CreateDirResponse, error) {
+	if err := os.MkdirAll(req.GetPath(), os.FileMode(req.GetPerm())); err != nil {
+		return nil, fmt.Errorf("dbdaemon/CreateDir failed: %v", err)
+	}
+	return &dbdpb.CreateDirResponse{}, nil
+}
+
+// ReadDir RPC call to read the directory named by path; returns FileInfos for the path and its children.
+func (s *Server) ReadDir(ctx context.Context, req *dbdpb.ReadDirRequest) (*dbdpb.ReadDirResponse, error) {
+	if !strings.HasPrefix(req.GetPath(), "/") {
+		return nil, fmt.Errorf("dbdaemon/ReadDir failed to read %v, only absolute paths are accepted", req.GetPath())
+	}
+	currFileInfo, err := os.Stat(req.GetPath())
+	if err != nil {
+		return nil, fmt.Errorf("dbdaemon/ReadDir os.Stat(%v) failed: %v", req.GetPath(), err)
+	}
+	rpcCurrFileInfo, err := convertToRpcFileInfo(currFileInfo, req.GetPath())
+	if err != nil {
+		return nil, fmt.Errorf("dbdaemon/ReadDir failed: %v", err)
+	}
+	resp := &dbdpb.ReadDirResponse{
+		CurrPath: rpcCurrFileInfo,
+	}
+
+	if !currFileInfo.IsDir() {
+		// For a file, just return its fileInfo.
+		return resp, nil
+	}
+
+	if req.GetRecursive() {
+		if err := filepath.Walk(req.GetPath(), func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				// Stop walking if we see any error.
+				return fmt.Errorf("visit %v, %v failed: %v", path, info, err)
+			}
+			if len(resp.SubPaths) >= maxWalkFiles {
+				return fmt.Errorf("visited more than %v files, try reducing the dir scope", maxWalkFiles)
+			}
+			if path == req.GetPath() {
+				return nil
+			}
+			rpcInfo, err := convertToRpcFileInfo(info, path)
+			if err != nil {
+				return fmt.Errorf("visit %v, %v failed: %v", info, path, err)
+			}
+			resp.SubPaths = append(resp.SubPaths, rpcInfo)
+			return nil
+		}); err != nil {
+			return nil, fmt.Errorf("dbdaemon/ReadDir filepath.Walk(%v) failed: %v", req.GetPath(), err)
+		}
+	} else {
+		subFileInfos, err := ioutil.ReadDir(req.GetPath())
+		if err != nil {
+			return nil, fmt.Errorf("dbdaemon/ReadDir ioutil.ReadDir(%v) failed: %v", req.GetPath(), err)
+		}
+		for _, info := range subFileInfos {
+			rpcInfo, err := convertToRpcFileInfo(info, filepath.Join(req.GetPath(), info.Name()))
+			if err != nil {
+				return nil, fmt.Errorf("dbdaemon/ReadDir failed: %v", err)
+			}
+			resp.SubPaths = append(resp.SubPaths, rpcInfo)
+		}
+	}
+
+	return resp, nil
+}
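+
+// A minimal sketch of how a caller might list a directory tree through this
+// RPC (illustrative path; client is a dbdpb.DatabaseDaemonClient):
+//
+//   resp, err := client.ReadDir(ctx, &dbdpb.ReadDirRequest{
+//     Path:      "/u02/app/oracle",
+//     Recursive: true,
+//   })
+//   // resp.CurrPath describes /u02/app/oracle itself; resp.SubPaths holds
+//   // FileInfo entries for everything underneath it (capped at maxWalkFiles).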
+
+func convertToRpcFileInfo(info os.FileInfo, absPath string) (*dbdpb.ReadDirResponse_FileInfo, error) {
+	timestampProto, err := ptypes.TimestampProto(info.ModTime())
+	if err != nil {
+		return nil, fmt.Errorf("convertToRpcFileInfo(%v) failed: %v", info, err)
+	}
+	return &dbdpb.ReadDirResponse_FileInfo{
+		Name:    info.Name(),
+		Size:    info.Size(),
+		Mode:    uint32(info.Mode()),
+		ModTime: timestampProto,
+		IsDir:   info.IsDir(),
+		AbsPath: absPath,
+	}, nil
+}
+
+// DeleteDir removes path and any children it contains.
+func (s *Server) DeleteDir(ctx context.Context, req *dbdpb.DeleteDirRequest) (*dbdpb.DeleteDirResponse, error) {
+	removeFun := os.Remove
+	if req.GetForce() {
+		removeFun = os.RemoveAll
+	}
+	if err := removeFun(req.GetPath()); err != nil {
+		return nil, fmt.Errorf("dbdaemon/DeleteDir(%v) failed: %v", req, err)
+	}
+	return &dbdpb.DeleteDirResponse{}, nil
+}
+
+// BackupConfigFile converts the binary spfile to a human readable pfile and
+// creates a snapshot copy named pfile.lkws (lkws -> last known working state).
+// This file will be used for recovery in the event of parameter update workflow
+// failure due to bad static parameters.
+func (s *Server) BackupConfigFile(ctx context.Context, cdbName string) error {
+	configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, cdbName)
+	backupPFileLoc := fmt.Sprintf("%s/%s", configDir, "pfile.lkws")
+	klog.InfoS("dbdaemon/BackupConfigFile: backup config file", "backupPFileLoc", backupPFileLoc)
+
+	_, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{fmt.Sprintf("create pfile='%s' from spfile", backupPFileLoc)}}, false)
+	if err != nil {
+		klog.InfoS("dbdaemon/BackupConfigFile: error while backing up config file", "err", err)
+		return fmt.Errorf("BackupConfigFile: failed to create pfile due to error: %v", err)
+	}
+	klog.InfoS("dbdaemon/BackupConfigFile: Successfully backed up config file")
+	return nil
+}
+
+// RecoverConfigFile regenerates the binary spfile from the human readable backup pfile.
+func (s *Server) RecoverConfigFile(ctx context.Context, req *dbdpb.RecoverConfigFileRequest) (*dbdpb.RecoverConfigFileResponse, error) {
+	configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, req.GetCdbName())
+	backupPFileLoc := fmt.Sprintf("%s/%s", configDir, "pfile.lkws")
+	spFileLoc := fmt.Sprintf("%s/%s", configDir, fmt.Sprintf("spfile%s.ora", req.CdbName))
+
+	klog.InfoS("dbdaemon/RecoverConfigFile: recover config file", "backupPFileLoc", backupPFileLoc, "spFileLoc", spFileLoc)
+
+	_, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{
+		Commands: []string{fmt.Sprintf("create spfile='%s' from pfile='%s'", spFileLoc, backupPFileLoc)}}, false)
+	if err != nil {
+		klog.InfoS("dbdaemon/RecoverConfigFile: error while recovering config file", "err", err)
+		return nil, fmt.Errorf("dbdaemon/RecoverConfigFile: error while recovering config file: %v", err)
+	}
+	klog.InfoS("dbdaemon/RecoverConfigFile: Successfully recovered config file")
+	return &dbdpb.RecoverConfigFileResponse{}, nil
+}
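+
+// For context, BackupConfigFile and RecoverConfigFile form a pair: before a
+// risky parameter update the controller can snapshot the spfile to pfile.lkws,
+// and if the instance then fails to start it can rebuild the spfile from that
+// snapshot. A sketch of the SQL round trip issued above (paths illustrative):
+//
+//   SQL> create pfile='<configDir>/pfile.lkws' from spfile;
+//   -- ...bad static parameter applied, instance fails to restart...
+//   SQL> create spfile='<configDir>/spfileMYDB.ora' from pfile='<configDir>/pfile.lkws';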
+
+// New creates a new dbdaemon server.
+func New(ctx context.Context, cdbNameFromYaml string) (*Server, error) {
+	klog.InfoS("dbdaemon/New: Dialing dbdaemon proxy")
+	conn, err := common.DatabaseDaemonDialSocket(ctx, consts.ProxyDomainSocketFile, grpc.WithBlock())
+	if err != nil {
+		return nil, fmt.Errorf("failed to dial to database daemon: %v", err)
+	}
+	klog.InfoS("dbdaemon/New: Successfully connected to dbdaemon proxy")
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get hostname: %v", err)
+	}
+
+	s := &Server{
+		hostName:       hostname,
+		database:       &DB{},
+		osUtil:         &osUtilImpl{},
+		databaseSid:    &syncState{},
+		dbdClient:      dbdpb.NewDatabaseDaemonProxyClient(conn),
+		dbdClientClose: conn.Close,
+		lroServer:      lro.NewServer(ctx),
+		syncJobs:       &syncJobs{},
+		gcsUtil:        &gcsUtilImpl{},
+	}
+
+	oracleHome, _, _, err := provision.FetchMetaDataFromImage(provision.MetaDataFile)
+	if err != nil {
+		return nil, fmt.Errorf("error while fetching metadata from image: %v", err)
+	}
+	if err := setEnvNew(s, oracleHome, cdbNameFromYaml); err != nil {
+		return nil, fmt.Errorf("failed to setup environment: %v", err)
+	}
+	return s, nil
+}
+
+// DownloadDirectoryFromGCS downloads the GCS objects that match a prefix.
+func (s *Server) DownloadDirectoryFromGCS(ctx context.Context, req *dbdpb.DownloadDirectoryFromGCSRequest) (*dbdpb.DownloadDirectoryFromGCSResponse, error) {
+	klog.Infof("dbdaemon/DownloadDirectoryFromGCS: req %v", req)
+	bucket, prefix, err := s.gcsUtil.splitURI(req.GcsPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse GCS path: %v", err)
+	}
+
+	klog.Infof("dbdaemon/DownloadDirectoryFromGCS: destination path is %s", req.GetLocalPath())
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("storage.NewClient: %v", err)
+	}
+	defer client.Close()
+	ctx, cancel := context.WithTimeout(ctx, time.Hour)
+	defer cancel()
+	it := client.Bucket(bucket).Objects(ctx, &storage.Query{
+		Prefix: prefix,
+	})
+	for {
+		attrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, fmt.Errorf("Bucket(%q).Objects(): %v", bucket, err)
+		}
+		if err := s.downloadFile(ctx, client, bucket, attrs.Name, prefix, req.GetLocalPath()); err != nil {
+			return nil, fmt.Errorf("failed to download file: %v", err)
+		}
+	}
+	return &dbdpb.DownloadDirectoryFromGCSResponse{}, nil
+}
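+
+// For reference, objects are restored relative to the request prefix: with a
+// GcsPath of "gs://mybucket/backups" and a LocalPath of "/u03/restore"
+// (illustrative values), the object "backups/ctl/control01.ctl" is written to
+// "/u03/restore/ctl/control01.ctl", since downloadFile joins LocalPath with
+// the object name made relative to the prefix.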
+
+// FetchServiceImageMetaData fetches the image metadata from the image.
+func (s *Server) FetchServiceImageMetaData(ctx context.Context, req *dbdpb.FetchServiceImageMetaDataRequest) (*dbdpb.FetchServiceImageMetaDataResponse, error) {
+	oracleHome, cdbName, version, err := provision.FetchMetaDataFromImage(provision.MetaDataFile)
+	if err != nil {
+		// If the metadata cannot be read, an empty response is returned (no error).
+		return &dbdpb.FetchServiceImageMetaDataResponse{}, nil
+	}
+	return &dbdpb.FetchServiceImageMetaDataResponse{Version: version, CdbName: cdbName, OracleHome: oracleHome}, nil
+}
+
+func (s *Server) downloadFile(ctx context.Context, c *storage.Client, bucket, gcsPath, baseDir, dest string) error {
+	reader, err := c.Bucket(bucket).Object(gcsPath).NewReader(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to read URL %s: %v", gcsPath, err)
+	}
+	defer reader.Close()
+
+	relPath, err := filepath.Rel(baseDir, gcsPath)
+	if err != nil {
+		return fmt.Errorf("failed to parse relPath for gcsPath %s: %v", gcsPath, err)
+	}
+
+	f := filepath.Join(dest, relPath)
+	if err := s.osUtil.createFile(f, reader); err != nil {
+		return fmt.Errorf("failed to createFile for file %s: %v", f, err)
+	}
+	klog.InfoS("dbdaemon/downloadFile:", "downloaded", f)
+	return nil
+}
diff --git a/oracle/pkg/database/dbdaemon/dbdaemon_server_test.go b/oracle/pkg/database/dbdaemon/dbdaemon_server_test.go
new file mode 100644
index 0000000..dcd1f80
--- /dev/null
+++ b/oracle/pkg/database/dbdaemon/dbdaemon_server_test.go
@@ -0,0 +1,487 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbdaemon
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"testing"
+
+	"github.com/godror/godror"
+	"github.com/google/go-cmp/cmp"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/test/bufconn"
+
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+)
+
+func TestServerCreateDir(t *testing.T) {
+	ctx := context.Background()
+	client, cleanup := newFakeDatabaseDaemonClient(t)
+	defer cleanup()
+
+	oldMask := syscall.Umask(0022)
+	testDir, err := ioutil.TempDir("", "TestServerCreateDir")
+	if err != nil {
+		t.Fatalf("failed to create test dir: %v", err)
+	}
+	defer func() {
+		syscall.Umask(oldMask)
+		os.RemoveAll(testDir)
+	}()
+	testCases := []struct {
+		name     string
+		path     string
+		perm     uint32
+		wantPerm uint32
+	}{
+		{
+			name:     "direct dir",
+			path:     filepath.Join(testDir, "dir1"),
+			perm:     0777,
+			wantPerm: 0755,
+		},
+		{
+			name:     "nested dirs",
+			path:     filepath.Join(testDir, "dir2", "dir3"),
+			perm:     0750,
+			wantPerm: 0750,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if _, err := client.CreateDir(ctx, &dbdpb.CreateDirRequest{
+				Path: tc.path,
+				Perm: tc.perm,
+			}); err != nil {
+				t.Fatalf("dbdaemon.CreateDir failed: %v", err)
+			}
+			info, err := os.Stat(tc.path)
+			if err != nil {
+				t.Fatalf("dbdaemon.CreateDir os.Stat(%q) failed: %v", tc.path, err)
+			}
+			if !info.IsDir() {
+				t.Errorf("dbdaemon.CreateDir got file %q, want dir", tc.path)
+			}
+			// The requested perm is reduced by the process umask, see
+			// https://github.com/golang/go/issues/15210
+			if info.Mode().Perm() != os.FileMode(tc.wantPerm) {
+				t.Errorf("dbdaemon.CreateDir got file perm %q, want perm %q", info.Mode().Perm(), os.FileMode(tc.wantPerm))
+			}
+		})
+	}
+}
+
+func TestServerReadDir(t *testing.T) {
+	client, cleanup := newFakeDatabaseDaemonClient(t)
+	defer cleanup()
+	ctx := context.Background()
+
+	testDir, err := ioutil.TempDir("", "TestServerReadDir")
+	if err != nil {
+		t.Fatalf("failed to create test dir: %v", err)
+	}
+	defer os.RemoveAll(testDir)
+	testCases := []struct {
+		name         string
+		path         string
+		recursive    bool
+		dirs         []string
+		files        []string
+		wantCurrPath string
+		wantSubPaths []string
+	}{
+		{
+			name:         "file",
+			path:         filepath.Join(testDir, "test1"),
+			wantCurrPath: filepath.Join(testDir, "test1"),
+		},
+		{
+			name:         "dir without content",
+			path:         filepath.Join(testDir, "dir1"),
+			wantCurrPath: filepath.Join(testDir, "dir1"),
+		},
+		{
+			name:      "dir with contents recursive",
+			path:      filepath.Join(testDir, "dir2"),
+			recursive: true,
+			dirs: []string{
+				filepath.Join(testDir, "dir2", "dir3"),
+				filepath.Join(testDir, "dir2", "dir3", "dir4"),
+			},
+			files: []string{
+				filepath.Join(testDir, "dir2", "test2"),
+				filepath.Join(testDir, "dir2", "dir3", "test3"),
+				filepath.Join(testDir, "dir2", "dir3", "dir4", "test4"),
+			},
+			wantCurrPath: filepath.Join(testDir, "dir2"),
+			wantSubPaths: []string{
+				filepath.Join(testDir, "dir2/dir3"),
+				filepath.Join(testDir, "dir2/dir3/dir4"),
+				filepath.Join(testDir, "dir2/dir3/dir4/test4"),
+				filepath.Join(testDir, "dir2/dir3/test3"),
+				filepath.Join(testDir, "dir2/test2"),
+			},
+		},
+		{
+			name: "dir with contents not recursive",
+			path: filepath.Join(testDir, "dir5"),
+			dirs: []string{
+				filepath.Join(testDir, "dir5", "dir3"),
+				filepath.Join(testDir, "dir5", "dir3", "dir4"),
+			},
+			files: []string{
+				filepath.Join(testDir, "dir5", "test2"),
+				filepath.Join(testDir, "dir5", "dir3", "test3"),
+				filepath.Join(testDir, "dir5", "dir3", "dir4", "test4"),
+			},
+			wantCurrPath: filepath.Join(testDir, "dir5"),
+			wantSubPaths: []string{
+				filepath.Join(testDir, "dir5/dir3"),
+				filepath.Join(testDir, "dir5/test2"),
+			},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if err := os.MkdirAll(tc.path, 0755); err != nil {
+				t.Fatalf("dbdaemon.ReadDir failed to create test dir: %v", err)
+			}
+			for _, dir := range tc.dirs {
+				if err := os.MkdirAll(dir, 0755); err != nil {
+					t.Fatalf("dbdaemon.ReadDir failed to create test dir: %v", err)
+				}
+			}
+			for _, file := range tc.files {
+				f, err := os.Create(file)
+				if err != nil {
+					t.Fatalf("dbdaemon.ReadDir failed to create test file: %v", err)
+				}
+				f.Close()
+			}
+
+			resp, err := client.ReadDir(ctx, &dbdpb.ReadDirRequest{
+				Path:      tc.path,
+				Recursive: tc.recursive,
+			})
+			if err != nil {
+				t.Fatalf("dbdaemon.ReadDir failed: %v", err)
+			}
+			gotCurrPath := resp.CurrPath.AbsPath
+			var gotSubPaths []string
+			for _, sp := range resp.SubPaths {
+				gotSubPaths = append(gotSubPaths, sp.AbsPath)
+			}
+			if gotCurrPath != tc.wantCurrPath {
+				t.Errorf("dbdaemon.ReadDir curr path %s, want %s", gotCurrPath, tc.wantCurrPath)
+			}
+			if diff := cmp.Diff(tc.wantSubPaths, gotSubPaths); diff != "" {
+				t.Errorf("dbdaemon.ReadDir sub paths got unexpected files/dirs: -want +got %v", diff)
+			}
+			// verify fileInfo
+			verifyPaths := append(tc.wantSubPaths, tc.wantCurrPath)
+			gotInfo := append(resp.SubPaths, resp.CurrPath)
+			for i, path := range verifyPaths {
+				info, err := os.Stat(path)
+				if err != nil {
+					t.Errorf("dbdaemon.ReadDir os.Stat(%q) failed: %v", path, err)
+					continue
+				}
+				if gotInfo[i].Name != info.Name() {
+					t.Errorf("dbdaemon.ReadDir sub path %q, got name %v, want %v", path, gotInfo[i].Name, info.Name())
+				}
+				if gotInfo[i].Size != info.Size() {
+					t.Errorf("dbdaemon.ReadDir sub path %q, got size %v, want %v", path, gotInfo[i].Size, info.Size())
+				}
+				if os.FileMode(gotInfo[i].Mode) != info.Mode() {
+					t.Errorf("dbdaemon.ReadDir sub path %q, got mode %v, want %v", path, os.FileMode(gotInfo[i].Mode), info.Mode())
+				}
+				if gotInfo[i].IsDir != info.IsDir() {
+					t.Errorf("dbdaemon.ReadDir sub path %q, got isDir %v, want %v", path, gotInfo[i].IsDir, info.IsDir())
+				}
+				if gotInfo[i].AbsPath != path {
+					t.Errorf("dbdaemon.ReadDir sub path %q, got abs path %v, want %v", path, gotInfo[i].AbsPath, path)
+				}
+			}
+		})
+	}
+}
+
+func TestServerDeleteDir(t *testing.T) {
+	ctx := context.Background()
+	client, cleanup := newFakeDatabaseDaemonClient(t)
+	defer cleanup()
+
+	testDir, err := ioutil.TempDir("", "TestServerDeleteDir")
+	if err != nil {
+		t.Fatalf("failed to create test dir: %v", err)
+	}
+	defer os.RemoveAll(testDir)
+	testCases := []struct {
+		name  string
+		path  string
+		force bool
+		files []string
+		dirs  []string
+	}{
+		{
+			name: "dir without content",
+			path: filepath.Join(testDir, "dir1"),
+		},
+		{
+			name:  "dir without content force",
+			force: true,
+			path:  filepath.Join(testDir, "dir2"),
+		},
+		{
+			name:  "dir with contents",
+			path:  filepath.Join(testDir, "dir3"),
+			force: true,
+			files: []string{filepath.Join(testDir, "dir3", "test1")},
+			dirs:  []string{filepath.Join(testDir, "dir3", "dir4")},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if err := os.MkdirAll(tc.path, 0755); err != nil {
+				t.Fatalf("dbdaemon.DeleteDir failed to create test dir: %v", err)
+			}
+			for _, file := range tc.files {
+				f, err := os.Create(file)
+				if err != nil {
+					t.Fatalf("dbdaemon.DeleteDir failed to create test file: %v", err)
+				}
+				f.Close()
+			}
+			for _, dir := range tc.dirs {
+				if err := os.MkdirAll(dir, 0755); err != nil {
+					t.Fatalf("dbdaemon.DeleteDir failed to create test dir: %v", err)
+				}
+			}
+			if _, err := client.DeleteDir(ctx, &dbdpb.DeleteDirRequest{
+				Path:  tc.path,
+				Force: tc.force,
+			}); err != nil {
+				t.Fatalf("dbdaemon.DeleteDir failed: %v", err)
+			}
+			if _, err := os.Stat(tc.path); !os.IsNotExist(err) {
+				t.Fatalf("dbdaemon.DeleteDir got %q exists, want not exists", tc.path)
+			}
+		})
+	}
+}
+
+func TestServerDeleteDirErrors(t *testing.T) {
+	ctx := context.Background()
+	client, cleanup := newFakeDatabaseDaemonClient(t)
+	defer cleanup()
+
+	testDir, err := ioutil.TempDir("", "TestServerDeleteDirErrors")
+	if err != nil {
+		t.Fatalf("failed to create test dir: %v", err)
+	}
+	defer os.RemoveAll(testDir)
+	testCases := []struct {
+		name  string
+		path  string
+		files []string
+		dirs  []string
+	}{
+		{
+			name:  "dir with files contents",
+			path:  filepath.Join(testDir, "dir1"),
+			files: []string{filepath.Join(testDir, "dir1", "test1")},
+		},
+		{
+			name: "dir with dirs contents",
+			path: filepath.Join(testDir, "dir2"),
+			dirs: []string{filepath.Join(testDir, "dir2", "dir3")},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if err := os.MkdirAll(tc.path, 0755); err != nil {
+				t.Fatalf("dbdaemon.DeleteDir failed to create test dir: %v", err)
+			}
+			for _, file := range tc.files {
+				f, err := os.Create(file)
+				if err != nil {
+					t.Fatalf("dbdaemon.DeleteDir failed to create test file: %v", err)
+				}
+				f.Close()
+			}
+			for _, dir := range tc.dirs {
+				if err := os.MkdirAll(dir, 0755); err != nil {
+					t.Fatalf("dbdaemon.DeleteDir failed to create test dir: %v", err)
+				}
+			}
+			if _, err := client.DeleteDir(ctx, &dbdpb.DeleteDirRequest{
+				Path: tc.path,
+			}); err == nil {
+				t.Errorf("dbdaemon.DeleteDir succeeded, want not-nil err")
+			}
+			// Double check that the dir still exists.
+			if _, err := os.Stat(tc.path); err != nil {
+				t.Fatalf("dbdaemon.DeleteDir failed and got %q not exists, want exists", tc.path)
+			}
+		})
+	}
+}
+
+func newFakeDatabaseDaemonClient(t *testing.T) (dbdpb.DatabaseDaemonClient, func()) {
+	t.Helper()
+	grpcSvr := grpc.NewServer()
+
+	dbdpb.RegisterDatabaseDaemonServer(grpcSvr, &Server{})
+	lis := bufconn.Listen(2 * 1024 * 1024)
+	go grpcSvr.Serve(lis)
+
+	dbdConn, err := grpc.Dial("test",
+		grpc.WithInsecure(),
+		grpc.WithContextDialer(
+			func(ctx context.Context, s string) (conn net.Conn, err error) {
+				return lis.Dial()
+			}),
+	)
+	if err != nil {
+		t.Fatalf("failed to dial to dbDaemon: %v", err)
+	}
+	return dbdpb.NewDatabaseDaemonClient(dbdConn), func() {
+		dbdConn.Close()
+		grpcSvr.GracefulStop()
+	}
+}
+
+func TestServerString(t *testing.T) {
+	password := "myPassword456"
+	s := Server{
+		hostName:   "hostnameValue",
+		pdbConnStr: fmt.Sprintf("username/%v@localhost:6021/GCLOUD.GCE", password),
+	}
+
+	stringVal := s.String()
+	if strings.Contains(stringVal, password) {
+		t.Errorf("server.String() got password in string %v, want string without password %v", stringVal, password)
+	}
+}
+
+// Mock DB ('dbdaemon' interface)
+type mockDB struct {
+	setDatabaseUpgradeModeCount int
+	openPDBsCount               int
+}
+
+func (m mockDB) shutdownDatabase(ctx context.Context, mode godror.ShutdownMode) error {
+	panic("implement me")
+}
+
+func (m mockDB) startupDatabase(ctx context.Context, mode godror.StartupMode, s string) error {
+	panic("implement me")
+}
+
+func (m *mockDB) setDatabaseUpgradeMode(ctx context.Context) error {
+	m.setDatabaseUpgradeModeCount++
+	return nil
+}
+
+func (m *mockDB) openPDBs(ctx context.Context) error {
+ m.openPDBsCount++ + return nil +} + +func (m mockDB) runSQL(ctx context.Context, i []string, b bool, b2 bool, database oracleDatabase) ([]string, error) { + panic("implement me") +} + +func (m mockDB) runQuery(ctx context.Context, i []string, database oracleDatabase) ([]string, error) { + panic("implement me") +} + +// Mock dbdaemon_proxy client +type mockDatabaseDaemonProxyClient struct { + startupCount int + shutdownCount int +} + +func (m *mockDatabaseDaemonProxyClient) BounceDatabase(ctx context.Context, in *dbdpb.BounceDatabaseRequest, opts ...grpc.CallOption) (*dbdpb.BounceDatabaseResponse, error) { + if in.Operation == dbdpb.BounceDatabaseRequest_STARTUP { + m.startupCount++ + } else if in.Operation == dbdpb.BounceDatabaseRequest_SHUTDOWN { + m.shutdownCount++ + } + return new(dbdpb.BounceDatabaseResponse), nil +} + +func (m *mockDatabaseDaemonProxyClient) BounceListener(ctx context.Context, in *dbdpb.BounceListenerRequest, opts ...grpc.CallOption) (*dbdpb.BounceListenerResponse, error) { + panic("implement me") +} + +func (m *mockDatabaseDaemonProxyClient) ProxyRunDbca(ctx context.Context, in *dbdpb.ProxyRunDbcaRequest, opts ...grpc.CallOption) (*dbdpb.ProxyRunDbcaResponse, error) { + panic("implement me") +} + +func (m *mockDatabaseDaemonProxyClient) ProxyRunNID(ctx context.Context, in *dbdpb.ProxyRunNIDRequest, opts ...grpc.CallOption) (*dbdpb.ProxyRunNIDResponse, error) { + panic("implement me") +} + +func (m *mockDatabaseDaemonProxyClient) SetEnv(ctx context.Context, in *dbdpb.SetEnvRequest, opts ...grpc.CallOption) (*dbdpb.SetEnvResponse, error) { + panic("implement me") +} + +// Mock osUtil +type mockOsUtil struct { + commands []string +} + +func (m *mockOsUtil) runCommand(bin string, params []string) error { + m.commands = append(m.commands, bin) + return nil +} + +func (m *mockOsUtil) isReturnCodeEqual(err error, code int) bool { + panic("implement me") +} + +func (m *mockOsUtil) createFile(file string, content io.Reader) error { + panic("implement me") +} + +func (m *mockOsUtil) removeFile(file string) error { + panic("implement me") +} + +func NewMockServer(ctx context.Context, cdbNameFromYaml string) (*Server, error) { + + s := &Server{ + hostName: "MOCK_HOST", + database: &mockDB{}, + osUtil: &mockOsUtil{}, + databaseSid: &syncState{}, + dbdClient: &mockDatabaseDaemonProxyClient{}, + dbdClientClose: nil, + lroServer: nil, + syncJobs: &syncJobs{}, + gcsUtil: &gcsUtilImpl{}, + } + s.databaseHome = "DBHOME" + s.databaseSid.val = "MOCK_DB" + return s, nil +} diff --git a/oracle/pkg/database/dbdaemon/utils.go b/oracle/pkg/database/dbdaemon/utils.go new file mode 100644 index 0000000..f744a78 --- /dev/null +++ b/oracle/pkg/database/dbdaemon/utils.go @@ -0,0 +1,207 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package dbdaemon
+
+import (
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"k8s.io/klog/v2"
+)
+
+const gsPrefix = "gs://"
+
+// Override library functions for the benefit of unit tests.
+var (
+	lsnrctl = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "lsnrctl")
+	}
+	rman = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "rman")
+	}
+	orapwd = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "orapwd")
+	}
+	impdp = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "impdp")
+	}
+	expdp = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "expdp")
+	}
+)
+
+const (
+	contentTypePlainText = "text/plain"
+	contentTypeGZ        = "application/gzip"
+)
+
+// osUtil is an interface so that command execution and file operations can be
+// faked in tests.
+type osUtil interface {
+	runCommand(bin string, params []string) error
+	isReturnCodeEqual(err error, code int) bool
+	createFile(file string, content io.Reader) error
+	removeFile(file string) error
+}
+
+type osUtilImpl struct {
+}
+
+func (o *osUtilImpl) runCommand(bin string, params []string) error {
+	ohome := os.Getenv("ORACLE_HOME")
+	klog.InfoS("executing command with args", "cmd", bin, "params", params, "ORACLE_SID", os.Getenv("ORACLE_SID"), "ORACLE_HOME", ohome, "TNS_ADMIN", os.Getenv("TNS_ADMIN"))
+	switch bin {
+	case lsnrctl(ohome), rman(ohome), orapwd(ohome), impdp(ohome), expdp(ohome):
+	default:
+		klog.InfoS("command not supported", "bin", bin)
+		return fmt.Errorf("command %q is not supported", bin)
+	}
+	cmd := exec.Command(bin)
+	cmd.Args = append(cmd.Args, params...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+func (o *osUtilImpl) isReturnCodeEqual(err error, code int) bool {
+	if exitError, ok := err.(*exec.ExitError); ok {
+		return exitError.ExitCode() == code
+	}
+	return false
+}
+
+func (o *osUtilImpl) createFile(file string, content io.Reader) error {
+	dir := filepath.Dir(file)
+	if err := os.MkdirAll(dir, 0750); err != nil {
+		return fmt.Errorf("couldn't create dir err: %v", err)
+	}
+	f, err := os.Create(file) // truncates if file exists.
+	if err != nil {
+		return fmt.Errorf("couldn't create file err: %v", err)
+	}
+	defer func() {
+		if err := f.Close(); err != nil {
+			klog.Warningf("failed to close %v: %v", f, err)
+		}
+	}()
+	if _, err := io.Copy(f, content); err != nil {
+		return fmt.Errorf("copying contents failed: %v", err)
+	}
+	return nil
+}
+
+func (o *osUtilImpl) removeFile(file string) error {
+	return os.Remove(file)
+}
+
+// gcsUtil contains helper methods for reading/writing GCS objects.
+type gcsUtil interface {
+	// download returns an io.ReadCloser for GCS object at given gcsPath.
+	download(ctx context.Context, gcsPath string) (io.ReadCloser, error)
+	// uploadFile uploads contents of a file at filepath to gcsPath location in
+	// GCS and sets object's contentType.
+	// If gcsPath ends with .gz it also compresses the uploaded contents
+	// and sets object's content type to application/gzip.
+	uploadFile(ctx context.Context, gcsPath, filepath, contentType string) error
+	// splitURI takes a GCS URI and splits it into bucket and object names. If the URI does not have
+	// the gs:// scheme, or the URI doesn't specify both a bucket and an object name, returns an error.
+	splitURI(url string) (bucket, name string, err error)
+}
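+
+// A minimal usage sketch (illustrative paths, not from this change): uploading
+// a file to a ".gz" destination compresses it in flight and stores it as
+// application/gzip, per the interface contract above.
+//
+//   var u gcsUtil = &gcsUtilImpl{}
+//   err := u.uploadFile(ctx, "gs://mybucket/exports/dump.dmp.gz", "/u03/dump.dmp", contentTypePlainText)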
+
+type gcsUtilImpl struct{}
+
+func (g *gcsUtilImpl) download(ctx context.Context, gcsPath string) (io.ReadCloser, error) {
+	bucket, name, err := g.splitURI(gcsPath)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to init GCS client: %v", err)
+	}
+	defer client.Close()
+
+	reader, err := client.Bucket(bucket).Object(name).NewReader(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read URL %s: %v", gcsPath, err)
+	}
+
+	return reader, nil
+}
+
+func (g *gcsUtilImpl) uploadFile(ctx context.Context, gcsPath, filePath, contentType string) error {
+	bucket, name, err := g.splitURI(gcsPath)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := f.Close(); err != nil {
+			klog.Warningf("failed to close %v: %v", f, err)
+		}
+	}()
+
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to init GCS client: %v", err)
+	}
+	defer client.Close()
+
+	b := client.Bucket(bucket)
+	// Check that the bucket exists and is accessible.
+	if _, err := b.Attrs(ctx); err != nil {
+		return err
+	}
+
+	gcsWriter := b.Object(name).NewWriter(ctx)
+	gcsWriter.ContentType = contentType
+
+	var writer io.WriteCloser = gcsWriter
+	isGZ := strings.HasSuffix(gcsPath, ".gz")
+	if isGZ {
+		gcsWriter.ContentType = contentTypeGZ
+		writer = gzip.NewWriter(gcsWriter)
+	}
+
+	if _, err := io.Copy(writer, f); err != nil {
+		gcsWriter.Close()
+		return fmt.Errorf("failed to write file %s to %s: %v", filePath, gcsPath, err)
+	}
+
+	// Close the gzip writer (if any) to flush compressed data, then the GCS
+	// writer; the GCS writer's Close reports whether the upload succeeded, so
+	// its error must be checked rather than dropped in a defer.
+	if isGZ {
+		if err := writer.Close(); err != nil {
+			gcsWriter.Close()
+			return fmt.Errorf("failed to flush gzip data to %s: %v", gcsPath, err)
+		}
+	}
+	if err := gcsWriter.Close(); err != nil {
+		return fmt.Errorf("failed to complete upload of %s to %s: %v", filePath, gcsPath, err)
+	}
+	return nil
+}
+
+func (g *gcsUtilImpl) splitURI(url string) (bucket, name string, err error) {
+	u := strings.TrimPrefix(url, gsPrefix)
+	if u == url {
+		return "", "", fmt.Errorf("URL %q is missing the %q prefix", url, gsPrefix)
+	}
+	if i := strings.Index(u, "/"); i >= 2 {
+		return u[:i], u[i+1:], nil
+	}
+	return "", "", fmt.Errorf("URL %q does not specify a bucket and a name", url)
+}
diff --git a/oracle/pkg/database/dbdaemon/utils_test.go b/oracle/pkg/database/dbdaemon/utils_test.go
new file mode 100644
index 0000000..5bd16a2
--- /dev/null
+++ b/oracle/pkg/database/dbdaemon/utils_test.go
@@ -0,0 +1,59 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package dbdaemon + +import ( + "testing" +) + +func TestGcsUtilImplSplitURI(t *testing.T) { + tests := []struct { + url string + wantBucket string + wantName string + }{ + {"gs://path/to/a/file", "path", "to/a/file"}, + {"gs://bucket/file.ext", "bucket", "file.ext"}, + } + + for _, test := range tests { + g := &gcsUtilImpl{} + gotBucket, gotName, err := g.splitURI(test.url) + + if err != nil || gotBucket != test.wantBucket || gotName != test.wantName { + t.Errorf("gcsUtilImpl.splitURI(%q)=(%q, %q, %q); wanted (%q, %q, nil)", + test.url, gotBucket, gotName, err, test.wantBucket, test.wantName) + } + } +} + +func TestGcsUtilImplSplitURIError(t *testing.T) { + tests := []struct { + url string + }{ + {"missing/prefix/in/url"}, + {"gs://missing-bucket"}, + } + + for _, test := range tests { + g := &gcsUtilImpl{} + gotBucket, gotName, err := g.splitURI(test.url) + + if err == nil { + t.Errorf("gcsUtilImpl.splitURI(%q)=(%q, %q, nil); wanted an error", + test.url, gotBucket, gotName) + } + } +} diff --git a/oracle/pkg/database/dbdaemonproxy/BUILD.bazel b/oracle/pkg/database/dbdaemonproxy/BUILD.bazel new file mode 100644 index 0000000..acc944f --- /dev/null +++ b/oracle/pkg/database/dbdaemonproxy/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "dbdaemonproxy", + srcs = ["dbdaemon_proxy.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/dbdaemonproxy", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "//oracle/pkg/database/common", + "//oracle/pkg/database/provision", + "@com_github_godror_godror//:godror", + "@io_k8s_klog_v2//:klog", + ], +) + +go_test( + name = "dbdaemonproxy_test", + srcs = ["dbdaemon_proxy_test.go"], + embed = [":dbdaemonproxy"], + deps = [ + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "@com_github_godror_godror//:godror", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//test/bufconn", + ], +) diff --git a/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy.go b/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy.go new file mode 100644 index 0000000..04571f6 --- /dev/null +++ b/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy.go @@ -0,0 +1,455 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbdaemonproxy provides access to the database container. +// From the security standpoint only the following requests are honored: +// - only requests from a localhost +// - only requests against predefined database and listener(s) +// - only for tightly controlled commands +// +// All requests are to be logged and audited. +// +// Only New and CheckDatabaseState functions of this package can be called +// at the instance (aka CDB) provisioning time. The rest of the functions +// are expected to be called only when a database (aka PDB) is provisioned. 
+package dbdaemonproxy
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/godror/godror" // Register database/sql driver
+	"k8s.io/klog/v2"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/common"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision"
+)
+
+// Override library functions for the benefit of unit tests.
+var (
+	lsnrctl = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "lsnrctl")
+	}
+	rman = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "rman")
+	}
+	orapwd = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "orapwd")
+	}
+	dbca = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "dbca")
+	}
+	nid = func(databaseHome string) string {
+		return filepath.Join(databaseHome, "bin", "nid")
+	}
+	sqlOpen = func(driverName, dataSourceName string) (database, error) {
+		return sql.Open(driverName, dataSourceName)
+	}
+	godrorDriverConn = func(ctx context.Context, ex godror.Execer) (conn, error) {
+		return godror.DriverConn(ctx, ex)
+	}
+)
+
+// osUtil is an interface so that command execution can be faked in tests.
+type osUtil interface {
+	runCommand(bin string, params []string) error
+}
+
+type osUtilImpl struct {
+}
+
+func (o *osUtilImpl) runCommand(bin string, params []string) error {
+	ohome := os.Getenv("ORACLE_HOME")
+	klog.InfoS("executing command with args", "cmd", bin, "params", params, "ORACLE_SID", os.Getenv("ORACLE_SID"), "ORACLE_HOME", ohome, "TNS_ADMIN", os.Getenv("TNS_ADMIN"))
+	switch bin {
+	case lsnrctl(ohome), rman(ohome), orapwd(ohome), dbca(ohome), nid(ohome):
+	default:
+		return fmt.Errorf("command %q is not supported", bin)
+	}
+	cmd := exec.Command(bin)
+	cmd.Args = append(cmd.Args, params...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("command %q failed at path %q with args %v: %v", bin, cmd.Path, cmd.Args, err)
+	}
+	return nil
+}
+
+// database defines the sql.DB APIs, which will be used in this package
+type database interface {
+	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+	Close() error
+}
+
+// conn defines the godror.Conn APIs, which will be used in this package
+type conn interface {
+	Startup(godror.StartupMode) error
+	Shutdown(godror.ShutdownMode) error
+}
+
+// Server holds a database config.
+type Server struct {
+	*dbdpb.UnimplementedDatabaseDaemonProxyServer
+	hostName     string
+	databaseSid  *syncState
+	databaseHome string
+	pdbConnStr   string
+	osUtil       osUtil
+	version      string
+}
+
+func (s Server) String() string {
+	// pdbConnStr may include credentials, so it is always redacted here.
+	return fmt.Sprintf("{hostName=%q, databaseSid=%+v, databaseHome=%q, pdbConnStr=%q}", s.hostName, s.databaseSid, s.databaseHome, "")
+}
+
+type syncState struct {
+	sync.RWMutex
+	val string
+}
+
+// shutdownDatabase performs a database shutdown in a requested mode.
+// It always connects to the local database.
+// Set ORACLE_HOME and ORACLE_SID in the env to control the target database.
+// A caller may decide to ignore ORA-1034 and just log a warning
+// if a database is already down (or raise an error if appropriate).
+func (s *Server) shutdownDatabase(ctx context.Context, mode godror.ShutdownMode) error { + // Consider allowing PRELIM mode connections for SHUTDOWN ABORT mode. + // This is useful when the server has maxed out on connections. + db, err := sqlOpen("godror", "oracle://?sysdba=1") + if err != nil { + klog.ErrorS(err, "dbdaemon/shutdownDatabase: failed to connect to a database") + return err + } + defer db.Close() + + oraDB, err := godrorDriverConn(ctx, db) + if err != nil { + return err + } + if err := oraDB.Shutdown(mode); err != nil { + return err + } + // The shutdown process is over after the first Shutdown call in ABORT + // mode. + if mode == godror.ShutdownAbort { + return err + } + + _, err = db.ExecContext(ctx, "alter database close normal") + if err != nil && strings.Contains(err.Error(), "ORA-01507:") { + klog.InfoS("dbdaemon/shutdownDatabase: database is already closed", "err", err) + err = nil + } + if err != nil { + return err + } + + _, err = db.ExecContext(ctx, "alter database dismount") + if err != nil && strings.Contains(err.Error(), "ORA-01507:") { + klog.InfoS("dbdaemon/shutdownDatabase: database is already dismounted", "err", err) + err = nil + } + if err != nil { + return err + } + + return oraDB.Shutdown(godror.ShutdownFinal) +} + +// startupDatabase performs a database startup in a requested mode. +// godror.StartupMode controls FORCE/RESTRICT options. +// databaseState string controls NOMOUNT/MOUNT/OPEN options. +// Setting a pfile to use on startup is currently unsupported. +// It always connects to the local database. +// Set ORACLE_HOME and ORACLE_SID in the env to control the target database. +func (s *Server) startupDatabase(ctx context.Context, mode godror.StartupMode, state string) error { + // To startup a shutdown database, open a prelim connection. + db, err := sqlOpen("godror", "oracle://?sysdba=1&prelim=1") + if err != nil { + return err + } + defer db.Close() + + oraDB, err := godrorDriverConn(ctx, db) + if err != nil { + return err + } + if err := oraDB.Startup(mode); err != nil { + return err + } + if strings.ToLower(state) == "nomount" { + return nil + } + + // To finish mounting/opening, open a normal connection. + db2, err := sqlOpen("godror", "oracle://?sysdba=1") + if err != nil { + return err + } + defer db2.Close() + + if _, err := db2.ExecContext(ctx, "alter database mount"); err != nil { + return err + } + if strings.ToLower(state) == "mount" { + return nil + } + _, err = db2.ExecContext(ctx, "alter database open") + return err +} + +// BounceDatabase is a Database Daemon method to start or stop a database. +func (s *Server) BounceDatabase(ctx context.Context, req *dbdpb.BounceDatabaseRequest) (*dbdpb.BounceDatabaseResponse, error) { + klog.InfoS("dbdaemon/BounceDatabase", "req", req, "serverObj", s) + reqDatabaseName := req.GetDatabaseName() + var ls dbdpb.DatabaseState + var operation string + // Allowed commands: startup [nomount|mount|open|force_nomount] or shutdown [immediate|transactional|abort]. + validStartupOptions := map[string]bool{"nomount": true, "mount": true, "open": true, "force_nomount": true} + // validShutdownOptions keys should match shutdownEnumMap below to prevent nil. 
+ validShutdownOptions := map[string]bool{"immediate": true, "transactional": true, "abort": true} + switch req.Operation { + case dbdpb.BounceDatabaseRequest_STARTUP: + ls = dbdpb.DatabaseState_READY + if req.Option != "" && !validStartupOptions[req.Option] { + e := []string{fmt.Sprintf("illegal option %q requested for operation %q", req.Option, req.Operation)} + return &dbdpb.BounceDatabaseResponse{ + DatabaseState: dbdpb.DatabaseState_DATABASE_STATE_ERROR, + ErrorMsg: e, + }, nil + } + operation = "startup" + case dbdpb.BounceDatabaseRequest_SHUTDOWN: + ls = dbdpb.DatabaseState_STOPPED + if req.Option != "" && !validShutdownOptions[req.Option] { + e := []string{fmt.Sprintf("illegal option %q requested for operation %q", req.Option, req.Operation)} + return &dbdpb.BounceDatabaseResponse{ + DatabaseState: dbdpb.DatabaseState_DATABASE_STATE_ERROR, + ErrorMsg: e, + }, nil + } + operation = "shutdown" + default: + return nil, fmt.Errorf("illegal operation requested: %q", req.Operation) + } + + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". + // When bouncing the DB, DB is not ready to run cmds or SQLs, it seems to be ok to block all other APIs for now. + s.databaseSid.Lock() + defer s.databaseSid.Unlock() + + // Sets env to bounce a database, needed for start and shutdown. + os.Setenv("ORACLE_SID", reqDatabaseName) + os.Setenv("ORACLE_HOME", s.databaseHome) + + var err error + shutdownEnumMap := map[string]godror.ShutdownMode{ + "immediate": godror.ShutdownImmediate, + "transactional": godror.ShutdownTransactional, + "abort": godror.ShutdownAbort, + } + if operation == "shutdown" { + // shutdownEnumMap keys should match validShutdownOptions above to prevent nil. + err = s.shutdownDatabase(ctx, shutdownEnumMap[req.Option]) + if err != nil && strings.Contains(err.Error(), "ORA-01034: ORACLE not available") { + klog.InfoS("dbdaemon/shutdownDatabase: database is already down", "err", err) + err = nil + } + } else { // startup + switch req.Option { + case "force_nomount": + err = s.startupDatabase(ctx, godror.StartupForce, "nomount") + default: + err = s.startupDatabase(ctx, godror.StartupDefault, req.Option) + } + } + + return &dbdpb.BounceDatabaseResponse{ + DatabaseState: ls, + ErrorMsg: nil, + }, err +} + +func (s *Server) runCommand(bin string, params []string) error { + // Sets env to bounce a database|listener. + os.Setenv("ORACLE_SID", s.databaseSid.val) + os.Setenv("ORACLE_HOME", s.databaseHome) + + return s.osUtil.runCommand(bin, params) +} + +// BounceListener is a Database Daemon method to start or stop a listener. +func (s *Server) BounceListener(_ context.Context, req *dbdpb.BounceListenerRequest) (*dbdpb.BounceListenerResponse, error) { + klog.InfoS("dbdaemon/BounceListener", "req", req, "serverObj", s) + + var ls dbdpb.ListenerState + var operation string + switch req.Operation { + case dbdpb.BounceListenerRequest_START: + ls = dbdpb.ListenerState_UP + operation = "start" + case dbdpb.BounceListenerRequest_STOP: + ls = dbdpb.ListenerState_DOWN + operation = "stop" + default: + return nil, fmt.Errorf("illegal operation %q requested for listener %q", req.Operation, req.ListenerName) + } + + // Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID". 
+	s.databaseSid.RLock()
+	defer s.databaseSid.RUnlock()
+
+	os.Setenv("TNS_ADMIN", req.TnsAdmin)
+	bin := lsnrctl(s.databaseHome)
+	params := []string{operation, req.ListenerName}
+	if err := s.runCommand(bin, params); err != nil {
+		return nil, fmt.Errorf("listener %q command %q failed: %v", req.ListenerName, req.Operation, err)
+	}
+
+	klog.InfoS("dbdaemon/BounceListener done", "req", req)
+	return &dbdpb.BounceListenerResponse{
+		ListenerState: ls,
+		ErrorMsg:      nil,
+	}, nil
+}
+
+// ProxyRunDbca executes dbca to create a database instance.
+func (s *Server) ProxyRunDbca(ctx context.Context, req *dbdpb.ProxyRunDbcaRequest) (*dbdpb.ProxyRunDbcaResponse, error) {
+	if err := os.Setenv("ORACLE_HOME", req.GetOracleHome()); err != nil {
+		return nil, fmt.Errorf("dbdaemon/ProxyRunDbca: set env ORACLE_HOME failed: %v", err)
+	}
+	s.databaseSid.Lock()
+	defer s.databaseSid.Unlock()
+	if err := s.osUtil.runCommand(dbca(req.GetOracleHome()), req.GetParams()); err != nil {
+		return nil, fmt.Errorf("dbca cmd failed: %v", err)
+	}
+
+	klog.InfoS("proxy/ProxyRunDbca: Initializing environment for Oracle...")
+	if err := initializeEnvironment(s, req.GetOracleHome(), req.GetDatabaseName()); err != nil {
+		return nil, err
+	}
+
+	klog.InfoS("proxy/ProxyRunDbca: Moving Oracle config files...")
+	if err := provision.MoveConfigFiles(req.GetOracleHome(), req.GetDatabaseName()); err != nil {
+		return nil, err
+	}
+
+	klog.InfoS("proxy/ProxyRunDbca: Creating symlinks to Oracle config files...")
+	if err := provision.RelinkConfigFiles(req.GetOracleHome(), req.GetDatabaseName()); err != nil {
+		return nil, err
+	}
+
+	klog.InfoS("proxy/ProxyRunDbca: DONE")
+
+	return &dbdpb.ProxyRunDbcaResponse{}, nil
+}
+
+// ProxyRunNID executes nid to rename a database instance.
+func (s *Server) ProxyRunNID(ctx context.Context, req *dbdpb.ProxyRunNIDRequest) (*dbdpb.ProxyRunNIDResponse, error) {
+	s.databaseSid.Lock()
+	defer s.databaseSid.Unlock()
+	if err := s.osUtil.runCommand(nid(s.databaseHome), req.GetParams()); err != nil {
+		return nil, fmt.Errorf("nid cmd failed: %v", err)
+	}
+	s.databaseSid.val = req.DestDbName
+	// We need to regenerate the env file with the new db name.
+	if err := createDotEnv(s.databaseHome, s.version, s.databaseSid.val); err != nil {
+		return nil, err
+	}
+	klog.InfoS("proxy/ProxyRunNID: DONE")
+
+	return &dbdpb.ProxyRunNIDResponse{}, nil
+}
+
+// SetEnv moves/relinks Oracle config files.
+func (s *Server) SetEnv(ctx context.Context, req *dbdpb.SetEnvRequest) (*dbdpb.SetEnvResponse, error) {
+	klog.InfoS("proxy/SetEnv", "req", req)
+	oracleHome := req.GetOracleHome()
+	cdbName := req.GetCdbName()
+	spfile := req.GetSpfilePath()
+	defaultSpfile := filepath.Join(fmt.Sprintf(consts.ConfigDir, consts.DataMount, cdbName), fmt.Sprintf("spfile%s.ora", cdbName))
+
+	// Move config files to their default locations first.
+	if spfile != defaultSpfile {
+		if err := provision.MoveFile(spfile, defaultSpfile); err != nil {
+			return &dbdpb.SetEnvResponse{}, fmt.Errorf("Proxy/SetEnv: failed to move spfile to default location: %v", err)
+		}
+	}
+
+	spfileLink := filepath.Join(oracleHome, "dbs", fmt.Sprintf("spfile%s.ora", cdbName))
+	if _, err := os.Stat(spfileLink); err == nil {
+		os.Remove(spfileLink)
+	}
+	if err := os.Symlink(defaultSpfile, spfileLink); err != nil {
+		return &dbdpb.SetEnvResponse{}, fmt.Errorf("Proxy/SetEnv symlink creation failed for %s: %v", defaultSpfile, err)
+	}
+	return &dbdpb.SetEnvResponse{}, nil
+}
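+
+// For reference, after SetEnv the spfile layout looks roughly like this
+// (illustrative CDB name MYDB; the config dir is derived from consts.ConfigDir):
+//
+//   <configDir>/spfileMYDB.ora           the actual parameter file
+//   $ORACLE_HOME/dbs/spfileMYDB.ora  ->  symlink to the file above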
+
+// initializeEnvironment sets Oracle-specific environment variables and
+// creates the <dbName>.env file.
+func initializeEnvironment(s *Server, home string, dbName string) error {
+	s.databaseHome = home
+	s.databaseSid.val = dbName
+	if err := os.Setenv("ORACLE_HOME", home); err != nil {
+		return fmt.Errorf("dbdaemon/initializeEnvironment: set env ORACLE_HOME failed: %v", err)
+	}
+	if err := createDotEnv(home, s.version, dbName); err != nil {
+		return err
+	}
+	return nil
+}
+
+func createDotEnv(dbHome, dbVersion, dbName string) error {
+	dotEnvFileName := fmt.Sprintf("%s/%s.env", consts.OracleDir, dbName)
+	dotEnvFile, err := os.Create(dotEnvFileName)
+	if err != nil {
+		return err
+	}
+	dotEnvFile.WriteString(fmt.Sprintf("export ORACLE_HOME=%s\n", dbHome))
+	dotEnvFile.WriteString(fmt.Sprintf("ORACLE_BASE=%s\n", common.GetSourceOracleBase(dbVersion)))
+	dotEnvFile.WriteString(fmt.Sprintf("export ORACLE_SID=%s\n", dbName))
+	dotEnvFile.WriteString(fmt.Sprintf("export PATH=%s/bin:%s/OPatch:/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin\n", dbHome, dbHome))
+	dotEnvFile.WriteString(fmt.Sprintf("export LD_LIBRARY_PATH=%s/lib:/usr/lib\n", dbHome))
+	return dotEnvFile.Close()
+}
+
+// New creates a new Database Daemon Server object.
+// It first gets called on a CDB provisioning and at this time
+// a PDB name is not known yet (to be supplied via a separate call).
+func New(hostname, cdbNameFromYaml string) (*Server, error) {
+	oracleHome, _, version, err := provision.FetchMetaDataFromImage(provision.MetaDataFile)
+	if err != nil {
+		return nil, fmt.Errorf("error while fetching metadata from image: %v", err)
+	}
+	s := &Server{hostName: hostname, osUtil: &osUtilImpl{}, databaseSid: &syncState{}, version: version}
+	klog.Infof("Initializing environment for Oracle...")
+	if err := initializeEnvironment(s, oracleHome, cdbNameFromYaml); err != nil {
+		return nil, fmt.Errorf("an error occurred while initializing the environment for Oracle: %v", err)
+	}
+	return s, nil
+}
diff --git a/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy_test.go b/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy_test.go
new file mode 100644
index 0000000..619ce8b
--- /dev/null
+++ b/oracle/pkg/database/dbdaemonproxy/dbdaemon_proxy_test.go
@@ -0,0 +1,666 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbdaemonproxy
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/godror/godror"
+	"github.com/google/go-cmp/cmp"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/test/bufconn"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+)
+
+type fakeOsUtil struct {
+	fakeRunCommand func(bin string, params []string) error
+}
+
+func (fake *fakeOsUtil) runCommand(bin string, params []string) error {
+	if fake.fakeRunCommand == nil {
+		return errors.New("fake impl not provided")
+	}
+	return fake.fakeRunCommand(bin, params)
+}
+
+type fakeDatabase struct {
+	fakeExecContext func(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+	fakeClose       func() error
+}
+
+func (fake *fakeDatabase) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+	if fake.fakeExecContext == nil {
+		return nil, errors.New("fake impl not provided")
+	}
+	// Spread args into the variadic call; passing `args` bare would hand the
+	// fake a single []interface{} argument instead of the original arguments.
+	return fake.fakeExecContext(ctx, query, args...)
+}
+
+func (fake *fakeDatabase) Close() error {
+	if fake.fakeClose == nil {
+		return errors.New("fake impl not provided")
+	}
+	return fake.fakeClose()
+}
+
+type fakeConn struct {
+	fakeStartup  func(godror.StartupMode) error
+	fakeShutdown func(godror.ShutdownMode) error
+}
+
+func (fake *fakeConn) Startup(mode godror.StartupMode) error {
+	if fake.fakeStartup == nil {
+		return errors.New("fake impl not provided")
+	}
+	return fake.fakeStartup(mode)
+}
+
+func (fake *fakeConn) Shutdown(mode godror.ShutdownMode) error {
+	if fake.fakeShutdown == nil {
+		return errors.New("fake impl not provided")
+	}
+	return fake.fakeShutdown(mode)
+}
+
+func TestBounceListener(t *testing.T) {
+	ctx := context.Background()
+	client, cleanup := newFakeDatabaseDaemonProxyClient(t,
+		&Server{
+			databaseSid: &syncState{val: "MYDB"},
+			osUtil: &fakeOsUtil{
+				fakeRunCommand: func(bin string, params []string) error {
+					return nil
+				},
+			},
+		},
+	)
+	defer cleanup()
+	testCases := []struct {
+		name    string
+		request *dbdpb.BounceListenerRequest
+		want    *dbdpb.BounceListenerResponse
+	}{
+		{
+			name: "valid listener start request",
+			request: &dbdpb.BounceListenerRequest{
+				ListenerName: "fakeListenerName",
+				TnsAdmin:     "fakeTnsAdmin",
+				Operation:    dbdpb.BounceListenerRequest_START,
+			},
+			want: &dbdpb.BounceListenerResponse{
+				ListenerState: dbdpb.ListenerState_UP,
+				ErrorMsg:      nil,
+			},
+		},
+		{
+			name: "valid listener stop request",
+			request: &dbdpb.BounceListenerRequest{
+				ListenerName: "fakeListenerName",
+				TnsAdmin:     "fakeTnsAdmin",
+				Operation:    dbdpb.BounceListenerRequest_STOP,
+			},
+			want: &dbdpb.BounceListenerResponse{
+				ListenerState: dbdpb.ListenerState_DOWN,
+				ErrorMsg:      nil,
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			got, err := client.BounceListener(ctx, tc.request)
+			if err != nil {
+				t.Fatalf("BounceListener(ctx) failed: %v", err)
+			}
+			if diff := cmp.Diff(tc.want.ListenerState, got.ListenerState); diff != "" {
+				t.Errorf("BounceListener got unexpected response: -want +got %v", diff)
+			}
+		})
+	}
+}
+
+func TestBounceListenerErrors(t *testing.T) {
+	ctx := context.Background()
+	server := &Server{
+		databaseSid: &syncState{val: "MYDB"},
+	}
+	client, cleanup := newFakeDatabaseDaemonProxyClient(t, server)
+	defer cleanup()
+	testCases := []struct {
+		name    string
+		util    osUtil
+		request *dbdpb.BounceListenerRequest
+	}{
+ { + name: "invalid listener wrong operation request", + util: &fakeOsUtil{ + fakeRunCommand: func(bin string, params []string) error { + return nil + }, + }, + request: &dbdpb.BounceListenerRequest{ + ListenerName: "fakeListenerName", + TnsAdmin: "fakeTnsAdmin", + Operation: dbdpb.BounceListenerRequest_UNKNOWN, + }, + }, + { + name: "failed to run lsnrctl", + util: &fakeOsUtil{ + fakeRunCommand: func(bin string, params []string) error { + return errors.New("fake error") + }, + }, + request: &dbdpb.BounceListenerRequest{ + ListenerName: "fakeListenerName", + TnsAdmin: "fakeTnsAdmin", + Operation: dbdpb.BounceListenerRequest_START, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + server.osUtil = tc.util + if _, err := client.BounceListener(ctx, tc.request); err == nil { + t.Fatalf("BounceListener(ctx) succeeded, want not-nil error") + } + }) + } +} + +func TestBounceDatabase(t *testing.T) { + db1 := &fakeDatabase{ + fakeExecContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, nil + }, + fakeClose: func() error { + return nil + }, + } + db2 := &fakeDatabase{ + fakeClose: func() error { + return nil + }, + } + + c := &fakeConn{ + fakeStartup: func(mode godror.StartupMode) error { + return nil + }, + } + + sqlOpenBak := sqlOpen + godrorDriverConnBak := godrorDriverConn + defer func() { + sqlOpen = sqlOpenBak + godrorDriverConn = godrorDriverConnBak + }() + + sqlOpen = func(driverName, dataSourceName string) (database, error) { + switch dataSourceName { + case "oracle://?sysdba=1&prelim=1": + return db1, nil + case "oracle://?sysdba=1": + return db2, nil + default: + return nil, fmt.Errorf("failed to find mock db for %s", dataSourceName) + } + } + godrorDriverConn = func(ctx context.Context, ex godror.Execer) (conn, error) { + return c, nil + } + + ctx := context.Background() + server := &Server{ + databaseSid: &syncState{val: "MYDB"}, + } + client, cleanup := newFakeDatabaseDaemonProxyClient(t, server) + defer cleanup() + testCases := []struct { + name string + request *dbdpb.BounceDatabaseRequest + wantSQLs []string + execContext func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + shutdown func(mode godror.ShutdownMode) error + }{ + { + name: "valid bounce CDB: startup (normal)", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + Option: "", + }, + wantSQLs: []string{ + "alter database mount", + "alter database open", + }, + execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, nil + }, + }, + { + name: "valid bounce CDB: startup (nomount)", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + Option: "nomount", + }, + execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, nil + }, + }, + { + name: "valid bounce CDB: startup (force_nomount)", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + Option: "force_nomount", + }, + execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, nil + }, + }, + { + name: "valid bounce CDB: startup (mount)", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + Option: "mount", + }, + wantSQLs: []string{ + "alter 
database mount",
+			},
+			execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				return nil, nil
+			},
+		},
+		{
+			name: "valid bounce CDB: startup (open)",
+			request: &dbdpb.BounceDatabaseRequest{
+				DatabaseName: "GCLOUD",
+				Operation:    dbdpb.BounceDatabaseRequest_STARTUP,
+				Option:       "open",
+			},
+			wantSQLs: []string{
+				"alter database mount",
+				"alter database open",
+			},
+			execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				return nil, nil
+			},
+		},
+		{
+			name: "valid bounce CDB: shutdown (normal)",
+			request: &dbdpb.BounceDatabaseRequest{
+				DatabaseName: "GCLOUD",
+				Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+				Option:       "",
+			},
+			wantSQLs: []string{
+				"alter database close normal",
+				"alter database dismount",
+			},
+			execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				return nil, nil
+			},
+		},
+		{
+			name: "valid bounce CDB: shutdown (normal) ignore database is already closed",
+			request: &dbdpb.BounceDatabaseRequest{
+				DatabaseName: "GCLOUD",
+				Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+				Option:       "",
+			},
+			wantSQLs: []string{
+				"alter database close normal",
+				"alter database dismount",
+			},
+			execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				return nil, errors.New("ORA-01507: already closed")
+			},
+		},
+		{
+			name: "valid bounce CDB: shutdown (normal) ignore oracle not available",
+			request: &dbdpb.BounceDatabaseRequest{
+				DatabaseName: "GCLOUD",
+				Operation:    dbdpb.BounceDatabaseRequest_SHUTDOWN,
+				Option:       "",
+			},
+			execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				return nil, nil
+			},
+			shutdown: func(mode godror.ShutdownMode) error {
+				return errors.New("ORA-01034: ORACLE not available")
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			var gotSQLs []string
+			db2.fakeExecContext = func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+				gotSQLs = append(gotSQLs, query)
+				// Record the statement, then spread args through to the
+				// per-test fake.
+				return tc.execContext(ctx, query, args...)
+			}
+			c.fakeShutdown = func(mode godror.ShutdownMode) error {
+				return nil
+			}
+			if tc.shutdown != nil {
+				c.fakeShutdown = tc.shutdown
+			}
+			if _, err := client.BounceDatabase(ctx, tc.request); err != nil {
+				t.Errorf("BounceDatabase(ctx, %v) failed: %v", tc.request, err)
+			}
+			if diff := cmp.Diff(tc.wantSQLs, gotSQLs); diff != "" {
+				t.Errorf("BounceDatabase got unexpected SQLs: -want +got %v", diff)
+			}
+		})
+	}
+}
+
+func TestBounceDatabaseErrors(t *testing.T) {
+	db1 := &fakeDatabase{
+		fakeExecContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+			return nil, nil
+		},
+		fakeClose: func() error {
+			return nil
+		},
+	}
+	db2 := &fakeDatabase{
+		fakeClose: func() error {
+			return nil
+		},
+	}
+
+	c := &fakeConn{}
+	sqlOpenBak := sqlOpen
+	godrorDriverConnBak := godrorDriverConn
+	defer func() {
+		sqlOpen = sqlOpenBak
+		godrorDriverConn = godrorDriverConnBak
+	}()
+
+	sqlOpen = func(driverName, dataSourceName string) (database, error) {
+		switch dataSourceName {
+		case "oracle://?sysdba=1&prelim=1":
+			return db1, nil
+		case "oracle://?sysdba=1":
+			return db2, nil
+		default:
+			return nil, fmt.Errorf("failed to find mock db for %s", dataSourceName)
+		}
+	}
+	godrorDriverConn = func(ctx context.Context, ex godror.Execer) (conn, error) {
+		return c, nil
+	}
+
+	ctx := context.Background()
+	server := &Server{
+		databaseSid: &syncState{val: 
"MYDB"}, + } + client, cleanup := newFakeDatabaseDaemonProxyClient(t, server) + defer cleanup() + testCases := []struct { + name string + request *dbdpb.BounceDatabaseRequest + execContext func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + startup func(mode godror.StartupMode) error + shutdown func(mode godror.ShutdownMode) error + }{ + { + name: "invalid bounce CDB", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_UNKNOWN, + }, + }, + { + name: "failed to startup CDB", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + }, + startup: func(mode godror.StartupMode) error { + return errors.New("fake error") + }, + }, + { + name: "failed to open CDB", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + }, + execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, errors.New("fake error") + }, + }, + { + name: "failed to shutdown CDB", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN, + }, + shutdown: func(mode godror.ShutdownMode) error { + return errors.New("fake error") + }, + }, + { + name: "failed to close CDB", + request: &dbdpb.BounceDatabaseRequest{ + DatabaseName: "GCLOUD", + Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN, + }, + execContext: func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, errors.New("fake error") + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db2.fakeExecContext = func(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return nil, nil + } + if tc.execContext != nil { + db2.fakeExecContext = tc.execContext + } + c.fakeStartup = func(mode godror.StartupMode) error { + return nil + } + if tc.startup != nil { + c.fakeStartup = tc.startup + } + c.fakeShutdown = func(mode godror.ShutdownMode) error { + return nil + } + if tc.shutdown != nil { + c.fakeShutdown = tc.shutdown + } + if _, err := client.BounceDatabase(ctx, tc.request); err == nil { + t.Errorf("BounceDatabase(ctx, %v) succeeded, want not-nil error", tc.request) + } + }) + } +} + +func TestProxyRunDbca(t *testing.T) { + ctx := context.Background() + server := &Server{ + databaseSid: &syncState{val: "MYDB"}, + } + client, cleanup := newFakeDatabaseDaemonProxyClient(t, server) + testDir, err := ioutil.TempDir("", "TestProxyRunDbca") + if err != nil { + t.Fatalf("failed to create test dir: %v", err) + } + fakeDBName := "DBCADB" + fakeHome := filepath.Join(testDir, "u01") + fakeDataMount := filepath.Join(testDir, "u02") + dataMountBak := consts.DataMount + consts.DataMount = fakeDataMount + fakeConfigDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, fakeDBName) + sourceConfigDir := filepath.Join(fakeHome, "dbs") + consts.OracleDir = "/tmp" + for _, dir := range []string{sourceConfigDir, fakeConfigDir} { + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatalf("failed to set up test dir %v", err) + } + } + for _, f := range []string{fmt.Sprintf("spfile%s.ora", fakeDBName), fmt.Sprintf("orapw%s", fakeDBName)} { + if _, err := os.Create(filepath.Join(sourceConfigDir, f)); err != nil { + t.Fatalf("failed to set up config file %v", err) + } + } + defer func() { + cleanup() + os.RemoveAll(testDir) + consts.DataMount = dataMountBak + }() + + testCases := 
[]struct {
+		name       string
+		request    *dbdpb.ProxyRunDbcaRequest
+		wantBin    string
+		wantParams []string
+		wantSid    string
+		wantDBHome string
+	}{
+		{
+			name: "RunDbca success",
+			request: &dbdpb.ProxyRunDbcaRequest{
+				OracleHome:   fakeHome,
+				DatabaseName: fakeDBName,
+				Params:       []string{"-createDatabase", "-databaseType", "MULTIPURPOSE"},
+			},
+			wantBin:    filepath.Join(fakeHome, "bin/dbca"),
+			wantParams: []string{"-createDatabase", "-databaseType", "MULTIPURPOSE"},
+			wantSid:    fakeDBName,
+			wantDBHome: fakeHome,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			var gotBin string
+			var gotParams []string
+			server.osUtil = &fakeOsUtil{
+				fakeRunCommand: func(bin string, params []string) error {
+					gotBin = bin
+					gotParams = append(gotParams, params...)
+					return nil
+				},
+			}
+			if _, err := client.ProxyRunDbca(ctx, tc.request); err != nil {
+				t.Fatalf("ProxyRunDbca(ctx, %v) failed: %v", tc.request, err)
+			}
+			if gotBin != tc.wantBin {
+				t.Errorf("ProxyRunDbca executed %v, want %v", gotBin, tc.wantBin)
+			}
+			if diff := cmp.Diff(tc.wantParams, gotParams); diff != "" {
+				t.Errorf("ProxyRunDbca executed unexpected params: -want +got %v", diff)
+			}
+			if server.databaseSid.val != tc.wantSid {
+				t.Errorf("ProxyRunDbca set sid to %v, want %v", server.databaseSid.val, tc.wantSid)
+			}
+			if server.databaseHome != tc.wantDBHome {
+				t.Errorf("ProxyRunDbca set DB home to %v, want %v", server.databaseHome, tc.wantDBHome)
+			}
+		})
+	}
+}
+
+func TestProxyRunDbcaErrors(t *testing.T) {
+	ctx := context.Background()
+	server := &Server{
+		databaseSid: &syncState{val: "MYDB"},
+	}
+	client, cleanup := newFakeDatabaseDaemonProxyClient(t, server)
+	testDir, err := ioutil.TempDir("", "TestProxyRunDbca")
+	if err != nil {
+		t.Fatalf("failed to create test dir: %v", err)
+	}
+	fakeDBName := "DBCADB"
+	fakeHome := filepath.Join(testDir, "u01")
+	fakeDataMount := filepath.Join(testDir, "u02")
+	dataMountBak := consts.DataMount
+	consts.DataMount = fakeDataMount
+	fakeConfigDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, fakeDBName)
+	for _, dir := range []string{filepath.Join(fakeHome, "dbs"), fakeConfigDir} {
+		if err := os.MkdirAll(dir, 0755); err != nil {
+			t.Fatalf("failed to set up test dir %v", err)
+		}
+	}
+	for _, f := range []string{fmt.Sprintf("spfile%s.ora", fakeDBName), fmt.Sprintf("orapw%s", fakeDBName)} {
+		if _, err := os.Create(filepath.Join(fakeConfigDir, f)); err != nil {
+			t.Fatalf("failed to set up config file %v", err)
+		}
+	}
+	defer func() {
+		cleanup()
+		os.RemoveAll(testDir)
+		consts.DataMount = dataMountBak
+	}()
+
+	request := &dbdpb.ProxyRunDbcaRequest{
+		OracleHome:   fakeHome,
+		DatabaseName: fakeDBName,
+		Params:       []string{"-createDatabase", "-databaseType", "MULTIPURPOSE"},
+	}
+
+	server.osUtil = &fakeOsUtil{
+		fakeRunCommand: func(bin string, params []string) error {
+			return errors.New("fake error")
+		},
+	}
+	if _, err := client.ProxyRunDbca(ctx, request); err == nil {
+		t.Fatalf("ProxyRunDbca(ctx, %v) succeeded, want not-nil error", request)
+	}
+}
+
+func newFakeDatabaseDaemonProxyClient(t *testing.T, server *Server) (dbdpb.DatabaseDaemonProxyClient, func()) {
+	t.Helper()
+	grpcSvr := grpc.NewServer()
+
+	dbdpb.RegisterDatabaseDaemonProxyServer(grpcSvr, server)
+	// bufconn gives the test an in-memory listener, so no real network
+	// ports are used.
+	lis := bufconn.Listen(2 * 1024 * 1024)
+	go grpcSvr.Serve(lis)
+
+	dbdConn, err := grpc.Dial("test",
+		grpc.WithInsecure(),
+		grpc.WithContextDialer(
+			func(ctx context.Context, s string) (conn net.Conn, err error) {
+				return lis.Dial()
+			}),
+	)
+	if err != nil {
+		t.Fatalf("failed to dial 
to dbDaemon: %v", err) + } + return dbdpb.NewDatabaseDaemonProxyClient(dbdConn), func() { + dbdConn.Close() + grpcSvr.GracefulStop() + } +} diff --git a/oracle/pkg/database/lib/detach/BUILD.bazel b/oracle/pkg/database/lib/detach/BUILD.bazel new file mode 100644 index 0000000..21af955 --- /dev/null +++ b/oracle/pkg/database/lib/detach/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "detach", + srcs = ["detach.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/lib/detach", + visibility = ["//visibility:public"], +) + +go_test( + name = "detach_test", + srcs = ["detach_test.go"], + embed = [":detach"], +) diff --git a/oracle/pkg/database/lib/detach/detach.go b/oracle/pkg/database/lib/detach/detach.go new file mode 100644 index 0000000..a590207 --- /dev/null +++ b/oracle/pkg/database/lib/detach/detach.go @@ -0,0 +1,76 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package detach provides basic blocks for running goroutines +// with a detached context. +package detach + +import ( + "context" + "sync" + "time" +) + +type ctx struct { + done <-chan struct{} + closed <-chan struct{} +} + +func (c ctx) Value(key interface{}) interface{} { return nil } +func (c ctx) Deadline() (deadline time.Time, ok bool) { return } +func (c ctx) Done() <-chan struct{} { return c.done } + +func (c ctx) Err() error { + select { + case <-c.done: + return context.Canceled + default: + return nil + } +} + +// A Task describes the status of a detached function call started by calling Go. +type Task struct { + cancel chan<- struct{} + cancelOnce *sync.Once + closed <-chan struct{} +} + +// Go starts a new goroutine with a detached context and +// returns a Task that can be used to cancel and/or wait for the function to +// return. +func Go(f func(context.Context)) Task { + closeChan := make(chan struct{}) + cancelChan := make(chan struct{}) + dCtx := ctx{ + closed: closeChan, + done: cancelChan, + } + + go func() { + defer close(closeChan) + f(dCtx) + }() + + return Task{cancelChan, new(sync.Once), closeChan} +} + +// Cancel cancels a Task. +// After the first call, subsequent calls to Cancel do nothing. +func (t Task) Cancel() { + t.cancelOnce.Do(func() { close(t.cancel) }) +} + +// Finished returns a channel that is closed on the task function completion. +func (t Task) Finished() <-chan struct{} { return t.closed } diff --git a/oracle/pkg/database/lib/detach/detach_test.go b/oracle/pkg/database/lib/detach/detach_test.go new file mode 100644 index 0000000..11d4605 --- /dev/null +++ b/oracle/pkg/database/lib/detach/detach_test.go @@ -0,0 +1,67 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package detach + +import ( + "context" + "testing" + "time" +) + +func TestGoFinished(t *testing.T) { + + ran := false + var goroutineCtx context.Context + + task := Go(func(ctx context.Context) { + ran = true + goroutineCtx = ctx + }) + <-task.Finished() + + if !ran { + t.Errorf("TestGoFinished: background task did not run") + } + + if err := goroutineCtx.Err(); err != nil { + t.Errorf("TestGoFinished: unexpected context error %v", err) + } +} + +func TestGoCancelled(t *testing.T) { + cancelled := false + var goroutineCtx context.Context + + task := Go(func(ctx context.Context) { + goroutineCtx = ctx + + select { + case <-ctx.Done(): + cancelled = true + case <-time.After(time.Second): + break + } + }) + task.Cancel() + <-task.Finished() + + if !cancelled { + t.Errorf("TestGoCancelled: expected goroutine context to be cancelled") + } + + if err := goroutineCtx.Err(); err != context.Canceled { + t.Errorf("TestGoCancelled: context.Err() want %v got %v", context.Canceled, err) + } +} diff --git a/oracle/pkg/database/lib/lro/BUILD.bazel b/oracle/pkg/database/lib/lro/BUILD.bazel new file mode 100644 index 0000000..5e24718 --- /dev/null +++ b/oracle/pkg/database/lib/lro/BUILD.bazel @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "lro", + srcs = [ + "job.go", + "server.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/lib/lro", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/database/lib/detach", + "@com_github_google_uuid//:uuid", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@go_googleapis//google/rpc:status_go_proto", + "@io_k8s_klog_v2//:klog", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/emptypb", + ], +) + +go_test( + name = "lro_test", + srcs = [ + "job_test.go", + "server_test.go", + ], + embed = [":lro"], + deps = [ + "//oracle/pkg/agents/oracle", + "@com_github_google_go_cmp//cmp", + "@com_github_google_uuid//:uuid", + "@go_googleapis//google/longrunning:longrunning_go_proto", + "@io_k8s_klog_v2//:klog", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/anypb", + ], +) diff --git a/oracle/pkg/database/lib/lro/job.go b/oracle/pkg/database/lib/lro/job.go new file mode 100644 index 0000000..97d2582 --- /dev/null +++ b/oracle/pkg/database/lib/lro/job.go @@ -0,0 +1,251 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lro + +import ( + "context" + "fmt" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + log "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/lib/detach" +) + +const ( + // TaskIDMetadataTag is the tag for the task ID in gRPC metadata. + TaskIDMetadataTag = "taskID" + + taskTimeOutMetadataTag = "taskTimeoutSec" + + // JobStartIndicator is a log line we use to identify when the job has been started. + JobStartIndicator = "CreateAndRunLROJobWithID: Create and run LRO job with id %v" +) + +const ( + completionStatusOK = "OK" + completionStatusError = "Error" +) + +// Job represents a long running operation and its metadata. +type Job struct { + id string + name string + + resp *anypb.Any + err error + + lro *Server + + call func(ctx context.Context) (proto.Message, error) + task *detach.Task +} + +// Cancel cancels the job. +func (j *Job) Cancel() error { + log.Infof("Cancel: job [%s] is cancelled", j.id) + j.task.Cancel() + return nil +} + +// Delete deletes the job. +// It's not implemented yet. +func (j *Job) Delete() error { + if j.IsDone() { + return nil + } + return status.Errorf(codes.Aborted, "Can't delete job with ID %q while it's still running", j.id) +} + +// Status gets the current status of the job, returning error or response on completion. +func (j *Job) Status() (bool, *anypb.Any, error) { + if !j.IsDone() { + return false, nil, nil + } + + if j.err != nil { + log.Errorf("Job %v failed with error=%v", j.id, j.err) + return true, nil, j.err + } + + return true, j.resp, nil +} + +// Wait waits for the job to complete or timeout. +func (j *Job) Wait(timeout time.Duration) error { + var err error + select { + case <-j.task.Finished(): + log.Infof("Job with ID %q has finished", j.id) + err = nil + case <-time.After(timeout): + log.Infof("Job with ID %q has timed out", j.id) + err = status.Errorf(codes.DeadlineExceeded, "LRO job with ID %q didn't complete in time", j.id) + } + + return err +} + +// catchPanic catches the panic to prevent the program from being shut down, and properly handles +// the state of the job. +func catchPanic(j *Job, f func(context.Context)) func(context.Context) { + return func(ctx context.Context) { + defer func() { + if r := recover(); r != nil { + e := fmt.Errorf("caught panic in agent execution. 
Panic Message: %v", r)
+				log.Error(e)
+				j.err = e
+
+				j.lro.EndOperation(j.id, completionStatusError)
+			}
+		}()
+		f(ctx)
+	}
+}
+
+func taskTimeout(context context.Context) (time.Duration, error) {
+	md, ok := metadata.FromIncomingContext(context)
+	if !ok {
+		return 0, fmt.Errorf("context has no timeout info")
+	}
+
+	data := md.Get(taskTimeOutMetadataTag)
+	if len(data) == 0 || data[0] == "" {
+		return 0, fmt.Errorf("failed to parse the timeout info from the metadata")
+	}
+
+	if len(data) > 1 {
+		log.Warningf("taskTimeout: more than one timeout value in the metadata %v", data)
+	}
+
+	return time.ParseDuration(fmt.Sprintf("%ss", data[0]))
+}
+
+// start uses detach.Go to start an async job.
+func (j *Job) start(ctx context.Context) {
+	log.Infof("Start job with ID %s", j.id)
+	// A missing or malformed timeout in the metadata simply means the job
+	// runs without a deadline, so the error is deliberately ignored.
+	timeOutDuration, _ := taskTimeout(ctx)
+	task := detach.Go(catchPanic(j, func(jobCtx context.Context) {
+		var resp proto.Message
+		if timeOutDuration > 0 {
+			var cancel context.CancelFunc
+			jobCtx, cancel = context.WithTimeout(jobCtx, timeOutDuration)
+			defer cancel()
+		}
+
+		resp, j.err = j.call(jobCtx)
+		if resp == nil {
+			j.resp = nil
+		} else if any, ok := resp.(*anypb.Any); ok {
+			j.resp = any
+		} else {
+			any := &anypb.Any{}
+			if err := any.MarshalFrom(resp); err != nil {
+				j.err = status.Errorf(codes.Internal, "Failed to marshal response to any: %v", err)
+			}
+			j.resp = any
+		}
+		if j.err == nil {
+			j.lro.EndOperation(j.id, completionStatusOK)
+		} else {
+			j.lro.EndOperation(j.id, completionStatusError)
+		}
+	}))
+	j.task = &task
+}
+
+// IsDone returns whether the job is done.
+func (j *Job) IsDone() bool {
+	select {
+	case <-j.task.Finished():
+		return true
+	default:
+		return false
+	}
+}
+
+// ID returns the ID of the job.
+func (j *Job) ID() string {
+	return j.id
+}
+
+// Name returns the name of the job.
+func (j *Job) Name() string {
+	return j.name
+}
+
+// CreateJobID creates a new job id based on uuid.
+func CreateJobID() string {
+	return "Job" + "_" + uuid.New().String()
+}
+
+func addAndStartJob(ctx context.Context, lro *Server, job *Job) (*Job, error) {
+	if err := lro.AddJob(job.id, job); err != nil {
+		if status.Code(err) == codes.AlreadyExists {
+			log.Warningf("LRO with job id %q already exists", job.id)
+			return job, nil
+		}
+
+		return nil, fmt.Errorf("failed to add job for id=%v: %w", job.id, err)
+	}
+	log.Infof(JobStartIndicator, job.id)
+	job.start(ctx)
+	return job, nil
+}
+
+// CreateAndRunLROJobWithID creates an LRO job that can be cancelled,
+// registers it with the server under the given id (generating a new id if
+// it is empty), and starts it. The call function passed in is the body of
+// the operation.
+var CreateAndRunLROJobWithID = func(ctx context.Context, id, name string, lro *Server, call func(ctx context.Context) (proto.Message, error)) (*Job, error) {
+	if id == "" {
+		id = CreateJobID()
+	}
+	job := &Job{
+		id:   id,
+		name: name,
+		call: call,
+		lro:  lro,
+	}
+
+	return addAndStartJob(ctx, lro, job)
+}
+
+// CreateAndRunLROJobWithContext creates an LRO job that can be cancelled,
+// registers it with the server, and starts it. The call function passed in
+// is the body of the operation. The job id is pulled from the gRPC metadata
+// in ctx; if none is present, a new id is generated.
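+//
+// A minimal usage sketch from a hypothetical gRPC handler (s.lroServer and
+// doWork are illustrative names, not part of this change):
+//
+//	job, err := lro.CreateAndRunLROJobWithContext(ctx, "Restore", s.lroServer,
+//		func(ctx context.Context) (proto.Message, error) {
+//			return doWork(ctx) // the long-running body
+//		})
+//	if err != nil {
+//		return nil, err
+//	}
+//	// Clients can later poll the job via GetOperation using job.ID().
+//
+// If the incoming context carries "taskID" metadata, a retried RPC with the
+// same id does not start the work a second time: AddJob reports
+// AlreadyExists and the original job keeps running.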
+func CreateAndRunLROJobWithContext(ctx context.Context, name string, lro *Server, call func(ctx context.Context) (proto.Message, error)) (*Job, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return CreateAndRunLROJobWithID(ctx, "", name, lro, call) + } + var id string + data := md.Get(TaskIDMetadataTag) + if len(data) > 0 && data[0] != "" { + if len(data) > 1 { + log.Warningf("More than one task id in the metadata %v", data) + } + id = data[0] + } + return CreateAndRunLROJobWithID(ctx, id, name, lro, call) +} diff --git a/oracle/pkg/database/lib/lro/job_test.go b/oracle/pkg/database/lib/lro/job_test.go new file mode 100644 index 0000000..c521e0a --- /dev/null +++ b/oracle/pkg/database/lib/lro/job_test.go @@ -0,0 +1,353 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lro + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +const ( + panicMsg = "fakeJob panicked" + fakeJobWaitTime = time.Second +) + +type testEnv struct { + resp proto.Message + shouldPanic bool + err error + done chan struct{} +} + +func (t testEnv) fakeJob(context.Context) (proto.Message, error) { + if t.done != nil { + <-t.done + } + + if t.shouldPanic { + panic(panicMsg) + } + + if t.err != nil { + return nil, t.err + } + return t.resp, t.err +} + +func TestCreateAndRunLROJobWithID(t *testing.T) { + ctx := context.Background() + lro := NewServer(context.Background()) + lroID := "TestCreateAndRunLROJob" + + jobFunc := func(context.Context) (proto.Message, error) { + return nil, nil + } + + if job, err := CreateAndRunLROJobWithID(ctx, lroID, "Test", lro, jobFunc); err != nil || job == nil { + t.Errorf("CreateAndRunLROJobWithID failed to create LRO job with err=%v.", err) + } + + if job, err := CreateAndRunLROJobWithID(ctx, lroID, "Test", lro, jobFunc); err != nil || job == nil { + t.Errorf("CreateAndRunLROJobWithID failed to create LRO job with the same id with err=%v.", err) + } +} + +func TestCreateAndRunLROJobWithContext(t *testing.T) { + lro := NewServer(context.Background()) + id := "TestCreateAndRunLROJob" + + jobFunc := func(context.Context) (proto.Message, error) { + return nil, nil + } + + md := metadata.New( + map[string]string{ + "taskID": id, + }) + ctx := metadata.NewIncomingContext(context.Background(), md) + + if job, err := CreateAndRunLROJobWithContext(ctx, "Test", lro, jobFunc); err != nil || job == nil { + t.Errorf("CreateAndRunLROJobWithContext failed to create LRO job with err=%v.", err) + } + + if job, err := CreateAndRunLROJobWithContext(ctx, "Test", lro, jobFunc); err != nil || job == nil { + t.Errorf("CreateAndRunLROJobWithContext failed to create LRO job with the same id with err=%v.", err) + } +} + +func 
TestCreateAndRunLROJobWithTimeout(t *testing.T) { + ctx := context.TODO() + lro := NewServer(ctx) + + jobFunc := func(jobCtx context.Context) (proto.Message, error) { + if _, deadlineSet := jobCtx.Deadline(); !deadlineSet { + t.Errorf("ctx.Deadline() = _, false, want deadline set") + } + return nil, nil + } + + md := metadata.New( + map[string]string{ + taskTimeOutMetadataTag: "3", + }) + + ctx = metadata.NewIncomingContext(ctx, md) + + _, _ = CreateAndRunLROJobWithContext(ctx, "Test", lro, jobFunc) +} + +func TestCancelJob(t *testing.T) { + tests := []struct { + name string + returnImmediately bool + wantErr bool + }{ + { + name: "CancelFinished", + wantErr: false, + }, + { + name: "CancelRunning", + returnImmediately: false, + wantErr: false, + }, + } + ctx := context.Background() + lro := NewServer(ctx) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + env := testEnv{} + if !tc.returnImmediately { + env.done = make(chan struct{}) + } + fakeJob := &Job{call: env.fakeJob, lro: lro} + fakeJob.start(ctx) + if tc.returnImmediately { + if err := fakeJob.Wait(fakeJobWaitTime); err != nil { + t.Error("fakeJob timed out") + } + } + if err := fakeJob.Cancel(); tc.wantErr != (err != nil) { + t.Errorf("TestCancelJob(%v) failed: gotErr=%v,wantErr =%v", tc.name, err, tc.wantErr) + } + if !tc.returnImmediately { + close(env.done) + } + }) + } +} + +func TestDeleteJob(t *testing.T) { + tests := []struct { + name string + returnImmediately bool + expectedError bool + }{ + { + name: "DeleteSuccess", + returnImmediately: true, + expectedError: false, + }, + { + name: "DeleteFail", + returnImmediately: false, + expectedError: true, + }, + } + ctx := context.Background() + lro := NewServer(ctx) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + env := testEnv{} + if !tc.returnImmediately { + env.done = make(chan struct{}) + } + + fakeJob := &Job{call: env.fakeJob, lro: lro} + fakeJob.start(ctx) + if tc.returnImmediately { + if err := fakeJob.Wait(fakeJobWaitTime); err != nil { + t.Error("fakeJob timed out") + } + } + if err := fakeJob.Delete(); tc.expectedError != (err != nil) { + t.Errorf("TestDeleteJob(%v) failed. expected error: %v.", tc.name, tc.expectedError) + } + + if !tc.returnImmediately { + close(env.done) + } + }) + } +} + +func TestWaitJob(t *testing.T) { + tests := []struct { + name string + err error + returnImmediately bool + expectedError bool + }{ + { + name: "WaitSuccess", + err: status.Error(codes.Unknown, "Fail"), + returnImmediately: true, + expectedError: false, + }, + { + name: "WaitTimeOut", + err: status.Error(codes.Unknown, "Fail"), + returnImmediately: false, + expectedError: true, + }, + } + ctx := context.Background() + lro := NewServer(ctx) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + env := testEnv{} + if !tc.returnImmediately { + env.done = make(chan struct{}) + } + + fakeJob := &Job{call: env.fakeJob, lro: lro} + fakeJob.start(ctx) + + if err := fakeJob.Wait(fakeJobWaitTime); tc.expectedError != (err != nil) { + t.Errorf("TestWaitJob(%v) failed. 
Wait expected error: %v.", tc.name, tc.expectedError) + } + + if !tc.returnImmediately { + close(env.done) + } + }) + } +} + +func TestGetStatus(t *testing.T) { + tests := []struct { + name string + err error + returnImmediately bool + expectedFinished bool + expectedError bool + }{ + { + name: "FinishedSuccess", + err: nil, + returnImmediately: true, + expectedFinished: true, + expectedError: false, + }, + { + name: "FinishedWithError", + err: status.Error(codes.Unknown, "Fail"), + returnImmediately: true, + expectedFinished: true, + expectedError: true, + }, + { + name: "NotFinished", + err: status.Error(codes.Unknown, "Fail"), + returnImmediately: false, + expectedFinished: false, + expectedError: false, + }, + } + ctx := context.Background() + lro := NewServer(ctx) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + envJobResponse := &dbdpb.FileExistsResponse{Exists: true} + env := &testEnv{err: tc.err, resp: envJobResponse} + if !tc.returnImmediately { + env.done = make(chan struct{}) + } + + fakeJob := &Job{call: env.fakeJob, lro: lro} + fakeJob.start(ctx) + + // Call GetStatus multiple times and expect to get the same result + for i := 0; i < 2; i++ { + if tc.returnImmediately { + if err := fakeJob.Wait(fakeJobWaitTime); err != nil { + t.Error("fakeJob timed out") + } + } + finished, result, err := fakeJob.Status() + + if tc.expectedFinished != finished { + t.Errorf("TestGetStatus(%v) failed. GetStatus returns finished=%v when %v is expected", tc.name, finished, tc.expectedFinished) + } + + if tc.expectedError != (err != nil) { + t.Errorf("TestGetStatus(%v) failed. GetStatus expected error: %v", tc.name, tc.expectedError) + } + + if tc.expectedFinished && !tc.expectedError { + if result == nil { + t.Errorf("TestGetStatus(%v) failed. GetStatus returns no result when result is expected.", tc.name) + } else { + message, err := result.UnmarshalNew() + if err != nil { + t.Errorf("TestGetStatus(%v) failed. UnmarshalNew returned an error %v.", tc.name, err) + } + feResult := message.(*dbdpb.FileExistsResponse) + if diff := cmp.Diff(envJobResponse, feResult, protocmp.Transform()); diff != "" { + t.Errorf("response wrong -want +got: %v", diff) + } + + } + } else if result != nil { + t.Errorf("TestGetStatus(%v) failed. GetStatus returns result %v when it shouldn't.", tc.name, result) + } + } + + if !tc.returnImmediately { + close(env.done) + } + }) + } +} + +func TestCatchPanic(t *testing.T) { + ctx := context.Background() + lro := NewServer(ctx) + env := &testEnv{shouldPanic: true, resp: &dbdpb.FileExistsResponse{Exists: true}} + fakeJob := &Job{call: env.fakeJob, lro: lro} + fakeJob.start(ctx) + + if err := fakeJob.Wait(fakeJobWaitTime); err != nil { + t.Errorf("fakeJob.Wait returned error: %v", err) + } + + if fakeJob.resp != nil { + t.Errorf("fakeJob.resp = %v; want nil", fakeJob.resp) + } + if !strings.Contains(fakeJob.err.Error(), panicMsg) { + t.Errorf("got error %s; want %s", fakeJob.err.Error(), panicMsg) + } +} diff --git a/oracle/pkg/database/lib/lro/server.go b/oracle/pkg/database/lib/lro/server.go new file mode 100644 index 0000000..fe119fa --- /dev/null +++ b/oracle/pkg/database/lib/lro/server.go @@ -0,0 +1,347 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lro contains an implementation of +// https://pkg.go.dev/google.golang.org/genproto/googleapis/longrunning#OperationsServer +package lro + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + opspb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/emptypb" + log "k8s.io/klog/v2" +) + +const ( + defaultPageSize int = 10 + // DefaultWaitOperationTimeOut is the timeout for WaitOperation. + DefaultWaitOperationTimeOut = 1 * time.Hour + + ttlAfterDelete = 10 * time.Minute + ttlAfterComplete = 12 * time.Hour + + jobCleanupInterval = time.Minute +) + +type job interface { + // Cancel errors if the job is not cancelable. + Cancel() error + // Delete is called on job deletion to clean up resources held by the job. + Delete() error + // done, result, error. This is done in one call to be thread safe. + Status() (bool, *anypb.Any, error) + // Waits until the task is done: result, error. This should use wait groups or something else to do an async wait. + Wait(timeout time.Duration) error + // IsDone returns if the job has completed. + IsDone() bool + // Name returns the job name for metrics/logging purposes. + Name() string +} + +type ttlJob struct { + job job + startTime time.Time + completeTime time.Time + + mu sync.Mutex + deleteTime time.Time +} + +// Server is a gRPC based operation server which +// implements google/longrunning/operations.proto . +type Server struct { + mu sync.Mutex + jobs map[string]*ttlJob +} + +// GetOperation gets the status of the LRO operation. +// It is the implementation of GetOperation in +// google/longrunning/operations.proto. +func (s *Server) GetOperation(_ context.Context, request *opspb.GetOperationRequest) (*opspb.Operation, error) { + job, err := s.validateAndGetOperation(request.GetName()) + if err != nil { + return nil, err + } + + jobID := request.GetName() + resp := GetOperationData(jobID, job.job) + + return resp, nil +} + +// CancelOperation cancels a long running operation. +// It is the implementation of CancelOperation +// in google/longrunning/operations.proto. +func (s *Server) CancelOperation(_ context.Context, request *opspb.CancelOperationRequest) (*emptypb.Empty, error) { + job, err := s.validateAndGetOperation(request.GetName()) + if err != nil { + return nil, err + } + + return &emptypb.Empty{}, job.job.Cancel() +} + +// ListOperations is part of google/longrunning/operations.proto. +// It is not implemented fully yet. 
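+//
+// A hedged sketch of the paging contract (caller-side, illustrative): keep
+// passing the returned NextPageToken back until it comes back empty.
+//
+//	token := ""
+//	for {
+//		resp, err := s.ListOperations(ctx, &opspb.ListOperationsRequest{PageSize: 10, PageToken: token})
+//		if err != nil {
+//			break
+//		}
+//		// ... consume resp.GetOperations() ...
+//		if token = resp.GetNextPageToken(); token == "" {
+//			break
+//		}
+//	}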
+func (s *Server) ListOperations(_ context.Context, request *opspb.ListOperationsRequest) (*opspb.ListOperationsResponse, error) {
+	pageSize := int(request.GetPageSize())
+	if pageSize == 0 {
+		pageSize = defaultPageSize
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	// Walk the jobs in sorted id order.
+	var operations []*opspb.Operation
+	var nextID string
+	for _, id := range sortedMapKeys(s.jobs) {
+		// Skip ids that sort before the page token.
+		if request.GetPageToken() == "" || request.GetPageToken() <= id {
+			if len(operations) >= pageSize {
+				nextID = id
+				break
+			}
+			job := s.jobs[id]
+			operations = append(operations, GetOperationData(id, job.job))
+		}
+	}
+	return &opspb.ListOperationsResponse{Operations: operations, NextPageToken: nextID}, nil
+}
+
+// DeleteOperation is part of google/longrunning/operations.proto.
+// Deletion is deferred: the job is only marked here and is removed later by
+// the cleanup loop once ttlAfterDelete has elapsed.
+func (s *Server) DeleteOperation(_ context.Context, request *opspb.DeleteOperationRequest) (*emptypb.Empty, error) {
+	job, err := s.validateAndGetOperation(request.GetName())
+	if err != nil {
+		return nil, err
+	}
+
+	job.mu.Lock()
+	defer job.mu.Unlock()
+	job.deleteTime = time.Now()
+
+	return &emptypb.Empty{}, nil
+}
+
+// WaitOperation is part of google/longrunning/operations.proto.
+func (s *Server) WaitOperation(_ context.Context, request *opspb.WaitOperationRequest) (*opspb.Operation, error) {
+	job, err := s.validateAndGetOperation(request.GetName())
+	if err != nil {
+		return nil, err
+	}
+
+	duration := DefaultWaitOperationTimeOut
+	if timeout := request.GetTimeout(); timeout != nil {
+		err = timeout.CheckValid()
+		if err != nil {
+			return nil, grpcstatus.Errorf(codes.InvalidArgument, "Invalid timeout %v for WaitOperation", timeout)
+		}
+		duration = timeout.AsDuration()
+	}
+
+	j := job.job
+	// Wait for the operation to finish and then return the result.
+	if err := j.Wait(duration); err != nil {
+		// Error on the wait itself.
+		log.Infof("WaitOperation: failed to wait for job %v error=%v", request.GetName(), err)
+		return nil, err
+	}
+
+	return GetOperationData(request.GetName(), j), nil
+}
+
+// DeleteExpiredJobs deletes jobs that are considered expired.
+func (s *Server) DeleteExpiredJobs(ttlAfterDelete time.Duration, ttlAfterComplete time.Duration) {
+	now := time.Now()
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for id, j := range s.jobs {
+		shouldDelete := false
+
+		// Check if Delete has been explicitly called for this job.
+		if isDeletedJobExpired(j, now, ttlAfterDelete) {
+			shouldDelete = true
+		}
+
+		// Record the completion time the first time the job is seen done.
+		if j.completeTime.IsZero() && j.job.IsDone() {
+			j.completeTime = now
+		}
+
+		// Expire jobs that have been complete longer than ttlAfterComplete.
+		if !j.completeTime.IsZero() && now.Sub(j.completeTime) > ttlAfterComplete {
+			shouldDelete = true
+		}
+
+		if shouldDelete {
+			delete(s.jobs, id)
+			if err := j.job.Delete(); err != nil {
+				log.Warningf("Job %v deletion returned an error: %v", id, err)
+			} else {
+				log.Infof("Job %v has been deleted.", id)
+			}
+		}
+	}
+}
+
+func (s *Server) validateAndGetOperation(operationID string) (*ttlJob, error) {
+	if operationID == "" {
+		return nil, grpcstatus.Error(codes.InvalidArgument, "bad request: empty operation ID")
+	}
+
+	job, ok := s.getJob(operationID)
+	if !ok {
+		return nil, grpcstatus.Errorf(codes.NotFound, "LRO with ID %q NOT found", operationID)
+	}
+	return job, nil
+}
+
+// AddJob adds a job into the server to be tracked.
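+// A second AddJob with the same id fails with codes.AlreadyExists, which
+// callers may treat as idempotent success (addAndStartJob in job.go does).
+// A short sketch of that contract (illustrative):
+//
+//	if err := s.AddJob(id, j); err != nil &&
+//		grpcstatus.Code(err) != codes.AlreadyExists {
+//		return err // genuinely failed; otherwise the job is already tracked
+//	}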
+func (s *Server) AddJob(id string, job job) error { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.jobs[id]; ok { + log.Warningf("Job %v already exists", id) + return grpcstatus.Errorf(codes.AlreadyExists, "LRO with ID %q already exists", id) + } + + // Start the operation if we know it doesn't exist. + s.startOperation(job.Name()) + s.jobs[id] = &ttlJob{job: job, startTime: time.Now()} + return nil +} + +func (s *Server) getJob(id string) (*ttlJob, bool) { + s.mu.Lock() + defer s.mu.Unlock() + j, ok := s.jobs[id] + return j, ok +} + +func (s *Server) deleteJob(id string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.jobs, id) + log.Infof("Job %v has been deleted.", id) +} + +func cleanup(ctx context.Context, lro *Server) { + log.Info("Starting cleanup goroutine.") + tick := time.NewTicker(jobCleanupInterval) + defer tick.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-tick.C: + lro.DeleteExpiredJobs(ttlAfterDelete, ttlAfterComplete) + } + } +} + +// NewServer returns Long running operation server. +func NewServer(ctx context.Context) *Server { + lro := &Server{ + jobs: make(map[string]*ttlJob), + } + go cleanup(ctx, lro) + return lro +} + +// EndOperation records the result of the operation. +func (s *Server) EndOperation(id string, status string) { + if job, ok := s.getJob(id); ok { + log.Infof("EndOperation: job %v status %v", job.job.Name(), status) + } +} + +// WaitAndUnmarshalResult waits until the operation with the opName finishes, +// and either populates the result or the error. +func (s *Server) WaitAndUnmarshalResult(ctx context.Context, opName string, targetProto proto.Message) error { + op, err := s.WaitOperation(ctx, &opspb.WaitOperationRequest{Name: opName}) + if err != nil { + return fmt.Errorf("WaitOperation returns error: %v", err) + } + if op.GetError() != nil { + return errors.New(op.GetError().GetMessage()) + } + if op.GetResponse() == nil || targetProto == nil { + return nil + } + return op.GetResponse().UnmarshalTo(targetProto) +} + +func (s *Server) startOperation(name string) { + log.Infof("startOperation: job %v", name) +} + +// GetOperationData fills in the operation data for this specific job. +func GetOperationData(id string, j job) *opspb.Operation { + done, result, e := j.Status() + return BuildOperation(id, done, result, e) +} + +// BuildOperation builds the operation response for this specific grpcstatus. +func BuildOperation(id string, done bool, result *anypb.Any, e error) *opspb.Operation { + // Nothing to return at all. + if result == nil && e == nil { + return &opspb.Operation{Done: done, Name: id} + } + // Can return partial results + if e != nil { + if st, ok := grpcstatus.FromError(e); ok { + return &opspb.Operation{Done: done, Name: id, Result: &opspb.Operation_Error{ + Error: st.Proto(), + }} + } + + return &opspb.Operation{Done: done, Name: id, Result: &opspb.Operation_Error{ + Error: &status.Status{ + Code: int32(codes.Unknown), + Message: e.Error(), + }, + }} + } + return &opspb.Operation{Done: done, Name: id, Result: &opspb.Operation_Response{ + Response: result, + }} +} + +// sortedMapKeys is used in ListOperation to make sure everything is in order. 
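+// For illustration (values hypothetical):
+//
+//	sortedMapKeys(map[string]*ttlJob{"b": nil, "a": nil, "c": nil})
+//	// returns []string{"a", "b", "c"}, so pages come out in a stable order
+//	// and a page token resumes at the first id >= the token.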
+func sortedMapKeys(m map[string]*ttlJob) []string { + keys := make([]string, 0, len(m)) + for key := range m { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func isDeletedJobExpired(job *ttlJob, now time.Time, ttl time.Duration) bool { + job.mu.Lock() + defer job.mu.Unlock() + + return !job.deleteTime.IsZero() && now.Sub(job.deleteTime) > ttl +} diff --git a/oracle/pkg/database/lib/lro/server_test.go b/oracle/pkg/database/lib/lro/server_test.go new file mode 100644 index 0000000..358a851 --- /dev/null +++ b/oracle/pkg/database/lib/lro/server_test.go @@ -0,0 +1,720 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lro + +import ( + "context" + "os" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + lrspb "google.golang.org/genproto/googleapis/longrunning" + opspb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + log "k8s.io/klog/v2" +) + +func TestMain(m *testing.M) { + os.Exit(m.Run()) +} + +func TestGetOperationDataPreserveErrorCode(t *testing.T) { + tests := []struct { + name string + err error + expectedCode int32 + }{ + { + name: "grpc status", + err: status.Error(codes.Unimplemented, "Fail"), + expectedCode: 12, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + f := &fakeJob{e: tc.err} + + err := GetOperationData("", f).GetError() + if err.Code != tc.expectedCode { + t.Errorf("Error code: Want %d; Got %d", tc.expectedCode, err.Code) + t.Errorf("Error %v", err) + } + }) + } +} + +func TestGetOperation(t *testing.T) { + marshalledResponse, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + log.Errorf("Failed to marshal operation %v", err) + return + } + tests := []struct { + name string + id string + getStatusError error + response *lrspb.OperationInfo // Using this class just to be different from the others. 
+ expectedResponse *lrspb.Operation + expectedError bool + done bool + numGetStatus int + }{ + { + name: "Undone GetOperation error status", + id: "frog", + getStatusError: status.Error(codes.Unknown, "Fail"), + numGetStatus: 1, + response: &lrspb.OperationInfo{}, + expectedResponse: &lrspb.Operation{ + Name: "frog", + Done: false, + Result: &opspb.Operation_Error{ + Error: status.Convert(status.Error(codes.Unknown, "Fail")).Proto(), + }, + }, + }, + { + name: "Done GetOperation error status", + id: "frog", + getStatusError: status.Error(codes.Unknown, "Fail"), + response: &lrspb.OperationInfo{ResponseType: "frog"}, + numGetStatus: 1, + done: true, + expectedResponse: &lrspb.Operation{ + Name: "frog", + Done: true, + Result: &opspb.Operation_Error{ + Error: status.Convert(status.Error(codes.Unknown, "Fail")).Proto(), + }, + }, + }, + { + name: "Undone GetOperation", + response: &lrspb.OperationInfo{ResponseType: "frog"}, + id: "frog", + numGetStatus: 1, + expectedResponse: &lrspb.Operation{ + Name: "frog", + Done: false, + Result: &opspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + { + name: "Done GetOperation", + response: &lrspb.OperationInfo{ResponseType: "frog"}, + id: "frog", + numGetStatus: 1, + done: true, + expectedResponse: &lrspb.Operation{ + Name: "frog", + Done: true, + Result: &opspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + { + name: "GetOperation no job", + id: "", + response: &lrspb.OperationInfo{ResponseType: "frog"}, + getStatusError: status.Error(codes.Unknown, "Fail"), + numGetStatus: 1, + expectedError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + lro := NewServer(context.Background()) + var f *fakeJob + if tc.id != "" { + res, err := anypb.New(tc.response) + if err != nil { + t.Fatalf("error marshaling response=%v", err) + } + f = &fakeJob{e: tc.getStatusError, response: res, done: tc.done} + _ = lro.AddJob(tc.id, f) + } + + response, err := lro.GetOperation(context.Background(), &lrspb.GetOperationRequest{Name: tc.id}) + + if tc.expectedError != (err != nil) { + t.Errorf("error return %v expected=%v", err, tc.expectedError) + } + + if f != nil { + if tc.numGetStatus != f.numGetStatus { + t.Errorf("getStatus wrong attempts: %v, expected %v", f.numGetStatus, tc.numGetStatus) + } + } + + if diff := cmp.Diff(tc.expectedResponse, response, protocmp.Transform()); diff != "" { + t.Errorf("response wrong -want +got: %v", diff) + } + }) + } +} + +func TestCancelOperation(t *testing.T) { + tests := []struct { + name string + id string + cancelError error + expectedError bool + numCancels int + }{ + { + name: "CancelOperation error status", + id: "frog", + cancelError: status.Error(codes.Unknown, "Fail"), + numCancels: 1, + expectedError: true, + }, + { + name: "CancelOperation success", + id: "frog", + numCancels: 1, + expectedError: false, + }, + { + name: "CancelOperation missing", + id: "", + numCancels: 0, + expectedError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + lro := NewServer(context.Background()) + var f *fakeJob + if tc.id != "" { + res, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + t.Fatalf("error marshalling response %v", err) + } + f = &fakeJob{e: tc.cancelError, response: res, done: true} + _ = lro.AddJob(tc.id, f) + } + + _, err := lro.CancelOperation(context.Background(), &lrspb.CancelOperationRequest{Name: tc.id}) + + if tc.expectedError != (err != nil) { + t.Errorf("error got %v 
expected error=%v", err, tc.expectedError) + } + + if f != nil { + if tc.numCancels != f.numCancels { + t.Errorf("failed. Response wrong: Was %v, expected %v", f.numCancels, tc.numCancels) + } + } + }) + } +} + +func TestEndOperation(t *testing.T) { + tests := []struct { + name string + id string + status []string + }{ + { + name: "single", + id: "frog", + status: []string{"OK"}, + }, + { + name: "multiple", + id: "frog", + status: []string{"OK", "Error", "OK"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lro := NewServer(context.Background()) + res, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + t.Fatalf("error marshalling response %v", err) + } + f := &fakeJob{name: tc.name, response: res, done: true} + _ = lro.AddJob(tc.id, f) + + for _, val := range tc.status { + lro.EndOperation(tc.id, val) + } + }) + } +} + +func TestDeleteOperation(t *testing.T) { + tests := []struct { + name string + id string + expectedError bool + }{ + { + name: "DeleteOperation success", + id: "frog", + expectedError: false, + }, + { + name: "DeleteOperation missing", + id: "", + expectedError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + lro := NewServer(context.Background()) + var f *fakeJob + if tc.id != "" { + res, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + t.Fatalf("error marshaling response=%v", err) + } + + f = &fakeJob{response: res} + _ = lro.AddJob(tc.id, f) + if _, ok := lro.getJob(tc.id); !ok { + t.Errorf("job not added correctly") + } + } + + _, err := lro.DeleteOperation(context.Background(), &lrspb.DeleteOperationRequest{Name: tc.id}) + + if tc.expectedError != (err != nil) { + t.Errorf("got error %v expected=%v", err, tc.expectedError) + } + + if f != nil { + if f.numDeletes != 0 { + t.Errorf("num deletes wrong: Was %v, expected 0", f.numDeletes) + } + } + _, ok := lro.getJob(tc.id) + if !tc.expectedError && !ok { + t.Errorf("job should still exist %v", tc.id) + } + }) + } +} + +func TestAddDuplicateJob(t *testing.T) { + lro := NewServer(context.Background()) + newF := &fakeJob{} + if err := lro.AddJob("frog", newF); err != nil { + t.Errorf("AddJob(Duplicate) failed - AddJob got err: %v, expected no error", err) + } + if err := lro.AddJob("frog", newF); err == nil { + t.Errorf("AddJob(Duplicate) failed - AddJob got no error: expected error") + } +} + +func TestWaitOperation(t *testing.T) { + marshalledResponse, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + log.Errorf("Failed to marshal response %v", err) + return + } + tests := []struct { + name string + id string + waitError error + expectedError bool + numWaits int + expectedResponse *lrspb.Operation + }{ + { + name: "error status", + id: "frog", + waitError: status.Error(codes.Unknown, "Fail"), + numWaits: 1, + expectedError: true, + expectedResponse: nil, + }, + { + name: "success", + id: "frog", + numWaits: 1, + expectedError: false, + expectedResponse: &lrspb.Operation{ + Name: "frog", + Done: false, + Result: &opspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + { + name: "missing", + id: "", + numWaits: 0, + expectedError: true, + expectedResponse: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lro := NewServer(context.Background()) + var f *fakeJob + if tc.id != "" { + res, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + t.Fatalf("error marshaling response=%v", 
err) + } + + f = &fakeJob{waitError: tc.waitError, response: res} + _ = lro.AddJob(tc.id, f) + if _, ok := lro.getJob(tc.id); !ok { + t.Errorf("job not added correctly") + } + } + + response, err := lro.WaitOperation(context.Background(), &lrspb.WaitOperationRequest{Name: tc.id}) + + if tc.expectedError != (err != nil) { + t.Fatalf("errors got %v expected=%v", err, tc.expectedError) + } + + if f != nil && tc.numWaits != f.numWaits { + t.Errorf("wait wrong attempts: %v, expected %v", f.numWaits, tc.numWaits) + } + + if diff := cmp.Diff(tc.expectedResponse, response, protocmp.Transform()); diff != "" { + t.Errorf("response wrong -want +got: %v", diff) + } + }) + } +} + +type ListJobStruct struct { + id string + done bool + responseError error +} + +func TestListOperation(t *testing.T) { + marshalledResponse, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + log.Errorf("Failed to marshal operation %v", err) + return + } + tests := []struct { + name string + pageSize int32 + pageToken string + jobs []ListJobStruct + expectedError bool + expectedResponse *lrspb.ListOperationsResponse + }{ + { + name: "ListOperation one", + jobs: []ListJobStruct{ + { + id: "frog", + done: true, + }, + }, + expectedError: false, + expectedResponse: &lrspb.ListOperationsResponse{ + Operations: []*lrspb.Operation{ + { + Name: "frog", + Done: true, + Result: &lrspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + }, + }, + { + name: "ListOperation empty", + expectedError: false, + expectedResponse: &lrspb.ListOperationsResponse{}, + }, + { + name: "ListOperation limited", + pageSize: 2, + jobs: []ListJobStruct{ + { + id: "frog", + done: true, + }, + { + id: "1", + }, + { + id: "2", + }, + }, + expectedError: false, + expectedResponse: &lrspb.ListOperationsResponse{ + NextPageToken: "frog", + Operations: []*lrspb.Operation{ + { + Name: "1", + Done: false, + Result: &lrspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + { + Name: "2", + Done: false, + Result: &lrspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + }, + }, + { + name: "ListOperation nextPage", + pageSize: 2, + pageToken: "frog", + jobs: []ListJobStruct{ + { + id: "frog", + done: true, + }, + { + id: "1", + }, + { + id: "2", + }, + }, + expectedError: false, + expectedResponse: &lrspb.ListOperationsResponse{ + NextPageToken: "", + Operations: []*lrspb.Operation{ + { + Name: "frog", + Done: true, + Result: &lrspb.Operation_Response{ + Response: marshalledResponse, + }, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + lro := NewServer(context.Background()) + for _, j := range tc.jobs { + res, err := anypb.New(&lrspb.OperationInfo{ResponseType: "frog"}) + if err != nil { + t.Errorf("error marshaling response=%v", err) + } + + newF := &fakeJob{e: j.responseError, response: res, done: j.done} + _ = lro.AddJob(j.id, newF) + } + + response, err := lro.ListOperations(context.Background(), &lrspb.ListOperationsRequest{PageSize: tc.pageSize, PageToken: tc.pageToken}) + + if tc.expectedError != (err != nil) { + t.Errorf("error got %v, expected error=%v", err, tc.expectedError) + } + + if diff := cmp.Diff(tc.expectedResponse, response, protocmp.Transform()); diff != "" { + t.Errorf("response wrong -want +got: %v", diff) + } + }) + } +} + +func TestDeleteExpiredJobs(t *testing.T) { + tests := []struct { + name string + completeTime time.Time + deleteTime time.Time + jobDone bool + wantDelete bool + }{ + { + name: "Delete expired job 
+ deleteTime: time.Now().Add(-10 * time.Minute),
+ wantDelete: true,
+ },
+ {
+ name: "Keep job with delete issued",
+ deleteTime: time.Now().Add(time.Second),
+ },
+ {
+ name: "Delete completed job",
+ completeTime: time.Now().Add(-10 * time.Minute),
+ wantDelete: true,
+ },
+ {
+ name: "Keep completed job",
+ completeTime: time.Now().Add(time.Second),
+ jobDone: true,
+ },
+ {
+ name: "Keep completed job with completeTime updated",
+ jobDone: true,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+
+ lro := NewServer(context.Background())
+ fj := &fakeJob{done: tc.jobDone}
+
+ if err := lro.AddJob(tc.name, fj); err != nil {
+ t.Errorf("lro.AddJob failed with err=%v", err)
+ }
+
+ job, _ := lro.getJob(tc.name)
+ job.completeTime = tc.completeTime
+ job.deleteTime = tc.deleteTime
+
+ lro.DeleteExpiredJobs(5*time.Minute, 5*time.Minute)
+
+ job, ok := lro.getJob(tc.name)
+ if tc.wantDelete == ok {
+ t.Errorf("wantDelete=%v, deleted=%v", tc.wantDelete, !ok)
+ }
+
+ if job != nil {
+ if !tc.completeTime.IsZero() && job.completeTime != tc.completeTime {
+ t.Errorf("completeTime was updated but should not have been")
+ }
+
+ if tc.jobDone && job.completeTime.IsZero() {
+ t.Errorf("completeTime is empty")
+ }
+ }
+ })
+ }
+}
+
+func TestMutexes(t *testing.T) {
+ numLoops := 1000
+ lro := &Server{
+ jobs: make(map[string]*ttlJob),
+ }
+
+ done := make(chan bool)
+
+ go func() {
+ for range time.Tick(10 * time.Millisecond) {
+ lro.DeleteExpiredJobs(0, time.Minute)
+ select {
+ case <-done:
+ lro.DeleteExpiredJobs(0, time.Minute)
+ close(done)
+ return
+ default:
+ continue
+ }
+ }
+ }()
+
+ // Run a bunch of goroutines to exercise the server concurrently.
+ var wg sync.WaitGroup
+ wg.Add(numLoops)
+ for i := 0; i < numLoops; i++ {
+ go func() {
+ id := uuid.New().String()
+ defer wg.Done()
+ newF := &fakeJob{}
+ err := lro.AddJob(id, newF)
+ if err != nil {
+ t.Errorf("Mutexes(Main) AddJob(%v) error=%v", id, err)
+ }
+ if _, err := lro.CancelOperation(context.Background(), &opspb.CancelOperationRequest{Name: id}); err != nil {
+ t.Errorf("Mutexes(Main) CancelOperation(%v) error=%v", id, err)
+ }
+ if _, err = lro.GetOperation(context.Background(), &opspb.GetOperationRequest{Name: id}); err != nil {
+ t.Errorf("Mutexes(Main) GetOperation(%v) error=%v", id, err)
+ }
+ if _, err = lro.DeleteOperation(context.Background(), &opspb.DeleteOperationRequest{Name: id}); err != nil {
+ t.Errorf("Mutexes(Main) DeleteOperation(%v) error=%v", id, err)
+ }
+ }()
+ }
+
+ wg.Wait()
+ done <- true
+
+ <-done
+
+ if len(lro.jobs) != 0 {
+ t.Errorf("Mutexes(Main) jobs still exist, should be empty")
+ }
+}
+
+type fakeJob struct {
+ name string
+ e error
+ waitError error
+ done bool
+ response *anypb.Any
+ numCancels int
+ numDeletes int
+ numGetStatus int
+ numWaits int
+}
+
+func (f *fakeJob) Cancel() error {
+ f.numCancels++
+ return f.e
+}
+
+func (f *fakeJob) Delete() error {
+ f.numDeletes++
+ return f.e
+}
+
+func (f *fakeJob) Wait(time.Duration) error {
+ f.numWaits++
+ return f.waitError
+}
+
+// Status returns done, result, and error in one call, to be thread-safe.
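+// A caller polling the fake might look like this (illustrative sketch only):
+//
+//   done, res, err := f.Status()
+//   if done { /* inspect res and err */ }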
+func (f *fakeJob) Status() (bool, *anypb.Any, error) { + f.numGetStatus++ + return f.done, f.response, f.e +} + +func (f *fakeJob) IsDone() bool { + return f.done +} + +func (f *fakeJob) Name() string { + return f.name +} diff --git a/oracle/pkg/database/provision/BUILD.bazel b/oracle/pkg/database/provision/BUILD.bazel new file mode 100644 index 0000000..3f378ad --- /dev/null +++ b/oracle/pkg/database/provision/BUILD.bazel @@ -0,0 +1,50 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "provision", + srcs = [ + "bootstrap_database_task.go", + "cdb.go", + "common.go", + "init_file_generator.go", + ], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/consts", + "//oracle/pkg/agents/oracle", + "//oracle/pkg/agents/security", + "//oracle/pkg/database/common", + "@com_github_godror_godror//:godror", + "@io_k8s_klog_v2//:klog", + ], +) + +go_test( + name = "provision_test", + srcs = [ + "bootstrap_database_task_test.go", + "common_test.go", + ], + data = [":provision_files"], + embed = [":provision"], + deps = [ + "//oracle/pkg/agents/oracle", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//test/bufconn", + ], +) + +filegroup( + name = "provision_files", + srcs = [ + "bootstrap-database-crcf.template", + "bootstrap-database-initfile.template", + "bootstrap-database-initfile-oracle-xe.template", + "bootstrap-database-listener.template", + "bootstrap-database-tnsnames.template", + "sqlnet.ora", + ], + visibility = ["//visibility:public"], +) diff --git a/oracle/pkg/database/provision/bootstrap-database-crcf.template b/oracle/pkg/database/provision/bootstrap-database-crcf.template new file mode 100644 index 0000000..61f1458 --- /dev/null +++ b/oracle/pkg/database/provision/bootstrap-database-crcf.template @@ -0,0 +1,13 @@ +CREATE CONTROLFILE REUSE set DATABASE "{{ .DatabaseName }}" RESETLOGS ARCHIVELOG + MAXLOGFILES 16 + MAXLOGMEMBERS 3 + MAXDATAFILES 100 + MAXINSTANCES 8 + MAXLOGHISTORY 292 +LOGFILE + GROUP 1 '{{ .DataFilesDir }}/redo01.log' SIZE 1G BLOCKSIZE 512, + GROUP 2 '{{ .DataFilesDir }}/redo02.log' SIZE 1G BLOCKSIZE 512, + GROUP 3 '{{ .DataFilesDir }}/redo03.log' SIZE 1G BLOCKSIZE 512 +DATAFILE +{{ .DataFilesMultiLine }} +CHARACTER SET AL32UTF8 \ No newline at end of file diff --git a/oracle/pkg/database/provision/bootstrap-database-initfile-oracle-xe.template b/oracle/pkg/database/provision/bootstrap-database-initfile-oracle-xe.template new file mode 100644 index 0000000..101e4dd --- /dev/null +++ b/oracle/pkg/database/provision/bootstrap-database-initfile-oracle-xe.template @@ -0,0 +1,5 @@ +*.control_files='/opt/oracle/oradata/{{ .SourceDBName }}/control01.ctl' +*.db_name='{{ .DestDBName }}' +*.enable_pluggable_database=true +*.nls_language='AMERICAN' +*.nls_territory='AMERICA' diff --git a/oracle/pkg/database/provision/bootstrap-database-initfile.template b/oracle/pkg/database/provision/bootstrap-database-initfile.template new file mode 100644 index 0000000..4f61cb1 --- /dev/null +++ b/oracle/pkg/database/provision/bootstrap-database-initfile.template @@ -0,0 +1,5 @@ +*.control_files='/u01/app/oracle/oradata/{{ .SourceDBName }}/control01.ctl','/u01/app/oracle/fast_recovery_area/{{ .SourceDBName }}/control02.ctl' +*.db_name='{{ .DestDBName }}' +*.enable_pluggable_database=true +*.nls_language='AMERICAN' +*.nls_territory='AMERICA' diff --git 
a/oracle/pkg/database/provision/bootstrap-database-listener.template b/oracle/pkg/database/provision/bootstrap-database-listener.template
new file mode 100644
index 0000000..60353c5
--- /dev/null
+++ b/oracle/pkg/database/provision/bootstrap-database-listener.template
@@ -0,0 +1,27 @@
+{{ .ListenerName }} =
+ (DESCRIPTION =
+ (ADDRESS = (PROTOCOL = IPC)(KEY = REGLSNR_{{ .ListenerPort }}))
+ (ADDRESS = (PROTOCOL = {{ .ListenerProtocol }})(HOST = {{ .DatabaseHost }})(PORT={{ .ListenerPort }}))
+ )
+
+
+SID_LIST_{{ .ListenerName }} =
+ (SID_LIST =
+ (SID_DESC =
+ (GLOBAL_DBNAME = {{ .DatabaseName }}{{ .DBDomain }})
+ (ORACLE_HOME = {{ .DatabaseHome }})
+ (SID_NAME = {{ .DatabaseName }})
+ )
+{{- range .PluggableDatabaseNames}}
+ (SID_DESC =
+ (GLOBAL_DBNAME = {{.}}{{ $.DBDomain }})
+ (ORACLE_HOME = {{ $.DatabaseHome }})
+ (SID_NAME = {{ $.DatabaseName }})
+ )
+{{- end}}
+ )
+
+ADR_BASE_{{ .ListenerName }} = {{ .DatabaseBase }}
+SECURE_REGISTER_{{ .ListenerName }} = (IPC)
+
+VALID_NODE_CHECKING_REGISTRATION_{{ .ListenerName }}=SUBNET
\ No newline at end of file
diff --git a/oracle/pkg/database/provision/bootstrap-database-tnsnames.template b/oracle/pkg/database/provision/bootstrap-database-tnsnames.template
new file mode 100644
index 0000000..9d0cd5d
--- /dev/null
+++ b/oracle/pkg/database/provision/bootstrap-database-tnsnames.template
@@ -0,0 +1,20 @@
+{{ .DatabaseName }} =
+ (DESCRIPTION =
+ (ADDRESS = (PROTOCOL = {{ .ListenerProtocol }})(HOST = localhost)(PORT = {{ .ListenerPort }}))
+ (CONNECT_DATA =
+ (SERVER = DEDICATED)
+ (SERVICE_NAME = {{ .DatabaseName }}{{ .DBDomain }})
+ )
+ )
+
+
+{{range .PluggableDatabaseNames -}}
+{{.}} =
+ (DESCRIPTION =
+ (ADDRESS = (PROTOCOL = {{ $.ListenerProtocol }})(HOST = localhost)(PORT = {{ $.ListenerPort }}))
+ (CONNECT_DATA =
+ (SERVER = DEDICATED)
+ (SERVICE_NAME = {{.}}{{ $.DBDomain }})
+ )
+ )
+{{- end}}
\ No newline at end of file
diff --git a/oracle/pkg/database/provision/bootstrap_database_task.go b/oracle/pkg/database/provision/bootstrap_database_task.go
new file mode 100644
index 0000000..cf7aeee
--- /dev/null
+++ b/oracle/pkg/database/provision/bootstrap_database_task.go
@@ -0,0 +1,808 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provision
+
+import (
+ "bytes"
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ _ "github.com/godror/godror" // Register database/sql driver
+ "k8s.io/klog/v2"
+
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+ dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/security"
+ "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/common"
+)
+
+// Max number of retries for db startup.
+const startupRetries = 5
+
+// BootstrapTask defines a task that can be invoked to bootstrap an Oracle DB.
+type BootstrapTask struct {
+ db oracleDB
+ uid uint32
+ gid uint32
+ subTasks []task
+ osUtil osUtil
+ dbdClient dbdpb.DatabaseDaemonClient
+ cdbRenaming bool // true when the CDB name in the image differs from the requested name
+ isSeeded bool // true when bootstrapping from a pre-seeded (pre-built) database image
+}
+
+// GetName returns task name.
+func (task *BootstrapTask) GetName() string {
+ return "Bootstrap"
+}
+
+// Call triggers bootstrap process for an Oracle DB.
+func (task *BootstrapTask) Call(ctx context.Context) error {
+ return doSubTasks(ctx, task.GetName(), task.subTasks)
+}
+
+func (task *BootstrapTask) initUIDGID(ctx context.Context) error {
+ uid, gid, err := oracleUser(task.osUtil)
+ if err != nil {
+ return fmt.Errorf("failed to find uid/gid: %v", err)
+ }
+ task.uid = uid
+ task.gid = gid
+ return nil
+}
+
+func (task *BootstrapTask) createDirs(ctx context.Context) error {
+ dirs := []string{
+ task.db.GetDataFilesDir(),
+ task.db.GetConfigFilesDir(),
+ task.db.GetFlashDir(),
+ task.db.GetListenerDir(),
+ task.db.GetAdumpDir(),
+ task.db.GetCdumpDir(),
+ }
+
+ if task.db.IsCDB() {
+ dirs = append(dirs, filepath.Join(task.db.GetDataFilesDir(), "pdbseed"))
+ }
+ if err := MakeDirs(ctx, dirs, task.uid, task.gid); err != nil {
+ return fmt.Errorf("failed to create prerequisite directories: %v", err)
+ }
+ return nil
+}
+
+func (task *BootstrapTask) setSourceEnv(ctx context.Context) error {
+ // Sets env to mount the starter DB for running nid.
+ if err := os.Setenv("ORACLE_SID", task.db.GetSourceDatabaseName()); err != nil {
+ return err
+ }
+ return os.Setenv("ORACLE_HOME", task.db.GetDatabaseHome())
+}
+
+func (task *BootstrapTask) setEnv(ctx context.Context) error {
+ // Sets env to point at the bootstrapped (target) DB.
+ if err := os.Setenv("ORACLE_SID", task.db.GetDatabaseName()); err != nil {
+ return err
+ }
+ return os.Setenv("ORACLE_HOME", task.db.GetDatabaseHome())
+}
+
+func (task *BootstrapTask) setParameters(ctx context.Context) error {
+
+ klog.InfoS("starting database in nomount mode to set parameters")
+ retry := 0
+ var err error
+ for ; retry < startupRetries; retry++ {
+ _, err = task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetDatabaseName(),
+ Option: "nomount",
+ })
+ if err == nil {
+ break
+ }
+ klog.InfoS("setParameters: startup nomount failed", "attempt", retry, "err", err)
+ }
+
+ if retry == startupRetries {
+ return fmt.Errorf("setParameters: startup nomount failed: %v", err)
+ }
+
+ // We want to be running in nomount to set all spfile parameters.
+ klog.InfoS("setting parameters in spfile")
+ if err := task.setParametersHelper(ctx); err != nil {
+ return fmt.Errorf("setParameters: set server parameters: %v", err)
+ }
+ // Since parameters are set in spfile, we do a bounce.
+ klog.InfoS("bouncing database for setting parameters")
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN,
+ DatabaseName: task.db.GetDatabaseName(),
+ }); err != nil {
+ return fmt.Errorf("setParameters: shutdown after setting parameters failed: %v", err)
+ }
+
+ // For a seeded database, we start the CDB in nomount mode, as required by the subsequent moveDatabase task.
+ // For an unseeded database, the subsequent task is prepDatabase, which starts the CDB in normal mounted mode.
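+ // Sketch of the resulting states (no extra behavior implied):
+ //   seeded:   force startup nomount here; moveDatabase then rebuilds the controlfile.
+ //   unseeded: leave the DB down; prepDatabase performs the next startup.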
+ if task.isSeeded { + if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{ + Operation: dbdpb.BounceDatabaseRequest_STARTUP, + DatabaseName: task.db.GetDatabaseName(), + Option: "force_nomount", + }); err != nil { + return fmt.Errorf("setParameters: force startup nomount failed: %v", err) + } + } + return nil +} + +func (task *BootstrapTask) moveDatabase(ctx context.Context) error { + dbf := []string{} + for _, f := range task.db.GetDataFiles() { + dbf = append(dbf, fmt.Sprintf("'%s'", filepath.Join(task.db.GetDataFilesDir(), f))) + } + multiline := strings.Join(dbf, ",\n") + c := &controlfileInput{ + DatabaseName: task.db.GetDatabaseName(), + DataFilesDir: task.db.GetDataFilesDir(), + DataFilesMultiLine: multiline, + } + + ctl, err := template.New(filepath.Base(ControlFileTemplateName)).ParseFiles(ControlFileTemplateName) + if err != nil { + return fmt.Errorf("moveDatabase: parsing %q failed: %v", ControlFileTemplateName, err) + } + + ctlBuf := &bytes.Buffer{} + if err := ctl.Execute(ctlBuf, c); err != nil { + return fmt.Errorf("moveDatabase: executing %q failed: %v", ControlFileTemplateName, err) + } + + if _, err := runSQLPlus(ctx, task.db.GetVersion(), task.db.GetDatabaseName(), []string{ctlBuf.String()}, false); err != nil { + return fmt.Errorf("moveDatabase: controlfile creation failed: %v", err) + } + sqlResp, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{ + Commands: []string{"alter database open resetlogs"}, + Suppress: false, + }) + if err != nil { + return fmt.Errorf("moveDatabase: resetlogs failed: %v", err) + } + klog.InfoS("reset logs after database move", "output", sqlResp) + if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{ + Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN, + DatabaseName: task.db.GetDatabaseName(), + Option: "immediate", + }); err != nil { + return fmt.Errorf("moveDatabase: shutdown failed: %v", err) + } + klog.InfoS("database shutdown after move") + + return nil +} + +// setupUsers creates all users required for the DB at instance creation. 
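+// Each user is created only if absent: an ALL_USERS lookup guards the CREATE
+// USER batch, which makes reruns of this subtask effectively idempotent.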
+func (task *BootstrapTask) setupUsers(ctx context.Context) error {
+ checkUserCmd := "select * from all_users where username='%s'"
+ cmds := task.db.GetCreateUserCmds()
+ if cmds == nil {
+ klog.Errorf("failed to retrieve create user commands; this may be caused by an error while generating a temporary password")
+ }
+ for _, cu := range cmds {
+ resp, err := task.dbdClient.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{fmt.Sprintf(checkUserCmd, strings.ToUpper(cu.user))}})
+ if err != nil {
+ return fmt.Errorf("check user %s failed: %v", cu.user, err)
+ }
+ if len(resp.GetMsg()) > 0 {
+ continue
+ }
+ if _, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: cu.cmds,
+ Suppress: true,
+ }); err != nil {
+ return fmt.Errorf("creating user %s failed: %v", cu.user, err)
+ }
+ // Skip cu.cmds[0] when logging, as it contains the user's password.
+ klog.InfoS("creating user done", "user", cu.user, "command", cu.cmds[1:])
+ }
+ return nil
+}
+
+func (task *BootstrapTask) moveConfigFiles(ctx context.Context) error {
+ for i := range task.db.GetConfigFiles() {
+ sf := filepath.Join(task.db.GetSourceConfigFilesDir(), task.db.GetSourceConfigFiles()[i])
+ tf := filepath.Join(task.db.GetConfigFilesDir(), task.db.GetConfigFiles()[i])
+ if err := MoveFile(sf, tf); err != nil {
+ return fmt.Errorf("moveConfigFiles: failed to move config file from %s to %s: %v", sf, tf, err)
+ }
+ }
+ return nil
+}
+
+func (task *BootstrapTask) moveDataFiles(ctx context.Context) error {
+ for _, f := range task.db.GetDataFiles() {
+ if err := MoveFile(filepath.Join(task.db.GetSourceDataFilesDir(), f), filepath.Join(task.db.GetDataFilesDir(), f)); err != nil {
+ return fmt.Errorf("moveDataFiles: failed to move data file %s to %s: %v", f, task.db.GetDataFilesDir(), err)
+ }
+ }
+ return nil
+}
+
+// relinkConfigFiles creates softlinks under the Oracle standard paths from the
+// persistent configuration in the PD.
+func (task *BootstrapTask) relinkConfigFiles(ctx context.Context) error {
+ for _, f := range task.db.GetConfigFiles() {
+ destn := filepath.Join(task.db.GetSourceConfigFilesDir(), f)
+ if _, err := os.Stat(destn); err == nil {
+ if err := os.Remove(destn); err != nil {
+ return fmt.Errorf("relinkConfigFiles: unable to delete existing file %s: %v", f, err)
+ }
+ }
+ if err := os.Symlink(filepath.Join(task.db.GetConfigFilesDir(), f), filepath.Join(task.db.GetSourceConfigFilesDir(), f)); err != nil {
+ return fmt.Errorf("relinkConfigFiles: symlink creation failed for %s to oracle directories: %v", f, err)
+ }
+ }
+ return nil
+}
+
+func (task *BootstrapTask) runNID(ctx context.Context) error {
+ // If the CDB was renamed, NID already ran as part of renameDatabase.
+ if task.cdbRenaming {
+ return nil
+ }
+
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetDatabaseName(),
+ Option: "mount",
+ }); err != nil {
+ return fmt.Errorf("runNID: startup mount failed: %v", err)
+ }
+
+ klog.InfoS("runNID: startup mount returned no errors")
+ // Run NID to change only the DBID, not the name of the database.
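+ // An empty DatabaseName in the request keeps the current name; contrast
+ // renameDatabase below, which passes the new name so NID renames the CDB too.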
+ if _, err := task.dbdClient.NID(ctx, &dbdpb.NIDRequest{
+ Sid: task.db.GetDatabaseName(),
+ DatabaseName: "",
+ }); err != nil {
+ return fmt.Errorf("nid cmd failed: %v", err)
+ }
+
+ klog.InfoS("runNID: nid executed successfully")
+ return nil
+}
+
+func getLocalListener(listeners map[string]*consts.Listener) (string, error) {
+ for name, l := range listeners {
+ if l.Local {
+ return name, nil
+ }
+ }
+ return "", fmt.Errorf("no local listener defined")
+}
+
+func (task *BootstrapTask) setParametersHelper(ctx context.Context) error {
+ localListener, err := getLocalListener(task.db.GetListeners())
+ if err != nil {
+ return fmt.Errorf("parameter validation failed: %v", err)
+ }
+ // The following section overrides the system-specific init parameters specified in the spec.
+ parameters := []string{
+ fmt.Sprintf("audit_file_dest='%s/app/oracle/admin/%s/adump'", task.db.GetMountPointAdmin(), task.db.GetDatabaseName()),
+ "audit_trail='db'",
+ fmt.Sprintf("control_files='%s/control01.ctl'", task.db.GetDataFilesDir()),
+ "db_block_size=8192",
+ fmt.Sprintf("db_domain='%s'", task.db.GetDBDomain()),
+ fmt.Sprintf("db_name='%s'", task.db.GetDatabaseName()),
+ fmt.Sprintf("db_unique_name='%s'", task.db.GetDatabaseUniqueName()),
+ "db_recovery_file_dest_size=100G",
+ fmt.Sprintf("db_recovery_file_dest='%s'", task.db.GetFlashDir()),
+ fmt.Sprintf("diagnostic_dest='%s/app/oracle'", task.db.GetMountPointDiag()),
+ fmt.Sprintf("dispatchers='(PROTOCOL=TCP) (SERVICE=%sXDB)'", task.db.GetDatabaseName()),
+ fmt.Sprintf("enable_pluggable_database=%s", strings.ToUpper(strconv.FormatBool(task.db.IsCDB()))),
+ "filesystemio_options=SETALL",
+ fmt.Sprintf("local_listener='(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=REGLSNR_%d)))'", task.db.GetListeners()[localListener].Port),
+ "open_cursors=300",
+ "processes=300",
+ "remote_login_passwordfile='EXCLUSIVE'",
+ "undo_tablespace='UNDOTBS1'",
+ fmt.Sprintf("log_archive_dest_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=%s'", task.db.GetDatabaseUniqueName()),
+ "log_archive_dest_state_1=enable",
+ "log_archive_format='%t_%s_%r.arch'",
+ "standby_file_management=AUTO",
+ }
+
+ if task.isSeeded && task.db.GetVersion() != consts.Oracle18c {
+ /* We do not change the pga_aggregate_target and sga_target parameters for Oracle 18c XE because of limitations
+ Oracle places on memory allocation for the Express Edition. The parameter "compatible" comes preset with the
+ desired value for Oracle 18c XE */
+ parameters = append(parameters, fmt.Sprintf("pga_aggregate_target=%dM", task.db.GetDatabaseParamPGATargetMB()))
+ parameters = append(parameters, fmt.Sprintf("sga_target=%dM", task.db.GetDatabaseParamSGATargetMB()))
+ parameters = append(parameters, fmt.Sprintf("compatible='%s.0'", task.db.GetVersion()))
+ }
+
+ if task.db.IsCDB() {
+ parameters = append(parameters, "common_user_prefix='gcsql$'")
+ }
+
+ // We might want to send the whole batch over at once, but this way it's
+ // easier to see where it failed.
+ for _, p := range parameters {
+ // Most of these cannot be set with scope=memory, so we only use scope=spfile.
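+ // A rendered statement looks like, e.g. (see the expectations in
+ // bootstrap_database_task_test.go):
+ //   alter system set processes=300 scope=spfile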
+ stmt := fmt.Sprintf("alter system set %s scope=spfile", p)
+ klog.InfoS("setParametersHelper: executing", "stmt", stmt)
+ if _, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: []string{stmt},
+ Suppress: false,
+ }); err != nil {
+ return fmt.Errorf("failed to set %q: %v", p, err)
+ }
+ klog.InfoS("setParametersHelper: stmt executed successfully")
+ }
+ return nil
+}
+
+func (task *BootstrapTask) prepDatabase(ctx context.Context) error {
+ password, err := security.RandOraclePassword()
+ if err != nil {
+ return fmt.Errorf("error generating temporary password: %v", err)
+ }
+
+ // Post NID, we need to start the database and resetlogs.
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetDatabaseName(),
+ Option: "mount",
+ }); err != nil {
+ return fmt.Errorf("prepDatabase: startup mount failed: %v", err)
+ }
+ klog.InfoS("prepDatabase: startup mount")
+ // Enable archivelog mode.
+ commands := []string{"alter database archivelog"}
+ // resetlogs is only required after a seeded database is renamed using an NID operation.
+ if task.cdbRenaming || !task.isSeeded {
+ commands = append(commands, "alter database open")
+ } else {
+ commands = append(commands, "alter database open resetlogs")
+ }
+ sqlResp, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: commands,
+ Suppress: false,
+ })
+
+ if err != nil {
+ return fmt.Errorf("prepDatabase: enabling archive log and resetlogs open failed: %v", err)
+ }
+ klog.InfoS("prepDatabase: archive log mode and resetlogs open", "output", sqlResp)
+
+ // In the unseeded case, /u02/app/oracle/oradata//temp01.dbf is already part of the database, so this step is skipped.
+ if task.isSeeded {
+ tempfile := []string{fmt.Sprintf("ALTER TABLESPACE TEMP ADD TEMPFILE '%s/temp01.dbf' SIZE 1G REUSE AUTOEXTEND ON", task.db.GetDataFilesDir())}
+ sqlResp, err = task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: tempfile,
+ Suppress: false,
+ })
+ if err != nil {
+ return fmt.Errorf("prepDatabase: adding tempfile failed: %v", err)
+ }
+ klog.InfoS("prepDatabase: add tempfile", "output", sqlResp)
+ }
+ sys := []string{
+ ChangePasswordCmd("sys", password),
+ ChangePasswordCmd("system", password)}
+ sqlResp, err = task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: sys,
+ Suppress: true,
+ })
+ if err != nil {
+ return fmt.Errorf("prepDatabase: change sys & system password failed: %v", err)
+ }
+ klog.InfoS("prepDatabase: sys & system password change", "output", sqlResp)
+ return nil
+}
+
+func (task *BootstrapTask) fixOratab(ctx context.Context) error {
+ if err := replace(task.db.GetOratabFile(), task.db.GetSourceDatabaseName(), task.db.GetDatabaseName(), task.uid, task.gid); err != nil {
+ return fmt.Errorf("oratab replacing dbname: %v", err)
+ }
+ if err := replace(task.db.GetOratabFile(), task.db.GetSourceDatabaseHost(), task.db.GetHostName(), task.uid, task.gid); err != nil {
+ return fmt.Errorf("oratab replacing hostname: %v", err)
+ }
+ return nil
+}
+
+func (task *BootstrapTask) cleanup(ctx context.Context) error {
+ if err := os.RemoveAll(task.db.GetSourceDataFilesDir()); err != nil {
+ klog.ErrorS(err, "BootstrapTask: failed to cleanup source data directory")
+ }
+ return nil
+}
+
+func (task *BootstrapTask) initListeners(ctx context.Context) error {
+ lType := "SECURE"
+ _, err := task.dbdClient.CreateListener(ctx, &dbdpb.CreateListenerRequest{
+ DatabaseName: task.db.GetDatabaseName(),
+ Port: task.db.GetListeners()[lType].Port,
+ Protocol: task.db.GetListeners()[lType].Protocol,
+ OracleHome: task.db.GetDatabaseHome(),
+ DbDomain: task.db.GetDBDomain(),
+ })
+ return err
+}
+
+// recreateFlashDir creates a flash dir if it does not exist.
+func (task *BootstrapTask) recreateFlashDir(ctx context.Context) error {
+ if _, err := os.Stat(task.db.GetFlashDir()); os.IsNotExist(err) {
+ klog.InfoS("recreateFlashDir: recreating flash directory", "flashDir", task.db.GetFlashDir())
+
+ if err := MakeDirs(ctx, []string{task.db.GetFlashDir()}, task.uid, task.gid); err != nil {
+ return fmt.Errorf("recreateFlashDir: creating flash directory %q failed: %v", task.db.GetFlashDir(), err)
+ }
+ }
+ return nil
+}
+
+func (task *BootstrapTask) startDB(ctx context.Context) error {
+ backupMode := false
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetDatabaseName(),
+ Option: "open",
+ }); err != nil {
+ // Startup failed at this point. ORA-10873 indicates that a file is in backup mode.
+ if strings.Contains(err.Error(), "ORA-10873:") {
+ backupMode = true
+ } else {
+ // The CDB startup failed with an error unrelated to backup mode.
+ return fmt.Errorf("startDB: start db failed: %v", err)
+ }
+ }
+
+ var sqls []string
+ if backupMode {
+ sqls = append(sqls, "alter database end backup", "alter database open")
+ }
+ if task.db.IsCDB() {
+ sqls = append(sqls, "alter pluggable database all open")
+ }
+
+ if sqls != nil {
+ if _, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: sqls,
+ Suppress: false,
+ }); err != nil {
+ return fmt.Errorf("startDB: open db failed: %v", err)
+ }
+ }
+ klog.InfoS("startDB: successfully opened db")
+ return nil
+}
+
+func (task *BootstrapTask) createPDBSeedTemp(ctx context.Context) error {
+ sqlResp, err := task.dbdClient.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: []string{"select name, con_id from v$tempfile where con_id in (select con_id from v$containers where name='PDB$SEED')"},
+ Suppress: false,
+ })
+
+ if err != nil {
+ return fmt.Errorf("createPDBSeedTemp: failed to query temp file for PDB$SEED: %v", err)
+ }
+
+ if len(sqlResp.GetMsg()) > 0 {
+ // A temp file already exists for PDB$SEED; nothing more to do.
+ return nil
+ }
+
+ // Ask dbdaemon to remove the empty /u01/app/oracle/admin//dpdump/ directory
+ // in its own container, as it might cause issues with Oracle 19x on FUSE filesystems.
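+ // The resulting path has the form <source oracle base>/admin/<source CDB>/dpdump/,
+ // e.g. /u01/app/oracle/admin/GCLOUD/dpdump/ for a source CDB named GCLOUD (illustrative).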
+ dpDumpDir := fmt.Sprintf("%s/admin/%s/dpdump/", common.GetSourceOracleBase(task.db.GetVersion()), task.db.GetSourceDatabaseName())
+ if _, err := task.dbdClient.DeleteDir(ctx, &dbdpb.DeleteDirRequest{Path: dpDumpDir, Force: true}); err != nil {
+ klog.ErrorS(err, "createPDBSeedTemp: unable to delete directory", "path", dpDumpDir)
+ }
+
+ if _, err := task.dbdClient.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{
+ Commands: []string{
+ "alter session set container=PDB$SEED",
+ "alter session set \"_oracle_script\"=TRUE",
+ "alter pluggable database PDB$SEED close",
+ "alter pluggable database PDB$SEED open read write",
+ fmt.Sprintf("alter tablespace TEMP add tempfile '%s/temp01.dbf' size 500m reuse", filepath.Join(task.db.GetDataFilesDir(), "pdbseed")),
+ "alter pluggable database PDB$SEED close",
+ "alter pluggable database PDB$SEED open read only",
+ },
+ Suppress: false,
+ }); err != nil {
+ return fmt.Errorf("createPDBSeedTemp: failed to add temp file for PDB$SEED: %v", err)
+ }
+ return nil
+}
+
+var runSQLPlus = func(ctx context.Context, version, dbname string, sqls []string, suppress bool) ([]string, error) {
+
+ // Required for local connections
+ // (when no SID is specified on connect string)
+ if err := os.Setenv("ORACLE_SID", dbname); err != nil {
+ return nil, fmt.Errorf("failed to set env variable: %v", err)
+ }
+ if err := os.Setenv("ORACLE_HOME", common.GetSourceOracleHome(version)); err != nil {
+ return nil, fmt.Errorf("failed to set env variable: %v", err)
+ }
+
+ if err := os.Setenv("TNS_ADMIN", fmt.Sprintf(consts.ListenerDir, consts.DataMount)); err != nil {
+ return nil, fmt.Errorf("failed to set env variable: %v", err)
+ }
+ defer func() {
+ if err := os.Unsetenv("TNS_ADMIN"); err != nil {
+ klog.Warningf("failed to unset env variable: %v", err)
+ }
+ }()
+
+ // For connect string format refer to
+ // https://github.com/godror/godror/blob/main/drv.go
+ prelim := false
+ db, err := sql.Open("godror", "oracle://?sysdba=1") // "/ as sysdba"
+ var pingErr error
+ if err == nil {
+ // Force a connection with Ping; Open alone does not dial the DB.
+ pingErr = db.Ping()
+ }
+ if err != nil || pingErr != nil {
+ // Connection pool opened but ping failed, so close this pool.
+ if err == nil {
+ err = pingErr
+ if err := db.Close(); err != nil {
+ klog.Warningf("failed to close db connection: %v", err)
+ }
+ }
+
+ // Try a preliminary connection for CREATE (S)PFILE only.
+ // In this case we won't be able to get DBMS_OUTPUT.
+ if !strings.HasPrefix(strings.ToLower(sqls[0]), "create spfile") &&
+ !strings.HasPrefix(strings.ToLower(sqls[0]), "create pfile") {
+ klog.Errorf("Failed to connect to oracle: %v", err)
+ return nil, err
+ }
+ prelim = true
+ db, err = sql.Open("godror", "oracle://?sysdba=1&prelim=1") // "/ as sysdba"
+ if err != nil {
+ klog.Errorf("Failed to connect to oracle: %v", err)
+ return nil, err
+ }
+ }
+ defer db.Close()
+
+ // This will fail on prelim connections, so ignore errors in that case.
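+ // DBMS_OUTPUT capture is a two-step protocol: ENABLE() once for the session,
+ // then drain lines with GET_LINE until status != 0 (see dbmsOutputGetLines).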
+ if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.ENABLE(); END;"); err != nil && !prelim {
+ klog.Errorf("Failed to enable dbms_output: %v", err)
+ return nil, err
+ }
+
+ sqlForLogging := strings.Join(sqls, ";")
+ if suppress {
+ sqlForLogging = "suppressed"
+ }
+ klog.Infof("Executing SQL command: %q", sqlForLogging)
+
+ output := []string{}
+ for _, s := range sqls {
+ if _, err := db.ExecContext(ctx, s); err != nil {
+ klog.Errorf("Failed to execute: %q:\n%v", s, err)
+ return nil, err
+ }
+ out, err := dbmsOutputGetLines(ctx, db)
+ if err != nil && !prelim {
+ klog.Errorf("Failed to get DBMS_OUTPUT for %q:\n%v", s, err)
+ return nil, err
+ }
+ output = append(output, out...)
+ }
+ klog.Infof("output: %q", strings.Join(output, "\n"))
+ return output, nil
+}
+
+func dbmsOutputGetLines(ctx context.Context, db *sql.DB) ([]string, error) {
+ lines := make([]string, 0, 1024)
+ status := 0
+ // A status of 0 means success; until it becomes non-zero there may be more lines buffered.
+ for status == 0 {
+ var line string
+ if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.GET_LINE(:line, :status); END;",
+ sql.Named("line", sql.Out{Dest: &line}),
+ sql.Named("status", sql.Out{Dest: &status, In: true})); err != nil {
+ return nil, err
+ }
+ if status == 0 {
+ lines = append(lines, line)
+ }
+ }
+ return lines, nil
+}
+
+func (task *BootstrapTask) renameDatabase(ctx context.Context) error {
+
+ if !task.cdbRenaming {
+ return nil
+ }
+
+ // Prepare a PFILE (init.ora); the SPFILE is created from it later.
+ i := initFileInput{
+ SourceDBName: task.db.GetSourceDatabaseName(),
+ DestDBName: task.db.GetDatabaseName(),
+ }
+ initOraFileContent, err := i.LoadInitOraTemplate(task.db.GetVersion())
+ if err != nil {
+ return err
+ }
+
+ initOraFileName := fmt.Sprintf("init%s.ora", task.db.GetDatabaseName())
+ dbsDir := filepath.Join(task.db.GetDatabaseHome(), "dbs")
+
+ lByte := []byte(initOraFileContent)
+ if err := ioutil.WriteFile(filepath.Join(dbsDir, initOraFileName), lByte, 0600); err != nil {
+ return err
+ }
+ klog.InfoS("renameDatabase: prepare init file succeeded")
+
+ // Start the database.
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetSourceDatabaseName(),
+ Option: "mount",
+ AvoidConfigBackup: true,
+ }); err != nil {
+ return fmt.Errorf("renameDatabase: startup mount failed: %v", err)
+ }
+ klog.InfoS("renameDatabase: startup mount succeeded")
+
+ // Run NID to change both the DBID and the database name.
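+ // (Contrast runNID above, which passes an empty DatabaseName and therefore
+ // changes only the DBID.)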
+ if _, err := task.dbdClient.NID(ctx, &dbdpb.NIDRequest{
+ Sid: task.db.GetSourceDatabaseName(),
+ DatabaseName: task.db.GetDatabaseName(),
+ }); err != nil {
+ return fmt.Errorf("nid cmd failed: %v", err)
+ }
+ klog.InfoS("renameDatabase: nid command succeeded")
+
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_STARTUP,
+ DatabaseName: task.db.GetDatabaseName(),
+ Option: "mount",
+ AvoidConfigBackup: true,
+ }); err != nil {
+ return fmt.Errorf("renameDatabase: startup mount after nid failed: %v", err)
+ }
+ klog.InfoS("renameDatabase: startup succeeded after nid command")
+
+ sqlResp, err := runSQLPlus(ctx, task.db.GetVersion(), task.db.GetDatabaseName(), []string{"alter database open resetlogs"}, false)
+ if err != nil {
+ return fmt.Errorf("renameDatabase: resetlogs failed: %v", err)
+ }
+ klog.InfoS("reset logs after database move succeeded", "output", sqlResp)
+
+ if _, err := runSQLPlus(ctx, task.db.GetVersion(), task.db.GetDatabaseName(), []string{"CREATE SPFILE from PFILE"}, false); err != nil {
+ return fmt.Errorf("renameDatabase: spfile generation failed: %v", err)
+ }
+ klog.InfoS("spfile generation succeeded")
+
+ klog.InfoS("renameDatabase: shutting down database")
+ if _, err := task.dbdClient.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
+ Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN,
+ DatabaseName: task.db.GetDatabaseName(),
+ }); err != nil {
+ return fmt.Errorf("renameDatabase: shutdown failed: %v", err)
+ }
+
+ klog.InfoS("renameDatabase: executed successfully")
+ return nil
+}
+
+// NewBootstrapDatabaseTask returns a Task which can be invoked to bootstrap a DB.
+func NewBootstrapDatabaseTask(ctx context.Context, iscdb bool, isSeeded bool, cdbNameFromImage, cdbNameFromYaml, version, zone, host, DBDomain string, pgaMB, sgaMB uint64, provisioned bool, dbdClient dbdpb.DatabaseDaemonClient) (*BootstrapTask, error) {
+ var db oracleDB
+ if !iscdb {
+ return nil, errors.New("only CDB provisioning is supported")
+ }
+
+ var cdbRenaming bool
+ if !strings.EqualFold(cdbNameFromImage, cdbNameFromYaml) {
+ klog.InfoS("NewBootstrapDatabaseTask", "cdbName FromImage", cdbNameFromImage, "cdbName FromYaml", cdbNameFromYaml)
+ cdbRenaming = true
+ }
+
+ db = newOracleCDB(ctx, cdbNameFromImage, cdbNameFromYaml, version, zone, host, DBDomain, pgaMB, sgaMB)
+ bootstrapTask := &BootstrapTask{
+ db: db,
+ osUtil: &OSUtilImpl{},
+ dbdClient: dbdClient,
+ cdbRenaming: cdbRenaming,
+ isSeeded: isSeeded,
+ }
+ if provisioned {
+ bootstrapTask.subTasks = []task{
+ &simpleTask{name: "initUIDGID", callFun: bootstrapTask.initUIDGID},
+ &simpleTask{name: "relinkConfigFiles", callFun: bootstrapTask.relinkConfigFiles},
+ &simpleTask{name: "recreateFlashDir", callFun: bootstrapTask.recreateFlashDir},
+ &simpleTask{name: "setEnv", callFun: bootstrapTask.setEnv},
+ &simpleTask{name: "startDB", callFun: bootstrapTask.startDB},
+ &simpleTask{name: "initListeners", callFun: bootstrapTask.initListeners},
+ }
+ } else {
+ bootstrapTask.subTasks = []task{
+ &simpleTask{name: "renameDatabase", callFun: bootstrapTask.renameDatabase},
+ &simpleTask{name: "initUIDGID", callFun: bootstrapTask.initUIDGID},
+ &simpleTask{name: "createDirs", callFun: bootstrapTask.createDirs},
+ &simpleTask{name: "setSourceEnv", callFun: bootstrapTask.setSourceEnv},
+ &simpleTask{name: "moveDataFiles", callFun: bootstrapTask.moveDataFiles},
+ &simpleTask{name: "moveConfigFiles", callFun: bootstrapTask.moveConfigFiles},
+ &simpleTask{name: "relinkConfigFiles", callFun: bootstrapTask.relinkConfigFiles},
+ &simpleTask{name: "setParameters", callFun: bootstrapTask.setParameters},
+ &simpleTask{name: "moveDatabase", callFun: bootstrapTask.moveDatabase},
+ &simpleTask{name: "runNID", callFun: bootstrapTask.runNID},
+ &simpleTask{name: "prepDatabase", callFun: bootstrapTask.prepDatabase},
+ &simpleTask{name: "fixOratab", callFun: bootstrapTask.fixOratab},
+ &simpleTask{name: "setupUsers", callFun: bootstrapTask.setupUsers},
+ &simpleTask{name: "cleanup", callFun: bootstrapTask.cleanup},
+ &simpleTask{name: "initListeners", callFun: bootstrapTask.initListeners},
+ }
+ }
+ if iscdb {
+ bootstrapTask.subTasks = append(bootstrapTask.subTasks, &simpleTask{name: "createPDBSeedTemp", callFun: bootstrapTask.createPDBSeedTemp})
+ }
+
+ return bootstrapTask, nil
+}
+
+// NewBootstrapDatabaseTaskForUnseeded returns a Task for bootstrapping a CDB created during instance creation.
+func NewBootstrapDatabaseTaskForUnseeded(cdbName, dbUniqueName, dbDomain string, dbdClient dbdpb.DatabaseDaemonClient) *BootstrapTask {
+ cdb := &oracleCDB{
+ cdbName: cdbName,
+ uniqueName: dbUniqueName,
+ DBDomain: dbDomain,
+ }
+ bootstrapTask := &BootstrapTask{
+ db: cdb,
+ osUtil: &OSUtilImpl{},
+ dbdClient: dbdClient,
+ cdbRenaming: false,
+ }
+
+ // The following tasks are common to the seeded and unseeded workflows.
+ // The remaining tasks (in the seeded flow) all relate to relocating the database from u01 to u02 and u03.
+ bootstrapTask.subTasks = []task{
+ &simpleTask{name: "setParameters", callFun: bootstrapTask.setParameters},
+ &simpleTask{name: "prepDatabase", callFun: bootstrapTask.prepDatabase},
+ &simpleTask{name: "setupUsers", callFun: bootstrapTask.setupUsers},
+ &simpleTask{name: "createPDBSeedTemp", callFun: bootstrapTask.createPDBSeedTemp},
+ }
+ return bootstrapTask
+}
+
+// NewBootstrapDatabaseTaskForStandby returns a Task for bootstrapping a standby instance.
+func NewBootstrapDatabaseTaskForStandby(cdbName, dbDomain string, dbdClient dbdpb.DatabaseDaemonClient) *BootstrapTask {
+ cdb := &oracleCDB{
+ cdbName: cdbName,
+ }
+ bootstrapTask := &BootstrapTask{
+ db: cdb,
+ osUtil: &OSUtilImpl{},
+ dbdClient: dbdClient,
+ cdbRenaming: false,
+ }
+
+ bootstrapTask.subTasks = []task{
+ &simpleTask{name: "setupUsers", callFun: bootstrapTask.setupUsers},
+ }
+ return bootstrapTask
+}
diff --git a/oracle/pkg/database/provision/bootstrap_database_task_test.go b/oracle/pkg/database/provision/bootstrap_database_task_test.go
new file mode 100644
index 0000000..fcbd764
--- /dev/null
+++ b/oracle/pkg/database/provision/bootstrap_database_task_test.go
@@ -0,0 +1,123 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package provision + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + + dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle" +) + +func TestSetParametersHelper(t *testing.T) { + dbdServer := &fakeServer{} + client, cleanup := newFakeDatabaseDaemonClient(t, dbdServer) + ctx := context.Background() + defer cleanup() + testCDB := &oracleCDB{ + sourceCDBName: "GCLOUD", + cdbName: "TEST", + version: "12.2", + host: "testhost", + uniqueName: "TEST_generic", + DBDomain: "gke", + databaseParamSGATargetMB: 0, + databaseParamPGATargetMB: 0, + } + bootstrapTask := &BootstrapTask{ + dbdClient: client, + db: testCDB, + isSeeded: false, + } + wantSQLs := []string{ + "alter system set audit_file_dest='/u02/app/oracle/admin/TEST/adump' scope=spfile", + "alter system set audit_trail='db' scope=spfile", + "alter system set control_files='/u02/app/oracle/oradata/TEST/control01.ctl' scope=spfile", + "alter system set db_block_size=8192 scope=spfile", + "alter system set db_domain='gke' scope=spfile", + "alter system set db_name='TEST' scope=spfile", + "alter system set db_unique_name='TEST_generic' scope=spfile", + "alter system set db_recovery_file_dest_size=100G scope=spfile", + "alter system set db_recovery_file_dest='/u03/app/oracle/fast_recovery_area/TEST' scope=spfile", + "alter system set diagnostic_dest='/u02/app/oracle' scope=spfile", + "alter system set dispatchers='(PROTOCOL=TCP) (SERVICE=TESTXDB)' scope=spfile", + "alter system set enable_pluggable_database=TRUE scope=spfile", + "alter system set filesystemio_options=SETALL scope=spfile", + "alter system set local_listener='(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=REGLSNR_6021)))' scope=spfile", + "alter system set open_cursors=300 scope=spfile", + "alter system set processes=300 scope=spfile", + "alter system set remote_login_passwordfile='EXCLUSIVE' scope=spfile", + "alter system set undo_tablespace='UNDOTBS1' scope=spfile", + "alter system set log_archive_dest_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=TEST_generic' scope=spfile", + "alter system set log_archive_dest_state_1=enable scope=spfile", + "alter system set log_archive_format='%t_%s_%r.arch' scope=spfile", + "alter system set standby_file_management=AUTO scope=spfile", + "alter system set common_user_prefix='gcsql$' scope=spfile", + } + var gotSQLs []string + dbdServer.fakeRunSQLPlus = func(ctx context.Context, request *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + gotSQLs = append(gotSQLs, request.GetCommands()...) 
+ return &dbdpb.RunCMDResponse{}, nil + } + + if err := bootstrapTask.setParametersHelper(ctx); err != nil { + t.Fatalf("BootstrapTask.setParametersHelper got %v, want nil", err) + } + if diff := cmp.Diff(wantSQLs, gotSQLs); diff != "" { + t.Errorf("BootstrapTask.setParametersHelper called unexpected sqls: -want +got %v", diff) + } +} + +type fakeServer struct { + *dbdpb.UnimplementedDatabaseDaemonServer + fakeRunSQLPlus func(context.Context, *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) +} + +func (f *fakeServer) RunSQLPlus(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) { + if f.fakeRunSQLPlus == nil { + return nil, errors.New("RunSQLPlus fake not found") + } + return f.fakeRunSQLPlus(ctx, req) +} + +func newFakeDatabaseDaemonClient(t *testing.T, server *fakeServer) (dbdpb.DatabaseDaemonClient, func()) { + t.Helper() + grpcSvr := grpc.NewServer() + + dbdpb.RegisterDatabaseDaemonServer(grpcSvr, server) + lis := bufconn.Listen(2 * 1024 * 1024) + go grpcSvr.Serve(lis) + + dbdConn, err := grpc.Dial("test", + grpc.WithInsecure(), + grpc.WithContextDialer( + func(ctx context.Context, s string) (conn net.Conn, err error) { + return lis.Dial() + }), + ) + if err != nil { + t.Fatalf("failed to dial to dbDaemon: %v", err) + } + return dbdpb.NewDatabaseDaemonClient(dbdConn), func() { + dbdConn.Close() + grpcSvr.GracefulStop() + } +} diff --git a/oracle/pkg/database/provision/cdb.go b/oracle/pkg/database/provision/cdb.go new file mode 100644 index 0000000..4006ee5 --- /dev/null +++ b/oracle/pkg/database/provision/cdb.go @@ -0,0 +1,224 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package provision + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/security" + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/common" +) + +// oracleCDB provides Oracle CDB information. +type oracleCDB struct { + sourceCDBName string + cdbName string + version string + host string + uniqueName string + DBDomain string + databaseParamSGATargetMB uint64 + databaseParamPGATargetMB uint64 +} + +// newOracleCDB constructs a CDB information provider. +func newOracleCDB(ctx context.Context, sourceDBName, cdbName string, version, zone, host, DBDomain string, paramPGATargetMB, paramSGATargetMB uint64) *oracleCDB { + uniqueName := fmt.Sprintf("%s_%s", cdbName, strings.Replace(zone, "-", "", -1)) + return &oracleCDB{ + sourceCDBName: sourceDBName, + cdbName: cdbName, + version: version, + uniqueName: uniqueName, + host: host, + DBDomain: DBDomain, + databaseParamSGATargetMB: paramSGATargetMB, + databaseParamPGATargetMB: paramPGATargetMB, + } +} + +// GetVersion returns the version of the oracle DB. +func (db *oracleCDB) GetVersion() string { + return db.version +} + +// GetDataFilesDir returns data files directory location. 
+func (db *oracleCDB) GetDataFilesDir() string { + return fmt.Sprintf(consts.DataDir, consts.DataMount, db.cdbName) +} + +// GetSourceDataFilesDir returns data files directory location of the pre-built DB. +func (db *oracleCDB) GetSourceDataFilesDir() string { + return filepath.Join(common.GetSourceOracleDataDirectory(db.GetVersion()), db.GetSourceDatabaseName()) +} + +// GetConfigFilesDir returns config file directory location. +func (db *oracleCDB) GetConfigFilesDir() string { + return fmt.Sprintf(consts.ConfigDir, consts.DataMount, db.cdbName) +} + +// GetSourceConfigFilesDir returns config file directory location of the pre-built DB. +func (db *oracleCDB) GetSourceConfigFilesDir() string { + return filepath.Join(db.GetDatabaseHome(), "dbs") +} + +// GetAdumpDir returns adump directory location. +func (db *oracleCDB) GetAdumpDir() string { + return filepath.Join(db.GetDatabaseBase(), "admin", db.GetDatabaseName(), "adump") +} + +// GetCdumpDir returns cdump directory location. +func (db *oracleCDB) GetCdumpDir() string { + return filepath.Join(db.GetDatabaseBase(), "admin", db.GetDatabaseName(), "cdump") +} + +// GetFlashDir returns flash directory location. +func (db *oracleCDB) GetFlashDir() string { + return fmt.Sprintf(consts.RecoveryAreaDir, consts.LogMount, db.cdbName) +} + +// GetListenerDir returns listeners directory location. +func (db *oracleCDB) GetListenerDir() string { + return fmt.Sprintf(consts.ListenerDir, consts.DataMount) +} + +// GetDatabaseBase returns oracle base location. +func (db *oracleCDB) GetDatabaseBase() string { + return consts.OracleBase +} + +// GetDatabaseName returns database name. +func (db *oracleCDB) GetDatabaseName() string { + return db.cdbName +} + +// GetSourceDatabaseName returns database name of the pre-built DB. +func (db *oracleCDB) GetSourceDatabaseName() string { + return db.sourceCDBName +} + +// GetDatabaseHome returns database home location. +func (db *oracleCDB) GetDatabaseHome() string { + return common.GetSourceOracleHome(db.GetVersion()) +} + +// GetDataFiles returns initial data files associated with the DB. +func (db *oracleCDB) GetDataFiles() []string { + return []string{"system01.dbf", "sysaux01.dbf", "undotbs01.dbf", "users01.dbf", "pdbseed/undotbs01.dbf", "pdbseed/sysaux01.dbf", "pdbseed/system01.dbf"} +} + +// GetSourceConfigFiles returns initial config files associated with pre-built DB. +func (db *oracleCDB) GetSourceConfigFiles() []string { + return []string{fmt.Sprintf("spfile%s.ora", db.GetSourceDatabaseName()), fmt.Sprintf("orapw%s", db.GetSourceDatabaseName())} +} + +// GetConfigFiles returns config files associated with current DB. +func (db *oracleCDB) GetConfigFiles() []string { + return []string{fmt.Sprintf("spfile%s.ora", db.GetDatabaseName()), fmt.Sprintf("orapw%s", db.GetDatabaseName())} +} + +// GetMountPointDatafiles returns the mount point of the data files. +func (db *oracleCDB) GetMountPointDatafiles() string { + return fmt.Sprintf("/%s", consts.DataMount) +} + +// GetMountPointAdmin returns the mount point of the admin directory. +func (db *oracleCDB) GetMountPointAdmin() string { + return fmt.Sprintf("/%s", consts.DataMount) +} + +// GetListeners returns listeners of the DB. +func (db *oracleCDB) GetListeners() map[string]*consts.Listener { + return consts.ListenerNames +} + +// GetDatabaseUniqueName returns database unique name. +func (db *oracleCDB) GetDatabaseUniqueName() string { + return db.uniqueName +} + +// GetDBDomain returns DB domain. 
+func (db *oracleCDB) GetDBDomain() string { + return db.DBDomain +} + +// GetMountPointDiag returns the mount point of the diag directory. +func (db *oracleCDB) GetMountPointDiag() string { + return fmt.Sprintf("/%s", consts.DataMount) +} + +// GetDatabaseParamPGATargetMB returns PGA value in MB. +func (db *oracleCDB) GetDatabaseParamPGATargetMB() uint64 { + return db.databaseParamPGATargetMB +} + +// GetDatabaseParamSGATargetMB returns SGA value in MB. +func (db *oracleCDB) GetDatabaseParamSGATargetMB() uint64 { + return db.databaseParamSGATargetMB +} + +// GetOratabFile returns oratab file location. +func (db *oracleCDB) GetOratabFile() string { + return consts.OraTab +} + +// GetSourceDatabaseHost returns host name of the pre-built DB. +func (db *oracleCDB) GetSourceDatabaseHost() string { + return consts.SourceDatabaseHost +} + +// GetHostName returns host name. +func (db *oracleCDB) GetHostName() string { + return db.host +} + +// GetCreateUserCmds returns create user commands to setup users. +func (db *oracleCDB) GetCreateUserCmds() []*createUser { + var ( + sPwd, pPwd string + err error + ) + if sPwd, err = security.RandOraclePassword(); err != nil { + return nil + } + if pPwd, err = security.RandOraclePassword(); err != nil { + return nil + } + + return []*createUser{ + { + user: consts.SecurityUser, + cmds: []string{ + CreateUserCmd(consts.SecurityUser, sPwd), + fmt.Sprintf("grant create session to %s container=all", consts.SecurityUser), + fmt.Sprintf("grant create trigger to %s container=all", consts.SecurityUser), + fmt.Sprintf("grant administer database trigger to %s container=all", consts.SecurityUser)}, + }, + { + // This CDB user should have no permissions on CDB as it will import/export user provided DMPs + user: consts.PDBLoaderUser, + cmds: []string{ + CreateUserCmd(consts.PDBLoaderUser, pPwd)}, + }, + } +} + +// IsCDB returns true if this is a CDB. +func (db *oracleCDB) IsCDB() bool { + return true +} diff --git a/oracle/pkg/database/provision/common.go b/oracle/pkg/database/provision/common.go new file mode 100644 index 0000000..9a638ba --- /dev/null +++ b/oracle/pkg/database/provision/common.go @@ -0,0 +1,465 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package provision + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "text/template" + + "k8s.io/klog/v2" + + "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts" +) + +var ( + // ListenerTemplateName is the filepath for the listener file template in the container. + ListenerTemplateName = filepath.Join(consts.ScriptDir, "bootstrap-database-listener.template") + + // TnsnamesTemplateName is the filepath for the tnsnames file template in the container. + TnsnamesTemplateName = filepath.Join(consts.ScriptDir, "bootstrap-database-tnsnames.template") + + // ControlFileTemplateName is the filepath for the control file template in the container. 
+ ControlFileTemplateName = filepath.Join(consts.ScriptDir, "bootstrap-database-crcf.template")
+
+ // InitOraTemplateName is the filepath for the initOra file template in the container for Oracle EE/SE.
+ InitOraTemplateName = filepath.Join(consts.ScriptDir, "bootstrap-database-initfile.template")
+
+ // InitOraXeTemplateName is the filepath for the initOra file template in the container for Oracle 18c XE.
+ InitOraXeTemplateName = filepath.Join(consts.ScriptDir, "bootstrap-database-initfile-oracle-xe.template")
+
+ fileSQLNet = "sqlnet.ora"
+ // SQLNetSrc is the filepath for the sqlnet.ora source file in the container.
+ SQLNetSrc = filepath.Join(consts.ScriptDir, fileSQLNet)
+
+ // MetaDataFile is the filepath of the Database image metadata (Oracle Home,
+ // CDB name, Version) file in the image.
+ MetaDataFile = "/home/oracle/.metadata"
+)
+
+// ListenerInput is the struct that is applied to the listener template.
+type ListenerInput struct {
+ PluggableDatabaseNames []string
+ DatabaseName string
+ DatabaseBase string
+ DatabaseHome string
+ ListenerName string
+ ListenerPort string
+ ListenerProtocol string
+ DatabaseHost string
+ DBDomain string
+}
+
+type controlfileInput struct {
+ DatabaseName string
+ DataFilesDir string
+ DataFilesMultiLine string
+}
+
+// oracleDB defines APIs for the DB information provider.
+// Information providers need to implement this interface to support Oracle DB tasks.
+type oracleDB interface {
+ // GetVersion returns the version of the oracle DB.
+ GetVersion() string
+ // GetDataFilesDir returns data files directory location.
+ GetDataFilesDir() string
+ // GetSourceDataFilesDir returns data files directory location of the pre-built DB.
+ GetSourceDataFilesDir() string
+ // GetConfigFilesDir returns config file directory location.
+ GetConfigFilesDir() string
+ // GetSourceConfigFilesDir returns config file directory location of the pre-built DB.
+ GetSourceConfigFilesDir() string
+ // GetAdumpDir returns adump directory location.
+ GetAdumpDir() string
+ // GetCdumpDir returns cdump directory location.
+ GetCdumpDir() string
+ // GetFlashDir returns flash directory location.
+ GetFlashDir() string
+ // GetListenerDir returns listeners directory location.
+ GetListenerDir() string
+ // GetDatabaseBase returns oracle base location.
+ GetDatabaseBase() string
+ // GetDatabaseName returns database name.
+ GetDatabaseName() string
+ // GetSourceDatabaseName returns database name of the pre-built DB.
+ GetSourceDatabaseName() string
+ // GetDatabaseHome returns database home location.
+ GetDatabaseHome() string
+ // GetDataFiles returns initial data files associated with the DB.
+ GetDataFiles() []string
+ // GetSourceConfigFiles returns initial config files associated with pre-built DB.
+ GetSourceConfigFiles() []string
+ // GetConfigFiles returns config files associated with current DB.
+ GetConfigFiles() []string
+ // GetMountPointDatafiles returns the mount point of the data files.
+ GetMountPointDatafiles() string
+ // GetMountPointAdmin returns the mount point of the admin directory.
+ GetMountPointAdmin() string
+ // GetListeners returns listeners of the DB.
+ GetListeners() map[string]*consts.Listener
+ // GetDatabaseUniqueName returns database unique name.
+ GetDatabaseUniqueName() string
+ // GetDBDomain returns DB domain.
+ GetDBDomain() string
+ // GetMountPointDiag returns the mount point of the diag directory.
+ GetMountPointDiag() string
+ // GetDatabaseParamPGATargetMB returns PGA value in MB.
+ GetDatabaseParamPGATargetMB() uint64
+ // GetDatabaseParamSGATargetMB returns SGA value in MB.
+ GetDatabaseParamSGATargetMB() uint64
+ // GetOratabFile returns oratab file location.
+ GetOratabFile() string
+ // GetSourceDatabaseHost returns host name of the pre-built DB.
+ GetSourceDatabaseHost() string
+ // GetHostName returns host name.
+ GetHostName() string
+ // GetCreateUserCmds returns create user commands to setup users.
+ GetCreateUserCmds() []*createUser
+ // IsCDB returns true if this is a CDB.
+ IsCDB() bool
+}
+
+// createUser provides the user name and SQL commands to create that user in this DB.
+type createUser struct {
+ user string
+ cmds []string
+}
+
+// osUtil exists to allow faking user/group lookups in unit tests.
+type osUtil interface {
+ Lookup(username string) (*user.User, error)
+ LookupGroup(name string) (*user.Group, error)
+}
+
+// OSUtilImpl contains utility methods for fetching user/group metadata.
+type OSUtilImpl struct{}
+
+// Lookup method obtains the user's metadata (uid, gid, username, name, homedir).
+func (*OSUtilImpl) Lookup(username string) (*user.User, error) {
+ return user.Lookup(username)
+}
+
+// LookupGroup method obtains the group's metadata (gid, name).
+func (*OSUtilImpl) LookupGroup(name string) (*user.Group, error) {
+ return user.LookupGroup(name)
+}
+
+type task interface {
+ GetName() string
+ Call(ctx context.Context) error
+}
+
+// simpleTask is a task that should be testable. A task should bring the system
+// to a state that can be verified.
+type simpleTask struct {
+ name string
+ callFun func(ctx context.Context) error
+}
+
+func (task *simpleTask) GetName() string {
+ return task.name
+}
+
+func (task *simpleTask) Call(ctx context.Context) error {
+ return task.callFun(ctx)
+}
+
+func doSubTasks(ctx context.Context, parentTaskName string, subTasks []task) error {
+ klog.InfoS("parent task: running", "task", parentTaskName)
+ for _, sub := range subTasks {
+ klog.InfoS("subtask: running", "parent task", parentTaskName, "sub task", sub.GetName())
+ if err := sub.Call(ctx); err != nil {
+ klog.ErrorS(err, "Subtask failed", "parent task", parentTaskName, "sub task", sub.GetName())
+ return err
+ }
+ klog.InfoS("subtask: Done", "parent task", parentTaskName, "sub task", sub.GetName())
+ }
+ klog.InfoS("parent task: Done", "task", parentTaskName)
+
+ return nil
+}
+
+// oracleUser returns uid and gid of the Oracle user.
+func oracleUser(util osUtil) (uint32, uint32, error) {
+ u, err := util.Lookup(consts.OraUser)
+ if err != nil {
+ return 0, 0, fmt.Errorf("oracleUser: could not determine the current user: %v", err)
+ }
+
+ if u.Username == "root" {
+ return 0, 0, fmt.Errorf("oracleUser: this program is designed to be run by the Oracle software installation owner (e.g. oracle), not %q", u.Username)
+ }
+
+ // Oracle user's primary group name should be either dba or oinstall.
+ groups := consts.OraGroup
+ var gids []string
+ for _, group := range groups {
+ g, err := util.LookupGroup(group)
+ // Not every group is mandatory; e.g. oinstall may not exist.
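+ // Resolution sketch: collect the GIDs of the groups that do exist, then
+ // accept the user only if their primary GID matches one of them.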
+ klog.InfoS("looking up groups", "group", group, "g", g) + if err != nil { + continue + } + gids = append(gids, g.Gid) + } + for _, g := range gids { + if u.Gid == g { + usr, err := strconv.ParseUint(u.Uid, 10, 32) + if err != nil { + return 0, 0, err + } + grp, err := strconv.ParseUint(u.Gid, 10, 32) + if err != nil { + return 0, 0, err + } + return uint32(usr), uint32(grp), nil + } + } + return 0, 0, fmt.Errorf("oracleUser: current user's primary group (GID=%q) is not dba|oinstall (GID=%q)", u.Gid, gids) +} + +// LoadTemplateListener applies listener input to listener and tns template. +// It returns listener tns and sqlnet in string. +// In contrast to pfile, env file and a control file, there may be multiple listeners +// and a search/replace in that file is different, so it's easier to load it while +// iterating over listeners, not ahead of time. This method also generates the tnsnames +// based on the port numbers of the listeners. +func LoadTemplateListener(l *ListenerInput, name, port, protocol string) (string, string, string, error) { + l.ListenerName = name + l.ListenerPort = port + l.ListenerProtocol = protocol + t, err := template.New(filepath.Base(ListenerTemplateName)).ParseFiles(ListenerTemplateName) + if err != nil { + return "", "", "", fmt.Errorf("LoadTemplateListener: parsing %q failed: %v", ListenerTemplateName, err) + } + + listenerBuf := &bytes.Buffer{} + if err := t.Execute(listenerBuf, l); err != nil { + return "", "", "", fmt.Errorf("LoadTemplateListener: executing %q failed: %v", ListenerTemplateName, err) + } + + tns, err := template.New(filepath.Base(TnsnamesTemplateName)).ParseFiles(TnsnamesTemplateName) + if err != nil { + return "", "", "", fmt.Errorf("LoadTemplateListener: parsing %q failed: %v", TnsnamesTemplateName, err) + } + + tnsBuf := &bytes.Buffer{} + if err := tns.Execute(tnsBuf, l); err != nil { + return "", "", "", fmt.Errorf("LoadTemplateListener: executing %q failed: %v", TnsnamesTemplateName, err) + } + + sqlnet, err := ioutil.ReadFile(SQLNetSrc) + if err != nil { + return "", "", "", fmt.Errorf("initDBListeners: unable to read sqlnet from scripts directory: %v", err) + } + return listenerBuf.String(), tnsBuf.String(), string(sqlnet), nil +} + +// MakeDirs creates directories in the container. +func MakeDirs(ctx context.Context, dirs []string, uid, gid uint32) error { + for _, odir := range dirs { + if err := os.MkdirAll(odir, 0750); err != nil { + return fmt.Errorf("create a directory %q failed: %v", odir, err) + } + klog.InfoS("created a directory", "dir", odir) + } + return nil +} + +// replace does a search and replace of term to toterm in place. +func replace(outFile, term, toterm string, uid, gid uint32) error { + input, err := ioutil.ReadFile(outFile) + if err != nil { + return fmt.Errorf("replace: reading %q failed: %v", outFile, err) + } + out := bytes.Replace(input, []byte(term), []byte(toterm), -1) + if err := ioutil.WriteFile(outFile, out, 0600); err != nil { + return fmt.Errorf("replace: error writing file: %v", err) + } + + return nil +} + +// MoveFile moves a file between directories. +// os.Rename() gives error "invalid cross-device link" for Docker container with Volumes. 
+// MoveFile moves a file between directories.
+// os.Rename() gives the error "invalid cross-device link" for Docker containers with Volumes.
+func MoveFile(sourceFile, destFile string) error {
+	klog.Infof("Moving %s to %s", sourceFile, destFile)
+	inputFile, err := os.Open(sourceFile)
+	if err != nil {
+		return fmt.Errorf("couldn't open source file: %s", err)
+	}
+	defer func() {
+		if err := inputFile.Close(); err != nil {
+			klog.Warningf("failed to close %v: %v", inputFile, err)
+		}
+	}()
+	outputFile, err := os.Create(destFile)
+	if err != nil {
+		return fmt.Errorf("couldn't create dest file: %s", err)
+	}
+	defer func() {
+		if err := outputFile.Close(); err != nil {
+			klog.Warningf("failed to close %v: %v", outputFile, err)
+		}
+	}()
+	_, err = io.Copy(outputFile, inputFile)
+	if err != nil {
+		return fmt.Errorf("writing to output file failed: %s", err)
+	}
+	// The copy was successful, so now delete the original file.
+	return os.Remove(sourceFile)
+}
+
+// MoveConfigFiles moves Database config files from the Oracle standard paths to the
+// persistent configuration in the PD.
+func MoveConfigFiles(OracleHome, CDBName string) error {
+	// /u02/app/oracle/oraconfig/
+	configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, CDBName)
+	// /u01/app/oracle/product/12.2/db/dbs/
+	sourceConfigDir := filepath.Join(OracleHome, "dbs")
+	for _, f := range []string{fmt.Sprintf("spfile%s.ora", CDBName), fmt.Sprintf("orapw%s", CDBName)} {
+		sf := filepath.Join(sourceConfigDir, f)
+		tf := filepath.Join(configDir, f)
+		tfDir := filepath.Dir(tf)
+		if err := os.MkdirAll(tfDir, 0750); err != nil {
+			return fmt.Errorf("MoveConfigFiles: failed to create dir %s: %v", tfDir, err)
+		}
+		if err := MoveFile(sf, tf); err != nil {
+			return fmt.Errorf("MoveConfigFiles: move config file %s to %s failed: %v", sf, tf, err)
+		}
+	}
+	return nil
+}
+
+// RelinkConfigFiles creates softlinks under the Oracle standard paths to the
+// persistent configuration files in the PD.
+func RelinkConfigFiles(OracleHome, CDBName string) error {
+	configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, CDBName)
+	sourceConfigDir := filepath.Join(OracleHome, "dbs")
+	for _, f := range []string{fmt.Sprintf("spfile%s.ora", CDBName), fmt.Sprintf("orapw%s", CDBName)} {
+		destn := filepath.Join(sourceConfigDir, f)
+		if _, err := os.Stat(destn); err == nil {
+			if err := os.Remove(destn); err != nil {
+				return fmt.Errorf("unable to delete existing file %s: %v", f, err)
+			}
+		}
+		if err := os.Symlink(filepath.Join(configDir, f), filepath.Join(sourceConfigDir, f)); err != nil {
+			return fmt.Errorf("symlink creation failed for %s to oracle directories: %v", f, err)
+		}
+	}
+	return nil
+}
+
+// CreateUserCmd returns the SQL command to create a user with the provided identifier.
+func CreateUserCmd(user, identifier string) string {
+	return fmt.Sprintf("create user %s identified by %s", user, identifier)
+}
+
+// ChangePasswordCmd returns the SQL command to change a user's identifier.
+func ChangePasswordCmd(user, newIdentifier string) string {
+	return fmt.Sprintf("alter user %s identified by %s", user, newIdentifier)
+}
+
+// GrantUserCmd returns the SQL command to grant permissions to a user.
+// Permissions are either a single permission or a list of permissions separated by commas.
+func GrantUserCmd(user, permissions string) string {
+	return fmt.Sprintf("grant %s to %s", permissions, user)
+}
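The two helpers above are intended to be used as a pair: config files are moved onto the persistent disk once, and symlinks under $ORACLE_HOME/dbs are recreated afterwards (and on later container starts). A hedged sketch, with illustrative paths and CDB name:

package provision

// exampleRelocateConfig sketches the pairing of MoveConfigFiles and
// RelinkConfigFiles: move spfile/orapw onto the persistent disk, then point
// the Oracle standard paths back at them via symlinks.
func exampleRelocateConfig() error {
	oracleHome := "/u01/app/oracle/product/12.2/db"
	if err := MoveConfigFiles(oracleHome, "GCLOUD"); err != nil {
		return err
	}
	return RelinkConfigFiles(oracleHome, "GCLOUD")
}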
+// FetchMetaDataFromImage returns the Oracle Home, CDB name and Version parsed from the
+// database image metadata file.
+func FetchMetaDataFromImage(path string) (string, string, string, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return "", "", "", err
+	}
+	defer func() {
+		if err := f.Close(); err != nil {
+			klog.Warningf("failed to close %v: %v", f, err)
+		}
+	}()
+
+	var cdbName, oracleHome, version string
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		kv := strings.Split(line, "=")
+		// Skip malformed lines to avoid indexing past the end of kv.
+		if len(kv) != 2 {
+			continue
+		}
+
+		switch kv[0] {
+		case "ORACLE_HOME":
+			oracleHome = kv[1]
+		case "ORACLE_SID":
+			cdbName = kv[1]
+		case "VERSION":
+			version = kv[1]
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return "", "", "", err
+	}
+	return oracleHome, cdbName, version, nil
+}
+
+// GetDefaultInitParams returns the default init parameters, which are set at DB creation.
+func GetDefaultInitParams(dbName string) map[string]string {
+	controlFileLoc := filepath.Join(fmt.Sprintf(consts.DataDir, consts.DataMount, dbName), "control01.ctl")
+	initParamDict := make(map[string]string)
+	initParamDict["log_archive_dest_1"] = "'LOCATION=USE_DB_RECOVERY_FILE_DEST'"
+	initParamDict["enable_pluggable_database"] = "TRUE"
+	initParamDict["common_user_prefix"] = "'gcsql$'"
+	initParamDict["control_files"] = fmt.Sprintf("'%s'", controlFileLoc)
+	return initParamDict
+}
+
+// MapToSlice converts a map[string]string into a string slice with format "<key>=<value>".
+func MapToSlice(kv map[string]string) []string {
+	var result []string
+	for k, v := range kv {
+		result = append(result, fmt.Sprintf("%s=%s", k, v))
+	}
+	return result
+}
+
+// MergeInitParams merges the default parameters and the user-specified parameters, and
+// returns the merged parameters.
+func MergeInitParams(defaultParams map[string]string, userParams []string) (map[string]string, error) {
+	mergedParams := make(map[string]string)
+
+	for _, userParam := range userParams {
+		kv := strings.Split(userParam, "=")
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("MergeInitParams: user param %s is not separated by =", userParam)
+		}
+		klog.InfoS("provision/MergeInitParams: adding user param", "key", kv[0], "val", kv[1])
+		mergedParams[kv[0]] = kv[1]
+	}
+	// We only merge user params and reject any param that tries to override an internal
+	// setting used by the controller. For example, if we permitted overrides and the user
+	// reassigned common_user_prefix to, say, xyz instead of the default gcsql$, our health
+	// checks would break.
+	for k, v := range defaultParams {
+		if val, ok := mergedParams[k]; ok {
+			klog.InfoS("provision/MergeInitParams: overriding user param", "key", k, "user defined val", val, "override val", v)
+		}
+		mergedParams[k] = v
+		klog.InfoS("provision/MergeInitParams: adding default param", "key", k, "val", v)
+	}
+
+	return mergedParams, nil
+}
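A brief usage sketch (not part of the diff) of the parameter-merging flow; the user parameters and DB name are illustrative:

package provision

import "fmt"

// exampleMergeParams shows the expected call pattern: start from the defaults
// for a given DB, fold in user parameters (which cannot override the internal
// defaults), then flatten the result back into "key=value" strings.
func exampleMergeParams() error {
	defaults := GetDefaultInitParams("GCLOUD")
	merged, err := MergeInitParams(defaults, []string{"open_cursors=300", "processes=300"})
	if err != nil {
		return err
	}
	fmt.Println(MapToSlice(merged))
	return nil
}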
diff --git a/oracle/pkg/database/provision/common_test.go b/oracle/pkg/database/provision/common_test.go
new file mode 100644
index 0000000..d65d3c4
--- /dev/null
+++ b/oracle/pkg/database/provision/common_test.go
@@ -0,0 +1,111 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provision
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestInitParamOverridesAndMerges(t *testing.T) {
+	testCases := []struct {
+		name           string
+		dbName         string
+		dbDomain       string
+		userParams     []string
+		defaultParams  map[string]string
+		expectedParams map[string]string
+	}{
+		{
+			name: "Verify override of common_user_prefix by user params doesn't happen",
+			userParams: []string{
+				"common_user_prefix='aravsql$'",
+			},
+			defaultParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+			},
+			expectedParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+			},
+		},
+		{
+			name: "Verify override of enable_pluggable_database by user params doesn't happen",
+			userParams: []string{
+				"enable_pluggable_database=FALSE",
+			},
+			defaultParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+			},
+			expectedParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+			},
+		},
+		{
+			name: "Verify merge of non-internal params happens correctly",
+			userParams: []string{
+				"open_cursors=300",
+				"db_block_size=8192",
+				"processes=300",
+			},
+			defaultParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+			},
+			expectedParams: map[string]string{
+				"common_user_prefix":        "'gcsql$'",
+				"-control_files":            "'/u02/app/oracle/oradata/mydb/control01.ctl'",
+				"DB_DOMAIN":                 "'gke'",
+				"log_archive_dest_1":        "'LOCATION=USE_DB_RECOVERY_FILE_DEST'",
+				"enable_pluggable_database": "TRUE",
+				"open_cursors":              "300",
+				"db_block_size":             "8192",
+				"processes":                 "300",
+			},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			resultantParams, err := MergeInitParams(tc.defaultParams, tc.userParams)
+			if err != nil {
+				t.Fatalf("provision.MergeInitParams merging failed for %v, %v: %v", tc.defaultParams, tc.userParams, err)
+			}
+			t.Logf("merged params: %v", resultantParams)
+			if diff := cmp.Diff(tc.expectedParams, resultantParams); diff != "" {
+				t.Errorf("Merged params are incorrect (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
diff --git a/oracle/pkg/database/provision/init_file_generator.go b/oracle/pkg/database/provision/init_file_generator.go
new file mode 100644
index 0000000..abf429c
--- /dev/null
+++ b/oracle/pkg/database/provision/init_file_generator.go
@@ -0,0 +1,47 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provision
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+	"text/template"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
+)
+
+type initFileInput struct {
+	SourceDBName string
+	DestDBName   string
+}
+
+// LoadInitOraTemplate generates the init.ora content using the template and the required parameters.
+func (i *initFileInput) LoadInitOraTemplate(dbVersion string) (string, error) {
+	templateName := InitOraTemplateName
+	if dbVersion == consts.Oracle18c {
+		templateName = InitOraXeTemplateName
+	}
+	t, err := template.New(filepath.Base(templateName)).ParseFiles(templateName)
+	if err != nil {
+		return "", fmt.Errorf("LoadInitOraTemplate: parsing %q failed: %v", templateName, err)
+	}
+
+	initOraBuf := &bytes.Buffer{}
+	if err := t.Execute(initOraBuf, i); err != nil {
+		return "", fmt.Errorf("LoadInitOraTemplate: executing %q failed: %v", templateName, err)
+	}
+	return initOraBuf.String(), nil
+}
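As a usage note for the init-file generator above (a sketch only; the CDB names are illustrative, and per LoadInitOraTemplate any version other than consts.Oracle18c selects the EE/SE template):

package provision

import "fmt"

// exampleInitOra renders the init.ora for a hypothetical seeded image whose
// source CDB is GCLOUD and whose destination CDB is MYDB.
func exampleInitOra() error {
	i := &initFileInput{SourceDBName: "GCLOUD", DestDBName: "MYDB"}
	initOra, err := i.LoadInitOraTemplate("12.2")
	if err != nil {
		return err
	}
	fmt.Println(initOra)
	return nil
}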
diff --git a/oracle/pkg/database/provision/sqlnet.ora b/oracle/pkg/database/provision/sqlnet.ora
new file mode 100644
index 0000000..cfcc83b
--- /dev/null
+++ b/oracle/pkg/database/provision/sqlnet.ora
@@ -0,0 +1,13 @@
+sqlnet.crypto_checksum_server = REQUIRED
+sqlnet.encryption_server = REQUIRED
+sqlnet.crypto_checksum_types_server = (SHA1)
+sqlnet.encryption_types_server = (AES256)
+# to specify the time interval, in minutes, to send a probe
+# to verify that client/server connections are active
+sqlnet.expire_time = 15
+# to specify the time, in seconds, for a client to connect with the database
+# server and provide the necessary authentication information.
+sqlnet.inbound_connect_timeout = 180
+default_sdu_size=16384
+SQLNET.ALLOWED_LOGON_VERSION_SERVER=12a
+SQLNET.ALLOWED_LOGON_VERSION_CLIENT=12a
\ No newline at end of file
diff --git a/oracle/pkg/k8s/BUILD.bazel b/oracle/pkg/k8s/BUILD.bazel
new file mode 100644
index 0000000..f7b351f
--- /dev/null
+++ b/oracle/pkg/k8s/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "k8s",
+    srcs = [
+        "condition.go",
+        "event.go",
+    ],
+    importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/k8s",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//oracle/api/v1alpha1",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+    ],
+)
+
+go_test(
+    name = "k8s_test",
+    srcs = ["condition_test.go"],
+    embed = [":k8s"],
+    deps = [
+        "//common/api/v1alpha1",
+        "//oracle/api/v1alpha1",
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+    ],
+)
diff --git a/oracle/pkg/k8s/condition.go b/oracle/pkg/k8s/condition.go
new file mode 100644
index 0000000..09b7894
--- /dev/null
+++ b/oracle/pkg/k8s/condition.go
@@ -0,0 +1,127 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+	"time"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+)
+
+const (
+	// Condition Types
+	Ready                   = "Ready"
+	DatabaseInstanceReady   = "DatabaseInstanceReady"
+	DatabaseInstanceTimeout = "DatabaseInstanceTimeout"
+	UserReady               = "UserReady"
+	StandbyReady            = "StandbyReady"
+
+	// Condition Reasons
+	// The backup schedule concurrency policy relies on the backup Ready condition's reason:
+	// BackupReady and BackupFailed mean no backup job is running, so the scheduler will
+	// continue creating backups.
+	BackupReady                    = "BackupReady"
+	BackupInProgress               = "BackupInProgress"
+	BackupFailed                   = "BackupFailed"
+	CreateComplete                 = "CreateComplete"
+	CreateFailed                   = "CreateFailed"
+	CreateInProgress               = "CreateInProgress"
+	CreatePending                  = "CreatePending"
+	ImportComplete                 = "ImportComplete"
+	ImportFailed                   = "ImportFailed"
+	ImportInProgress               = "ImportInProgress"
+	ImportPending                  = "ImportPending"
+	RestoreComplete                = "RestoreComplete"
+	RestoreFailed                  = "RestoreFailed"
+	RestoreInProgress              = "RestoreInProgress"
+	SyncInProgress                 = "SyncInProgress"
+	UserOutOfSync                  = "UserOutOfSync"
+	SyncComplete                   = "SyncComplete"
+	ManuallySetUpStandbyInProgress = "ManuallySetUpStandbyInProgress"
+	PromoteStandbyInProgress       = "PromoteStandbyInProgress"
+	PromoteStandbyComplete         = "PromoteStandbyComplete"
+	PromoteStandbyFailed           = "PromoteStandbyFailed"
+
+	ExportComplete   = "ExportComplete"
+	ExportFailed     = "ExportFailed"
+	ExportInProgress = "ExportInProgress"
+	ExportPending    = "ExportPending"
+
+	ParameterUpdateInProgress = "ParameterUpdateInProgress"
+	ParameterUpdateRollback   = "ParameterUpdateRollback"
+)
+
+var (
+	v1Now = func() v1.Time {
+		return v1.Now().Rfc3339Copy()
+	}
+)
+
+type StatusCond struct {
+	instanceStatus v1alpha1.InstanceStatus
+}
+
+func FindCondition(conditions []v1.Condition, name string) *v1.Condition {
+	for i, c := range conditions {
+		if c.Type == name {
+			return &conditions[i]
+		}
+	}
+	return nil
+}
+
+func ConditionStatusEquals(cond *v1.Condition, status v1.ConditionStatus) bool {
+	if cond == nil {
+		return false
+	}
+	return cond.Status == status
+}
+
+func ConditionReasonEquals(cond *v1.Condition, reason string) bool {
+	if cond == nil {
+		return false
+	}
+	return cond.Reason == reason
+}
+
+func InstanceUpsertCondition(iStatus *v1alpha1.InstanceStatus, name string, status v1.ConditionStatus, reason, message string) *v1.Condition {
+	iStatus.Conditions = Upsert(iStatus.Conditions, name, status, reason, message)
+	return FindCondition(iStatus.Conditions, name)
+}
+
+func Upsert(conditions []v1.Condition, name string, status v1.ConditionStatus, reason, message string) []v1.Condition {
+	if cond := FindCondition(conditions, name); cond != nil {
+		if !ConditionStatusEquals(cond, status) { // LastTransitionTime refers to the time Status changed
+			cond.Status = status
+			cond.LastTransitionTime = v1Now()
+		}
+		cond.Reason = reason
+		cond.Message = message
+		return conditions
+	}
+
+	cond := v1.Condition{Type: name, Status: status, Reason: reason, Message: message, LastTransitionTime: v1Now()}
+	conditions = append(conditions, cond)
+	return conditions
+}
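A hypothetical reconciler snippet (not part of the diff) showing the intended Upsert semantics: an existing condition of the same type is updated in place, and LastTransitionTime is bumped only when Status actually flips; otherwise a new condition is appended. The reason and message values are illustrative:

package k8s

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
)

// exampleMarkReady marks an instance Ready; it returns a pointer into the
// status object's condition slice, so further mutation acts on the status.
func exampleMarkReady(iStatus *v1alpha1.InstanceStatus) *v1.Condition {
	return InstanceUpsertCondition(iStatus, Ready, v1.ConditionTrue, CreateComplete, "instance is ready")
}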
+
+// ElapsedTimeFromLastTransitionTime returns the time elapsed since the condition's
+// LastTransitionTime, rounded to roundTo; it returns 0 for a nil condition.
+func ElapsedTimeFromLastTransitionTime(condition *v1.Condition, roundTo time.Duration) time.Duration {
+	if condition == nil {
+		return 0
+	}
+	return v1Now().Sub(condition.LastTransitionTime.Time).Round(roundTo)
+}
diff --git a/oracle/pkg/k8s/condition_test.go b/oracle/pkg/k8s/condition_test.go
new file mode 100644
index 0000000..ab5cbdf
--- /dev/null
+++ b/oracle/pkg/k8s/condition_test.go
@@ -0,0 +1,336 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+import (
+	"testing"
+	"time"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	commonv1alpha1 "github.com/GoogleCloudPlatform/elcarro-oracle-operator/common/api/v1alpha1"
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/api/v1alpha1"
+)
+
+func TestUpsertNew(t *testing.T) {
+	iStatus := v1alpha1.InstanceStatus{}
+
+	InstanceUpsertCondition(&iStatus, "TestName", v1.ConditionTrue, "reason", "message")
+
+	if len(iStatus.GenericInstanceStatus.Conditions) != 1 {
+		t.Errorf("wrong number of conditions: got %d, want 1", len(iStatus.GenericInstanceStatus.Conditions))
+	}
+
+	if iStatus.GenericInstanceStatus.Conditions[0].Type != "TestName" {
+		t.Errorf("wrong condition type: got %q, want %q", iStatus.GenericInstanceStatus.Conditions[0].Type, "TestName")
+	}
+}
+
+func TestUpsertOld(t *testing.T) {
+	iStatus := v1alpha1.InstanceStatus{
+		GenericInstanceStatus: commonv1alpha1.GenericInstanceStatus{
+			Conditions: []v1.Condition{
+				{
+					Type:   "TestName",
+					Status: v1.ConditionFalse,
+				},
+			},
+		},
+	}
+
+	InstanceUpsertCondition(&iStatus, "TestName", v1.ConditionTrue, "reason", "message")
+
+	if len(iStatus.Conditions) != 1 {
+		t.Errorf("wrong number of conditions: got %d, want 1", len(iStatus.Conditions))
+	}
+
+	if iStatus.Conditions[0].Type != "TestName" || iStatus.Conditions[0].Status != v1.ConditionTrue {
+		t.Errorf("condition was not updated in place: got %+v", iStatus.Conditions[0])
+	}
+}
+
+func TestUpsertDoNotDelete(t *testing.T) {
+	iStatus := v1alpha1.InstanceStatus{
+		GenericInstanceStatus: commonv1alpha1.GenericInstanceStatus{
+			Conditions: []v1.Condition{
+				{
+					Type:   "TestName",
+					Status: v1.ConditionFalse,
+				},
+			},
+		},
+	}
+
+	InstanceUpsertCondition(&iStatus, "TestName2", v1.ConditionTrue, "reason", "message")
+
+	if len(iStatus.Conditions) != 2 {
+		t.Errorf("wrong number of conditions: got %d, want 2", len(iStatus.Conditions))
+	}
+
+	cond := FindCondition(iStatus.Conditions, "TestName2")
+	if cond.Type != "TestName2" || cond.Status != v1.ConditionTrue {
+		t.Errorf("upserted condition is wrong: got %+v", cond)
+	}
+}
+
+func TestInstanceUpsertCondition(t *testing.T) {
+	now := time.Now()
+	oldTime := v1.Time{Time: time.Unix(now.Unix()-1000, 0)}
+	newTime := v1.Time{Time: now}
+	v1Now = func() v1.Time {
+		return newTime
+	}
+
+	testCases := []struct {
+		Name          string
+		NewCond       *v1.Condition
+		ExistingConds []v1.Condition
+		wantNumConds  int
+	}{
+		{
+			Name: "Upsert new to empty list",
+			NewCond: &v1.Condition{
+				Type:               "NewCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "NewReason",
+				Message:            "NewMessage",
+				LastTransitionTime: newTime,
+			},
+			wantNumConds: 1,
+		},
+		{
+			Name: "Upsert new to non-empty list",
+			NewCond: &v1.Condition{
+				Type:               "NewCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "NewReason",
+				LastTransitionTime: newTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:   "OldCond",
+					Status: v1.ConditionTrue,
+					Reason: "OldReason",
+				}},
+			wantNumConds: 2,
+		},
+		{
+			Name: "Upsert existing - status transition",
+			NewCond: &v1.Condition{
+				Type:               "OldCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "OldReason",
+				LastTransitionTime: newTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:               "OldCond",
+					Status:             v1.ConditionFalse,
+					Reason:             "OldReason",
+					LastTransitionTime: oldTime,
+				},
+			},
+			wantNumConds: 1,
+		},
+		{
+			Name: "Upsert existing - no status transition",
+			NewCond: &v1.Condition{
+				Type:               "OldCond",
+				Status:             v1.ConditionFalse,
+				Reason:             "NewReason",
+				LastTransitionTime: oldTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:               "OldCond",
+					Status:             v1.ConditionFalse,
+					Reason:             "OldReason",
+					LastTransitionTime: oldTime,
+				},
+			},
+			wantNumConds: 1,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			iStatus := &v1alpha1.InstanceStatus{
+				GenericInstanceStatus: commonv1alpha1.GenericInstanceStatus{
+					Conditions: tc.ExistingConds,
+				},
+			}
+			updatedCond := InstanceUpsertCondition(iStatus, tc.NewCond.Type, tc.NewCond.Status, tc.NewCond.Reason, tc.NewCond.Message)
+
+			if tc.wantNumConds != len(iStatus.Conditions) {
+				t.Errorf("Wrong number of conditions: got %d, want %d", len(iStatus.Conditions), tc.wantNumConds)
+			}
+
+			foundCond := false
+			for i := range iStatus.Conditions {
+				if updatedCond == &iStatus.Conditions[i] {
+					foundCond = true
+					break
+				}
+			}
+			if !foundCond {
+				t.Errorf("InstanceUpsertCondition did not return a pointer to the actual struct inside the status object - it probably returned a pointer to a copy")
+			}
+
+			if tc.NewCond.Type != updatedCond.Type || tc.NewCond.Status != updatedCond.Status || tc.NewCond.Reason != updatedCond.Reason || tc.NewCond.Message != updatedCond.Message || tc.NewCond.LastTransitionTime != updatedCond.LastTransitionTime {
+				t.Errorf("Condition not correctly updated: got %+v, want %+v", *updatedCond, *tc.NewCond)
+			}
+		})
+	}
+}
+
+func TestUpsert(t *testing.T) {
+	now := time.Now()
+	oldTime := v1.Time{Time: time.Unix(now.Unix()-1000, 0)}
+	newTime := v1.Time{Time: now}
+	v1Now = func() v1.Time {
+		return newTime
+	}
+
+	testCases := []struct {
+		Name          string
+		NewCond       *v1.Condition
+		ExistingConds []v1.Condition
+		wantNumConds  int
+	}{
+		{
+			Name: "Upsert new to empty list",
+			NewCond: &v1.Condition{
+				Type:               "NewCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "NewReason",
+				Message:            "NewMessage",
+				LastTransitionTime: newTime,
+			},
+			wantNumConds: 1,
+		},
+		{
+			Name: "Upsert new to non-empty list",
+			NewCond: &v1.Condition{
+				Type:               "NewCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "NewReason",
+				LastTransitionTime: newTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:   "OldCond",
+					Status: v1.ConditionTrue,
+					Reason: "OldReason",
+				}},
+			wantNumConds: 2,
+		},
+		{
+			Name: "Upsert existing - status transition",
+			NewCond: &v1.Condition{
+				Type:               "OldCond",
+				Status:             v1.ConditionTrue,
+				Reason:             "OldReason",
+				LastTransitionTime: newTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:               "OldCond",
+					Status:             v1.ConditionFalse,
+					Reason:             "OldReason",
+					LastTransitionTime: oldTime,
+				},
+			},
+			wantNumConds: 1,
+		},
+		{
+			Name: "Upsert existing - no status transition",
+			NewCond: &v1.Condition{
+				Type:               "OldCond",
+				Status:             v1.ConditionFalse,
+				Reason:             "NewReason",
+				LastTransitionTime: oldTime,
+			},
+			ExistingConds: []v1.Condition{
+				{
+					Type:               "OldCond",
+					Status:             v1.ConditionFalse,
+					Reason:             "OldReason",
+					LastTransitionTime: oldTime,
+				},
+			},
+			wantNumConds: 1,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			dStatus := &commonv1alpha1.DatabaseStatus{
+				Conditions: tc.ExistingConds,
+			}
+			dStatus.Conditions = Upsert(dStatus.Conditions, tc.NewCond.Type, tc.NewCond.Status, tc.NewCond.Reason, tc.NewCond.Message)
+
+			if tc.wantNumConds != len(dStatus.Conditions) {
+				t.Errorf("Wrong number of conditions: got %d, want %d", len(dStatus.Conditions), tc.wantNumConds)
+			}
+
+			foundCond := FindCondition(dStatus.Conditions, tc.NewCond.Type)
+			if foundCond == nil {
+				// Fatalf rather than Errorf: dereferencing a nil foundCond below would panic.
+				t.Fatalf("New condition not found in database status conditions after upsert %+v", dStatus.Conditions)
+			}
+
+			if tc.NewCond.Type != foundCond.Type || tc.NewCond.Status != foundCond.Status || tc.NewCond.Reason != foundCond.Reason || tc.NewCond.Message != foundCond.Message || tc.NewCond.LastTransitionTime != foundCond.LastTransitionTime {
+				t.Errorf("Condition not correctly updated: got %+v, want %+v", *foundCond, *tc.NewCond)
+			}
+		})
+	}
+}
+
+func TestElapsedTimeFromLastTransitionTime(t *testing.T) {
+	now, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
+	if err != nil {
+		t.Fatalf("Unable to parse time string %v", err)
+	}
+
+	oldTime := v1.Time{Time: time.Unix(now.Unix()-1000, 0)}
+	newTime := v1.Time{Time: now}
+	v1Now = func() v1.Time {
+		return newTime
+	}
+
+	testCases := []struct {
+		Name         string
+		Cond         *v1.Condition
+		WantDuration time.Duration
+	}{
+		{
+			Name: "Round to seconds",
+			Cond: &v1.Condition{
+				LastTransitionTime: oldTime,
+			},
+			// oldTime is 1000s before newTime.
+			WantDuration: 1000 * time.Second,
+		},
+		{
+			Name:         "Handle nil condition",
+			Cond:         nil,
+			WantDuration: 0,
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			gotDuration := ElapsedTimeFromLastTransitionTime(tc.Cond, time.Second)
+			if gotDuration != tc.WantDuration {
+				t.Errorf("Wrong duration: got %d, want %d", gotDuration, tc.WantDuration)
+			}
+		})
+	}
+}
diff --git a/oracle/pkg/k8s/event.go b/oracle/pkg/k8s/event.go
new file mode 100644
index 0000000..a6dbe55
--- /dev/null
+++ b/oracle/pkg/k8s/event.go
@@ -0,0 +1,27 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8s
+
+// database event reason list
+const (
+	CreatingDatabase      = "Creating"
+	CreatedDatabase       = "Created"
+	DatabaseAlreadyExists = "DatabaseAlreadyExists"
+	CreatingUser          = "Creating"
+	CreatedUser           = "Created"
+	SyncingUser           = "Syncing"
+	SyncedUser            = "Synced"
+	FailedToSyncUser      = "Failed"
+)
diff --git a/oracle/scripts/authentikos.yaml b/oracle/scripts/authentikos.yaml
new file mode 100644
index 0000000..1d7230c
--- /dev/null
+++ b/oracle/scripts/authentikos.yaml
@@ -0,0 +1,88 @@
+# Copyright 2019 Istio Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: authentikos + namespace: default +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: authentikos +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: authentikos +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: authentikos +subjects: +- kind: ServiceAccount + name: authentikos + namespace: default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: authentikos + namespace: default + labels: + app: authentikos +spec: + replicas: 1 + selector: + matchLabels: + app: authentikos + template: + metadata: + labels: + app: authentikos + spec: + serviceAccountName: authentikos + containers: + - name: authentikos + image: gcr.io/istio-testing/authentikos:0.0.4 + imagePullPolicy: Always + args: + - --verbose + - --secret=http-cookiefile + - --key=cookies + - --creds=/etc/creds/service-account.json + - --namespace=test-pods + - --scopes=https://www.googleapis.com/auth/gerritcodereview + - | + --template=.googlesource.com TRUE / TRUE {{.Now | .TimeToUnix | .Add 3600}} o {{.Token}} + source.developers.google.com FALSE / TRUE {{.Now | .TimeToUnix | .Add 3600}} o {{.Token}} + volumeMounts: + - name: creds + mountPath: /etc/creds + readOnly: true + volumes: + - name: creds + secret: + defaultMode: 0644 + secretName: default-compute-service-account diff --git a/oracle/scripts/deploy/csi/gce_pd_storage_class.yaml b/oracle/scripts/deploy/csi/gce_pd_storage_class.yaml new file mode 100644 index 0000000..11bbd01 --- /dev/null +++ b/oracle/scripts/deploy/csi/gce_pd_storage_class.yaml @@ -0,0 +1,22 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-gce-pd +provisioner: pd.csi.storage.gke.io +parameters: + type: pd-standard +volumeBindingMode: WaitForFirstConsumer diff --git a/oracle/scripts/deploy/csi/gce_pd_volume_snapshot_class.yaml b/oracle/scripts/deploy/csi/gce_pd_volume_snapshot_class.yaml new file mode 100644 index 0000000..5bb65ff --- /dev/null +++ b/oracle/scripts/deploy/csi/gce_pd_volume_snapshot_class.yaml @@ -0,0 +1,20 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-gce-pd-snapshot-class
+driver: pd.csi.storage.gke.io
+deletionPolicy: Delete
diff --git a/oracle/scripts/deploy/install-18c-xe.sh b/oracle/scripts/deploy/install-18c-xe.sh
new file mode 100755
index 0000000..3630876
--- /dev/null
+++ b/oracle/scripts/deploy/install-18c-xe.sh
@@ -0,0 +1,251 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o nounset
+set -o errexit
+
+CLUSTER_NAME=gkecluster
+CDB_NAME=GCLOUD
+ZONE=us-central1-a
+readonly OPERATOR_DIR="${HOME}/oracleop"
+
+function usage() {
+  echo "------USAGE------
+  This tool installs the El Carro Operator and provisions the following resources:
+  * a Kubernetes cluster on GKE to host the operator and database containers
+  * an Oracle 18c XE database image
+  * an Oracle database (PDB) instance in the cluster
+
+  install-18c-xe.sh --service_account SERVICE_ACCOUNT_[NAME|EMAIL] [--cdb_name DB_NAME] [MORE_OPTIONS]
+
+  REQUIRED FLAGS
+     -a, --service_account
+       GCP service account that should be used to create a cluster and by the El Carro Operator.
+
+  OPTIONAL FLAGS
+     -c, --cdb_name
+       Name of the container database (CDB), the default value is 'GCLOUD'.
+       This name should only contain uppercase letters and numbers.
+     -k, --cluster_name
+       Name of the GKE cluster to be created, the default value is 'gkecluster'.
+     -z, --gke_zone
+       Zone to create a cluster in, the default value is 'us-central1-a'.
+  "
+  exit 1
+}
+
+function parse_options() {
+  opts=$(getopt -o g:a:c:k:z: \
+  --longoptions gcs_oracle_binaries_path:,service_account:,cdb_name:,cluster_name:,gke_zone: -n "$(basename "$0")" -- "$@")
+  eval set -- "$opts"
+  while true; do
+    case "$1" in
+    -a | --service_account)
+      shift
+      GKE_SA=$1
+      shift
+      ;;
+    -c | --cdb_name)
+      shift
+      CDB_NAME=$(echo "$1" | tr '[:lower:]' '[:upper:]')
+      shift
+      ;;
+    -k | --cluster_name)
+      shift
+      CLUSTER_NAME=$1
+      shift
+      ;;
+    -z | --gke_zone)
+      shift
+      ZONE=$(echo "$1" | tr '[:upper:]' '[:lower:]')
+      shift
+      ;;
+    --)
+      shift
+      break
+      ;;
+    *)
+      echo Invalid argument "$1"
+      usage
+      exit 1
+      ;;
+    esac
+  done
+
+  if [[ -z ${GKE_SA+x} ]]; then
+    usage
+  fi
+}
+
+function init_env() {
+  PROJECT=$(gcloud config get-value project 2>/dev/null)
+
+  if [ -z "${PROJECT}" ]; then
+    echo "could not determine current gcloud project"
+    exit 1
+  fi
+
+  echo "current project: ${PROJECT}"
+
+  readonly DB_IMAGE="gcr.io/$(echo "${PROJECT}" | tr : /)/oracle-database-images/oracle-18c-xe-seeded-$(echo "$CDB_NAME" | tr '[:upper:]' '[:lower:]')"
+  readonly RELEASE_DIR="$(dirname "$0")/.."
+} + +function enable_apis() { + echo "enabling container.googleapis.com" + gcloud services enable container.googleapis.com + + echo "enabling anthos.googleapis.com" + gcloud services enable anthos.googleapis.com + + echo "enabling cloudbuild.googleapis.com" + gcloud services enable cloudbuild.googleapis.com +} + +function create_cluster() { + if [ -z "$(gcloud beta container clusters list --filter "name=${CLUSTER_NAME} zone=${ZONE}")" ]; then + + local GKE_SA_EMAIL + if [[ $GKE_SA = *@* ]]; then + GKE_SA_EMAIL=${GKE_SA} + else + GKE_SA_EMAIL=$(gcloud iam service-accounts list --format='value(email)' --filter="name:${GKE_SA}@") + if [ -z "${GKE_SA_EMAIL}" ]; then + echo "Unknown account $GKE_SA" + exit 1 + fi + fi + + echo "adding monitoring and logging permissions to ${GKE_SA_EMAIL}" + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/logging.logWriter + + readonly GCR_GCS_PATH=$(gsutil ls | grep -E '^gs://artifacts.*appspot.com/$') + echo "adding project container repository bucket ${GCR_GCS_PATH} read permission to ${GKE_SA_EMAIL}" + gsutil iam ch serviceAccount:${GKE_SA_EMAIL}:roles/storage.objectViewer "${GCR_GCS_PATH}" + + gcloud beta container clusters create ${CLUSTER_NAME} --release-channel rapid \ + --machine-type=n1-standard-2 --num-nodes 2 --zone ${ZONE} \ + --scopes gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging \ + --service-account "${GKE_SA_EMAIL}" \ + --image-type cos \ + --addons GcePersistentDiskCsiDriver + else + echo "cluster (name=${CLUSTER_NAME} zone=${ZONE}) already exists" + + if [ "$(kubectl config current-context)" != "gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" ]; then + echo "current kubectl config is" "$(kubectl config current-context), please run:" + echo "> gcloud container clusters get-credentials ${CLUSTER_NAME} --zone ${ZONE}" + echo "> kubectl config set-context gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" + exit 1 + fi + fi +} + +function install_csi_resources() { + kubectl apply -f "${RELEASE_DIR}/deploy/csi" +} + +function build_image() { + echo "using Oracle database image: ${DB_IMAGE}" + + if [ -z "$(gcloud container images list-tags "${DB_IMAGE}" --filter="tags:latest" 2> /dev/null)" ]; then + pushd "${RELEASE_DIR}/dbimage" > /dev/null + bash image_build.sh --db_version=18c --create_cdb=true --cdb_name="${CDB_NAME}" --no_dry_run + popd > /dev/null + else + echo "Oracle container image ${DB_IMAGE}:latest already exists" + fi +} + +function install_operator_resources() { + kubectl apply -f "${RELEASE_DIR}/operator.yaml" +} + +function create_demo_resources() { + local -r CRD_DIR="${OPERATOR_DIR}/resources" + local -r CRD_NS=db + + kubectl create ns ${CRD_NS} || kubectl get ns ${CRD_NS} + mkdir -p "${CRD_DIR}" + + local -r CRD_INSTANCE_PATH="${CRD_DIR}/instance.yaml" + local -r CRD_PDB_PATH="${CRD_DIR}/database_pdb1.yaml" + + cat "${RELEASE_DIR}/samples/v1alpha1_instance_18c_XE_express.yaml" | \ + sed "s|gcr.io/\${PROJECT_ID}/oracle-database-images/oracle-18c-xe-seeded-\${DB}|${DB_IMAGE}|g" | \ + sed "s/\${DB}/${CDB_NAME}/g" > "${CRD_INSTANCE_PATH}" + + cp "${RELEASE_DIR}/samples/v1alpha1_database_pdb1_express.yaml" "${CRD_PDB_PATH}" + + kubectl apply -f "${CRD_INSTANCE_PATH}" -n 
${CRD_NS}
+  kubectl apply -f "${CRD_PDB_PATH}" -n ${CRD_NS}
+}
+
+function wait_for_resource_creation() {
+  local -r SLEEP=30
+  local ITERATIONS=60
+  local reason
+  local db_reason
+  local pdb_reason
+
+  until [ $ITERATIONS -eq 0 ]; do
+    reason=$(kubectl get instances -n db -o jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}')
+    db_reason=$(kubectl get instances -n db -o jsonpath='{.items[0].status.conditions[?(@.type=="DatabaseInstanceReady")].reason}')
+    pdb_reason=$(kubectl get database pdb1 -n db -o jsonpath='{.status.conditions[?(@.type=="Ready")].reason}')
+
+    echo "Waiting for startup, statuses:" "InstanceReady=$reason, InstanceDatabaseReady=$db_reason, DatabaseReady=$pdb_reason"
+
+    if [ "$pdb_reason" = "CreateComplete" ]; then
+      break
+    fi
+
+    sleep $SLEEP
+    ITERATIONS=$(( $ITERATIONS-1 ))
+  done
+
+  if [ $ITERATIONS -eq 0 ] ; then
+    echo "Timed out waiting for Instance to start up"
+    exit 1
+  fi
+}
+
+function print_connect_string() {
+  local -r db_domain=$(kubectl get instances -n db -o jsonpath='{.items[0].spec.dbDomain}')
+  local -r url=$(kubectl get instances -n db -o jsonpath='{.items[0].status.url}')
+
+  echo "Oracle Operator is installed. Database connection command:"
+  echo "> sqlplus scott/tiger@${url}/pdb1.${db_domain}"
+}
+
+parse_options "$@"
+init_env
+
+enable_apis
+build_image
+create_cluster
+install_csi_resources
+install_operator_resources
+create_demo_resources
+wait_for_resource_creation
+print_connect_string
diff --git a/oracle/scripts/deploy/install.sh b/oracle/scripts/deploy/install.sh
new file mode 100755
index 0000000..329ba29
--- /dev/null
+++ b/oracle/scripts/deploy/install.sh
@@ -0,0 +1,267 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o nounset
+set -o errexit
+
+CLUSTER_NAME=gkecluster
+CDB_NAME=GCLOUD
+ZONE=us-central1-a
+readonly OPERATOR_DIR="${HOME}/oracleop"
+
+function usage() {
+  echo "------USAGE------
+  This tool installs the El Carro Operator and provisions the following resources:
+  * a Kubernetes cluster on GKE to host the operator and database containers
+  * an Oracle 12c EE database image using the provided binaries
+  * an Oracle database (PDB) instance in the cluster
+
+  install.sh --gcs_oracle_binaries_path GCS_PATH --service_account SERVICE_ACCOUNT_[NAME|EMAIL] [--cdb_name DB_NAME] [MORE_OPTIONS]
+
+  REQUIRED FLAGS
+     -g, --gcs_oracle_binaries_path
+       GCS path containing Oracle 12.2 installation files.
+     -a, --service_account
+       GCP service account that should be used to create a cluster and by the El Carro Operator.
+
+  OPTIONAL FLAGS
+     -c, --cdb_name
+       Name of the container database (CDB), the default value is 'GCLOUD'.
+       This name should only contain uppercase letters and numbers.
+     -k, --cluster_name
+       Name of the GKE cluster to be created, the default value is 'gkecluster'.
+     -z, --gke_zone
+       Zone to create a cluster in, the default value is 'us-central1-a'.
+ " + exit 1 +} + +function parse_options() { + opts=$(getopt -o g:a:c:k:z: \ + --longoptions gcs_oracle_binaries_path:,service_account:,cdb_name:,cluster_name:,gke_zone: -n "$(basename "$0")" -- "$@") + eval set -- "$opts" + while true; do + case "$1" in + -g | --gcs_oracle_binaries_path) + shift + GCS_PATH=$1 + shift + ;; + -a | --service_account) + shift + GKE_SA=$1 + shift + ;; + -c | --cdb_name) + shift + CDB_NAME=$(echo "$1" | tr '[:lower:]' '[:upper:]') + shift + ;; + -k | --cluster_name) + shift + CLUSTER_NAME=$1 + shift + ;; + -z | --gke_zone) + shift + ZONE=$(echo "$1" | tr '[:upper:]' '[:lower:]') + shift + ;; + --) + shift + break + ;; + *) + echo Invalid argument "$1" + usage + exit 1 + ;; + esac + done + + if [[ -z ${GCS_PATH+x} || -z ${GKE_SA+x} ]]; then + usage + fi + + [[ $GCS_PATH = gs://* ]] || (echo "{-g,--gcs_oracle_binaries_path} should start with gs://"; exit 1) + [[ $GCS_PATH = gs://gs://* ]] && (echo "{-g,--gcs_oracle_binaries_path} should have format gs://bucket[/path], gs://gs:// is an invalid prefix"; exit 1) + [[ "$GCS_PATH" =~ ^gs://[a-z0-9][a-z0-9._-]+[a-z0-9](/.*)?$ ]] || (echo "{-g,--gcs_oracle_binaries_path} bucket name is invalid, see https://cloud.google.com/storage/docs/naming-buckets"; exit 1) +} + +function init_env() { + PROJECT=$(gcloud config get-value project 2>/dev/null) + + if [ -z "${PROJECT}" ]; then + echo "could not determine current gcloud project" + exit 1 + fi + + echo "current project: ${PROJECT}" + + readonly DB_IMAGE="gcr.io/$(echo "${PROJECT}" | tr : /)/oracle-database-images/oracle-12.2-ee-seeded-$(echo "$CDB_NAME" | tr '[:upper:]' '[:lower:]')" + readonly RELEASE_DIR="$(dirname "$0/")/.." +} + +function enable_apis() { + echo "enabling container.googleapis.com" + gcloud services enable container.googleapis.com + + echo "enabling anthos.googleapis.com" + gcloud services enable anthos.googleapis.com + + echo "enabling cloudbuild.googleapis.com" + gcloud services enable cloudbuild.googleapis.com +} + +function create_cluster() { + if [ -z "$(gcloud beta container clusters list --filter "name=${CLUSTER_NAME} zone=${ZONE}")" ]; then + + local GKE_SA_EMAIL + if [[ $GKE_SA = *@* ]]; then + GKE_SA_EMAIL=${GKE_SA} + else + GKE_SA_EMAIL=$(gcloud iam service-accounts list --format='value(email)' --filter="name:${GKE_SA}@") + if [ -z "${GKE_SA_EMAIL}" ]; then + echo "Unknown account $GKE_SA" + exit 1 + fi + fi + + echo "adding monitoring and logging permissions to ${GKE_SA_EMAIL}" + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding "${PROJECT}" \ + --member serviceAccount:${GKE_SA_EMAIL} \ + --role roles/logging.logWriter + + readonly GCR_GCS_PATH=$(gsutil ls | grep -E '^gs://artifacts.*appspot.com/$') + echo "adding project container repository bucket ${GCR_GCS_PATH} read permission to ${GKE_SA_EMAIL}" + gsutil iam ch serviceAccount:${GKE_SA_EMAIL}:roles/storage.objectViewer "${GCR_GCS_PATH}" + + gcloud beta container clusters create ${CLUSTER_NAME} --release-channel rapid \ + --machine-type=n1-standard-2 --num-nodes 2 --zone ${ZONE} \ + --scopes gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging \ + --service-account "${GKE_SA_EMAIL}" \ + --image-type cos \ + --addons GcePersistentDiskCsiDriver + else + echo "cluster (name=${CLUSTER_NAME} 
zone=${ZONE}) already exists" + + if [ "$(kubectl config current-context)" != "gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" ]; then + echo "current kubectl config is" "$(kubectl config current-context), please run:" + echo "> gcloud container clusters get-credentials ${CLUSTER_NAME} --zone ${ZONE}" + echo "> kubectl config set-context gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" + exit 1 + fi + fi +} + +function install_csi_resources() { + kubectl apply -f "${RELEASE_DIR}/deploy/csi" +} + +function build_image() { + echo "using Oracle database image: ${DB_IMAGE}" + + if [ -z "$(gcloud container images list-tags "${DB_IMAGE}" --filter="tags:latest" 2> /dev/null)" ]; then + + # strip off part of the path after the bucket: gs://bucket/dir1/dir becomes gs://bucket + local -r GCS_BUCKET="gs://$(echo ${GCS_PATH} | tr "/" "\n" | head -n3 | tail -n1)" + gsutil iam ch serviceAccount:$(gcloud projects describe ${PROJECT} --format="value(projectNumber)")@cloudbuild.gserviceaccount.com:roles/storage.objectViewer "${GCS_BUCKET}" + + pushd "${RELEASE_DIR}/dbimage" > /dev/null + bash image_build.sh --install_path="${GCS_PATH}" --db_version=12.2 --mem_pct=45 --create_cdb=true --cdb_name="${CDB_NAME}" --no_dry_run + popd > /dev/null + else + echo "Oracle container image ${DB_IMAGE}:latest already exists" + fi +} + +function install_operator_resources() { + kubectl apply -f "${RELEASE_DIR}/operator.yaml" +} + +function create_demo_resources() { + local -r CRD_DIR="${OPERATOR_DIR}/resources" + local -r CRD_NS=db + + kubectl create ns ${CRD_NS} || kubectl get ns ${CRD_NS} + mkdir -p "${CRD_DIR}" + + local -r CRD_INSTANCE_PATH="${CRD_DIR}/instance.yaml" + local -r CRD_PDB_PATH="${CRD_DIR}/database_pdb1.yaml" + + cat "${RELEASE_DIR}/samples/v1alpha1_instance_express.yaml" | \ + sed "s|gcr.io/\${PROJECT_ID}/oracle-database-images/oracle-12.2-ee-seeded-\${DB}|${DB_IMAGE}|g" | \ + sed "s/\${DB}/${CDB_NAME}/g" > "${CRD_INSTANCE_PATH}" + + cp "${RELEASE_DIR}/samples/v1alpha1_database_pdb1_express.yaml" "${CRD_PDB_PATH}" + + kubectl apply -f "${CRD_INSTANCE_PATH}" -n ${CRD_NS} + kubectl apply -f "${CRD_PDB_PATH}" -n ${CRD_NS} +} + +function wait_for_resource_creation() { + local -r SLEEP=30 + local ITERATIONS=60 + local reason + local db_reason + local pdb_reason + + until [ $ITERATIONS -eq 0 ]; do + reason=$(kubectl get instances -n db -o jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}') + db_reason=$(kubectl get instances -n db -o jsonpath='{.items[0].status.conditions[?(@.type=="DatabaseInstanceReady")].reason}') + pdb_reason=$(kubectl get database pdb1 -n db -o jsonpath='{.status.conditions[?(@.type=="Ready")].reason}') + + echo "Waiting for startup, statuses:" "InstanceReady=$reason, InstanceDatabaseReady=$db_reason, DatabaseReady=$pdb_reason" + + if [ "$pdb_reason" = "CreateComplete" ]; then + break + fi + + sleep $SLEEP + ITERATIONS=$(( $ITERATIONS-1 )) + done + + if [ $ITERATIONS -eq 0 ] ; then + echo "Timed out waiting for Instance to start up" + exit 1 + fi +} + +function print_connect_string() { + local -r db_domain=$(kubectl get instances -n db -o jsonpath='{.items[0].spec.dbDomain}') + local -r url=$(kubectl get instances -n db -o jsonpath='{.items[0].status.url}') + + echo "Oracle Operator is installed. 
Database connection command:"
+  echo "> sqlplus scott/tiger@${url}/pdb1.${db_domain}"
+}
+
+parse_options "$@"
+init_env
+
+enable_apis
+build_image
+create_cluster
+install_csi_resources
+install_operator_resources
+create_demo_resources
+wait_for_resource_creation
+print_connect_string
diff --git a/oracle/scripts/fmt_check.sh b/oracle/scripts/fmt_check.sh
new file mode 100755
index 0000000..44ebb22
--- /dev/null
+++ b/oracle/scripts/fmt_check.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+# Dump any outstanding formatting changes and fail if there are any.
+changes="$(git diff . ':(exclude).bazelrc')"
+if [ "x$changes" != "x" ]; then
+  echo "$changes"
+  exit 1
+fi
+echo "Formatting OK"
diff --git a/oracle/scripts/fmt_fixup.sh b/oracle/scripts/fmt_fixup.sh
new file mode 100755
index 0000000..1f39490
--- /dev/null
+++ b/oracle/scripts/fmt_fixup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+# format all Go files
+gofmt -w ./
+
+# tidy up go.mod
+go mod tidy
+
+# format all protos (only on workstations; prow's (debian) clang-format is too old to format the same way)
+if clang-format --version | grep google3-trunk; then
+  proto_files="$(find ./ -name '*.proto')"
+  for proto in ${proto_files[@]}; do
+    clang-format --style=google -i "$proto"
+  done
+fi
+
diff --git a/oracle/scripts/generate_version.sh b/oracle/scripts/generate_version.sh
new file mode 100755
index 0000000..b80ddf2
--- /dev/null
+++ b/oracle/scripts/generate_version.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+pkg=$1
+version=${2:-$(git describe)}
+cat <<EOF
+...
+EOF
diff --git a/oracle/scripts/ginkgo_test.bzl b/oracle/scripts/ginkgo_test.bzl
--- /dev/null
+++ b/oracle/scripts/ginkgo_test.bzl
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+
+def ginkgo_test(name, srcs, deps, nodes = 1, **kwargs):
+    if int(nodes) > 1:
+        for i in range(1, int(nodes) + 1):
+            go_test(
+                name = "{}_node_{}".format(name, i),
+                srcs = srcs,
+                deps = deps,
+                args = ["-ginkgo.parallel.total", str(nodes), "-ginkgo.parallel.node", str(i)],
+                **kwargs
+            )
+    else:
+        go_test(
+            name = name,
+            srcs = srcs,
+            deps = deps,
+            **kwargs
+        )
diff --git a/oracle/scripts/install_prow_deps.sh b/oracle/scripts/install_prow_deps.sh
new file mode 100755
index 0000000..cff8632
--- /dev/null
+++ b/oracle/scripts/install_prow_deps.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Installs all the build/test/run dependencies for prow. This is meant to run
+# on the k8s kubekins image, which includes go/bazel/gcloud sdk/k8s and is
+# built on a debian container.
+
+set -e
+
+# Setup bazel caching
+# DO NOT use this cache from your local machine.
+CC_HASH=$(sha256sum $(which ${CC:-gcc}) | cut -c1-8)
+PY_HASH=$(sha256sum $(which python) | cut -c1-8)
+CACHE_KEY="CC:${CC_HASH:-err},PY:${PY_HASH:-err}"
+
+cat << EOF >> .bazelrc
+build --remote_cache=https://storage.googleapis.com/graybox-bazel-cache/${CACHE_KEY}
+build --google_default_credentials
+EOF
+
+INSTALL_TMP_DIR=$(mktemp -d)
+cd $INSTALL_TMP_DIR
+
+# add debian 10 buildah repo from https://github.com/containers/buildah/blob/master/install.md
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O Release.key
+apt-key add - < Release.key
+
+# everything we can get from debian packages.
+apt-get update -qq
+apt-get install -y \
+  clang-format buildah fuse-overlayfs gettext-base
+
+# Use fuse-overlayfs to run buildah within a k8s container.
+sed -i -e 's|#mount_program = "/usr/bin/fuse-overlayfs"|mount_program = "/usr/bin/fuse-overlayfs"|' /etc/containers/storage.conf
+
+# Link the kubekins install to the typical debian location to match Dev
+# machines.
diff --git a/oracle/scripts/install_prow_deps.sh b/oracle/scripts/install_prow_deps.sh
new file mode 100755
index 0000000..cff8632
--- /dev/null
+++ b/oracle/scripts/install_prow_deps.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Installs all the build/test/run dependencies for prow. This is meant to run
+# on the k8s kubekins image, which includes go/bazel/gcloud sdk/k8s and is
+# built on a Debian container.
+
+set -e
+
+# Set up bazel caching.
+# DO NOT use this cache from your local machine.
+CC_HASH=$(sha256sum $(which ${CC:-gcc}) | cut -c1-8)
+PY_HASH=$(sha256sum $(which python) | cut -c1-8)
+CACHE_KEY="CC:${CC_HASH:-err},PY:${PY_HASH:-err}"
+
+cat << EOF >> .bazelrc
+build --remote_cache=https://storage.googleapis.com/graybox-bazel-cache/${CACHE_KEY}
+build --google_default_credentials
+EOF
+
+INSTALL_TMP_DIR=$(mktemp -d)
+cd $INSTALL_TMP_DIR
+
+# add debian 10 buildah repo from https://github.com/containers/buildah/blob/master/install.md
+echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O Release.key
+apt-key add - < Release.key
+
+# everything we can get from debian packages.
+apt-get update -qq
+apt-get install -y \
+  clang-format buildah fuse-overlayfs gettext-base
+
+# Use fuse-overlayfs to run buildah within a k8s container.
+sed -i -e 's|#mount_program = "/usr/bin/fuse-overlayfs"|mount_program = "/usr/bin/fuse-overlayfs"|' /etc/containers/storage.conf
+
+# Link the kubekins install to the typical debian location to match Dev
+# machines.
+ln -s /google-cloud-sdk /usr/lib/google-cloud-sdk
+
+# install binaries for testing
+KUBEBUILDER_VER="2.3.1"
+HOST_OS=$(go env GOOS)
+HOST_ARCH=$(go env GOARCH)
+
+# Get kubebuilder (includes kubectl, kube-apiserver, etcd)
+curl -sSL https://go.kubebuilder.io/dl/${KUBEBUILDER_VER}/${HOST_OS}/${HOST_ARCH} \
+  -o kubebuilder.tar.gz
+mkdir kubebuilder
+tar xvf kubebuilder.tar.gz --strip-components=1 -C kubebuilder
+# Typical install location /usr/local/kubebuilder/bin/
+rm -fr /usr/local/kubebuilder
+mv kubebuilder /usr/local/
+
+# If we need a specific kubectl from gcr.io/k8s-testimages/kubekins-e2e
+# rm /usr/local/bin/kubectl
+# ln -s /google-cloud-sdk/bin/kubectl.1.18 /usr/local/bin/kubectl
+
+gcloud auth configure-docker --quiet
+
+# cleanup
+cd /
+rm -rf /var/lib/apt/lists/*
+rm -rf "$INSTALL_TMP_DIR"
diff --git a/oracle/scripts/integration_test_cluster/cleanup_integration_test_clusters.sh b/oracle/scripts/integration_test_cluster/cleanup_integration_test_clusters.sh
new file mode 100755
index 0000000..446caf1
--- /dev/null
+++ b/oracle/scripts/integration_test_cluster/cleanup_integration_test_clusters.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Remove stale integration test clusters from the PROW_PROJECT
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+[[ -z "$PROW_CLUSTER_ZONE" ]] && { echo "PROW_CLUSTER_ZONE envvar was not set. Did you try to test without make?" ; exit 1; }
+[[ -z "$PROW_PROJECT" ]] && { echo "PROW_PROJECT envvar was not set. Did you try to test without make?"
; exit 1; }
+
+# Anything older than 6 hours (an ISO-8601 duration) is considered stale.
+STALE_TIME="-P6H"
+
+# Look for clusters inttests-XXX created more than STALE_TIME ago
+STALE_CLUSTERS=$(gcloud beta container clusters list --project ${PROW_PROJECT} \
+--filter "name:inttests- AND createTime<${STALE_TIME}" --format="value(name)")
+
+for c in $STALE_CLUSTERS; do
+  echo " * Deleting stale cluster * ${c}";
+  set -x #echo on
+  gcloud beta container clusters delete --async -q "${c}" --zone="${PROW_CLUSTER_ZONE}" --project="${PROW_PROJECT}"
+  set +x #echo off
+done
+
+# Look for unattached disks (orphaned PVC backing disks) created more than STALE_TIME ago
+STALE_PVCS=$(gcloud compute disks list --project ${PROW_PROJECT} \
+--filter "creationTimestamp<${STALE_TIME} AND users=null" --format="value(name)")
+
+for c in $STALE_PVCS; do
+  echo " * Deleting orphan pvc * ${c}";
+  set -x #echo on
+  # Ignore errors as there might be concurrent jobs running
+  gcloud compute disks delete -q "${c}" --zone="${PROW_CLUSTER_ZONE}" --project="${PROW_PROJECT}" || true
+  set +x #echo off
+done
+
diff --git a/oracle/scripts/integration_test_cluster/create_integration_test_cluster.sh b/oracle/scripts/integration_test_cluster/create_integration_test_cluster.sh
new file mode 100755
index 0000000..e33b06e
--- /dev/null
+++ b/oracle/scripts/integration_test_cluster/create_integration_test_cluster.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Create a new GKE integration test cluster
+# Use environment variables to get the name of the cluster/zone/project
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+[[ -z "$PROW_CLUSTER" ]] && { echo "PROW_CLUSTER envvar was not set. Did you try to test without make?" ; exit 1; }
+[[ -z "$PROW_CLUSTER_ZONE" ]] && { echo "PROW_CLUSTER_ZONE envvar was not set. Did you try to test without make?" ; exit 1; }
+[[ -z "$PROW_PROJECT" ]] && { echo "PROW_PROJECT envvar was not set. Did you try to test without make?" ; exit 1; }
+
+MACHINE="n1-standard-4"
+NODECOUNT="8"
+
+echo "Creating cluster '${PROW_CLUSTER}' (this may take a few minutes)..."
+echo "If this fails due to insufficient project quota, request more quota at GCP console" +echo + +set -x #echo on +time gcloud beta container clusters create "${PROW_CLUSTER}" \ +--release-channel rapid \ +--machine-type="${MACHINE}" \ +--num-nodes="${NODECOUNT}" \ +--zone="${PROW_CLUSTER_ZONE}" \ +--project="${PROW_PROJECT}" \ +--scopes "gke-default,compute-rw,cloud-platform,https://www.googleapis.com/auth/dataaccessauditlogging" \ +--enable-gcfs \ +--workload-pool="${PROW_PROJECT}.svc.id.goog" + +gcloud container clusters get-credentials ${PROW_CLUSTER} --zone ${PROW_CLUSTER_ZONE} --project ${PROW_PROJECT} +kubectl config set-context gke_${PROW_PROJECT}_${PROW_CLUSTER_ZONE}_${PROW_CLUSTER} + +# Create the csi-gce-pd storage class and the csi-gce-pd-snapshot-class volume snapshot class +kubectl create -f scripts/deploy/csi/gce_pd_storage_class.yaml +kubectl create -f scripts/deploy/csi/gce_pd_volume_snapshot_class.yaml + +# Create service account for this k8s cluster +scripts/integration_test_cluster/create_service_account.sh + +set +x #echo off \ No newline at end of file diff --git a/oracle/scripts/integration_test_cluster/create_service_account.sh b/oracle/scripts/integration_test_cluster/create_service_account.sh new file mode 100755 index 0000000..31c1fcd --- /dev/null +++ b/oracle/scripts/integration_test_cluster/create_service_account.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Prepare the service account for integration tests +# Use environment variables to get the name of the cluster/zone/project + +set -o errexit +set -o nounset +set -o pipefail + +[[ -z "$PROW_PROJECT" ]] && { echo "PROW_PROJECT envvar was not set. Did you try to test without make?" ; exit 1; } +[[ -z "$PROW_INT_TEST_SA" ]] && { echo "PROW_INT_TEST_SA envvar was not set. Did you try to test without make?" ; exit 1; } + +set -x #echo on + +# Create service account for integration tests (ignore errors if it already exists) +export SA="${PROW_INT_TEST_SA}@${PROW_PROJECT}.iam.gserviceaccount.com" +gcloud iam service-accounts create "${PROW_INT_TEST_SA}" || true + +# GCS bucket permissions for integration tests +gsutil iam ch serviceAccount:$SA:objectCreator gs://${PROW_PROJECT} +gsutil iam ch serviceAccount:$SA:objectViewer gs://${PROW_PROJECT} +gsutil iam ch serviceAccount:$SA:legacyBucketReader gs://${PROW_PROJECT} + +set +x #echo off diff --git a/oracle/scripts/integration_test_cluster/delete_integration_test_cluster.sh b/oracle/scripts/integration_test_cluster/delete_integration_test_cluster.sh new file mode 100755 index 0000000..e00debf --- /dev/null +++ b/oracle/scripts/integration_test_cluster/delete_integration_test_cluster.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Delete the integration test cluster +# Use environment variables to get the name of the cluster/zone/project + +set -o errexit +set -o nounset +set -o pipefail + +[[ -z "$PROW_CLUSTER" ]] && { echo "PROW_CLUSTER envvar was not set. Did you try to test without make?" ; exit 1; } +[[ -z "$PROW_CLUSTER_ZONE" ]] && { echo "PROW_CLUSTER_ZONE envvar was not set. Did you try to test without make?" ; exit 1; } +[[ -z "$PROW_PROJECT" ]] && { echo "PROW_PROJECT envvar was not set. Did you try to test without make?" ; exit 1; } + +echo "Deleting cluster '${PROW_CLUSTER}' (this may take a few minutes)..." + +set -x #echo on +time gcloud beta container clusters delete --async -q "${PROW_CLUSTER}" --zone="${PROW_CLUSTER_ZONE}" --project="${PROW_PROJECT}" + +# Delete service account +scripts/integration_test_cluster/delete_service_account.sh + +set +x #echo off diff --git a/oracle/scripts/integration_test_cluster/delete_service_account.sh b/oracle/scripts/integration_test_cluster/delete_service_account.sh new file mode 100755 index 0000000..fa8a305 --- /dev/null +++ b/oracle/scripts/integration_test_cluster/delete_service_account.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Delete the service account for integration tests +# Use environment variables to get the name of the cluster/zone/project + +set -o errexit +set -o nounset +set -o pipefail + +[[ -z "$PROW_PROJECT" ]] && { echo "PROW_PROJECT envvar was not set. Did you try to test without make?" ; exit 1; } +[[ -z "$PROW_INT_TEST_SA" ]] && { echo "PROW_INT_TEST_SA envvar was not set. Did you try to test without make?" ; exit 1; } + +set -x #echo on + +# Delete service account for integration tests +export SA="${PROW_INT_TEST_SA}@${PROW_PROJECT}.iam.gserviceaccount.com" +gcloud iam service-accounts delete $SA -q + +set +x #echo off diff --git a/oracle/scripts/redeploy.sh b/oracle/scripts/redeploy.sh new file mode 100755 index 0000000..7013311 --- /dev/null +++ b/oracle/scripts/redeploy.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Name:
+#   redeploy.sh
+# Usage:
+#   redeploy.sh <cluster> [<project> [<namespace> [noinstall [<instance name>]]]]
+#
+# Example: redeploy.sh gke_${USER}-playground-operator_us-central1-a_cluster4 ${USER}-playground-operator
+#
+# If the project is not provided as a (2nd) parameter, a standard SWE
+# project pattern is assumed.
+#
+# If a namespace is not provided as a (3rd) parameter, ns defaults to db
+# (can also be set via the DEV_NS env variable).
+#
+# If noinstall is provided as a (4th) parameter, only the cleanup part runs:
+# no CRD install, no Operator build/push, no deploy.
+# This option is useful for deploying from an official build by running:
+# kubectl apply -f operator.yaml
+#
+# Function:
+# This script is meant to be used interactively during the dev cycle to
+# reset and redeploy Operator K8s resources, including rebuilding of the images
+# pushed to GCR.

+[[ "$#" -lt 1 ]] && { echo "Usage: $(basename $0) <cluster> [<project> [<namespace> [noinstall [<instance name>]]]]"; exit 1; }
+
+CLUSTER="${1}"
+
+if [[ ! -z "${2}" ]] ;then
+  PROJECTID="${2}"
+else
+  PROJECTID="${USER}-playground-operator"
+fi
+
+if [[ ! -z "${3}" ]] ;then
+  NS="${3}"
+else
+  # use DEV_NS env var or 'db' as default
+  NS=${DEV_NS:-db}
+fi
+
+NOINSTALL=false
+if [[ ! -z "${4}" && "${4}" == "noinstall" ]] ;then
+  NOINSTALL=true
+fi
+
+if [[ ! -z "${5}" ]] ;then
+  INSTNAME="${5}"
+else
+  # use DEV_INSTNAME env var or 'mydb' as default
+  INSTNAME=${DEV_INSTNAME:-mydb}
+fi
+
+kubectl config use-context ${CLUSTER}
+kubectl config current-context
+echo "Deployment project: ${PROJECTID}"
+echo "Deployment namespace: ${NS}"
+echo "No install? ${NOINSTALL}"
+echo "Instance name: ${INSTNAME}"
+echo "*** verify cluster context and the project before proceeding ***"
+echo "Press any key to continue..."
+read -n 1 input
+
+# set -x
+set -o pipefail
+
+kubectl get all -n operator-system
+kubectl delete deployment.apps/operator-controller-manager -n operator-system
+kubectl delete service/operator-controller-manager-metrics-service -n operator-system
+kubectl get all -n operator-system
+
+kubectl delete deployment.apps/"${INSTNAME}"-agent-deployment -n $NS
+kubectl delete service/"${INSTNAME}"-agent-svc -n $NS
+kubectl delete service/"${INSTNAME}"-dbdaemon-svc -n $NS
+
+kubectl get all -n $NS
+kubectl get storageclasses,volumesnapshotclasses
+kubectl get pv,pvc,sts -n $NS
+gcloud compute disks list --project ${PROJECTID} --filter=name~${CLUSTER}.*pvc
+for pvc in $(kubectl get pvc -n $NS -o jsonpath='{range .items[*]}{@.spec.volumeName}{"\n"}'); do gcloud compute disks list --project ${PROJECTID} |grep $pvc; done
+kubectl delete sts "${INSTNAME}"-sts -n $NS
+for i in $(seq 2 4); do kubectl patch pvc "${INSTNAME}"-pvc-u0${i}-"${INSTNAME}"-sts-0 -n $NS -p '{"metadata":{"finalizers": []}}' --type=merge; kubectl delete pvc "${INSTNAME}"-pvc-u0${i}-"${INSTNAME}"-sts-0 -n $NS; done
+kubectl delete service/"${INSTNAME}"-svc -n $NS
+kubectl delete service/"${INSTNAME}"-svc-node -n $NS
+kubectl get pv,pvc,sts -n $NS
+gcloud compute disks list --project ${PROJECTID} --filter=name~${CLUSTER}.*pvc
+for pvc in $(kubectl get pvc -n $NS -o jsonpath='{range .items[*]}{@.spec.volumeName}{"\n"}'); do gcloud compute disks list --project ${PROJECTID} |grep $pvc; done
+kubectl get all -n $NS
+
+make uninstall
+
+if [[ ${NOINSTALL} == true ]] ;then
+  echo "No install option requested. Cleanup done. Exiting..."
+  exit 0
+fi
+
+# Setup image targets for make.
+export PROW_IMAGE_REPO=gcr.io/${PROJECTID}
+export PROW_PROJECT=${PROJECTID}
+export PROW_IMAGE_TAG=latest
+date; make deploy
+
+kubectl get instances -n $NS
+kubectl get databases -n $NS
+kubectl get backups -n $NS
+kubectl get configs -n $NS
+kubectl get events --sort-by=.metadata.creationTimestamp -n operator-system
+
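For reference, two concrete invocations of redeploy.sh, built from the script's own documented example values:

```bash
# Full cleanup plus rebuild/redeploy into namespace "db".
oracle/scripts/redeploy.sh gke_${USER}-playground-operator_us-central1-a_cluster4 ${USER}-playground-operator db

# Cleanup only ("noinstall"), e.g. before applying an official build:
oracle/scripts/redeploy.sh gke_${USER}-playground-operator_us-central1-a_cluster4 ${USER}-playground-operator db noinstall
kubectl apply -f operator.yaml
```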
diff --git a/oracle/scripts/setup_monitoring.sh b/oracle/scripts/setup_monitoring.sh
new file mode 100755
index 0000000..559f6b0
--- /dev/null
+++ b/oracle/scripts/setup_monitoring.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+function apply_manifests {
+  if [ "$(ls -A manifests/setup)" ]; then
+    kubectl apply -f manifests/setup
+  fi
+
+  kubectl apply -f manifests/
+  if [ $? -ne 0 ]; then
+    echo "failed to update the prometheus operator to access all namespaces"
+    exit 1
+  fi
+}
+
+function help_msg {
+  echo "Please run the script either as $0 install or $0 uninstall"
+  exit 1
+}
+
+function envcheck {
+  if [[ -z "${PATH_TO_RELEASE}" ]]; then
+    PATH_TO_RELEASE=${PWD}
+  fi
+  if [[ -z "${GOPATH}" ]]; then
+    echo "GOPATH environment variable is not set. Please rerun after setting GOPATH."
+    exit 1
+  fi
+  export PATH="${GOPATH}/bin":$PATH
+}
+
+function install {
+  # Install prerequisites.
+  GO111MODULE="on" go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb && echo "jb CMD installed" || { echo "jb CMD install failed."; exit 1; }
+
+  go get github.com/brancz/gojsontoyaml && echo "jsontoyaml CMD installed" || { echo "jsontoyaml CMD install failed."; exit 1; }
+
+  go get github.com/google/go-jsonnet/cmd/jsonnet && echo "jsonnet CMD installed" || { echo "jsonnet CMD install failed."; exit 1; }
+
+  git clone -b release-0.7 https://github.com/prometheus-operator/kube-prometheus && echo "kube-prometheus installed" || { echo "kube-prometheus install failed."; exit 1; }
+
+  # Copy dashboards to kube-prometheus
+  cp ${PATH_TO_RELEASE}/dashboards/db-dashboard.json kube-prometheus/db-dashboard.json
+  if [ $? -ne 0 ]; then
+    echo "dashboards not found"
+    exit 1
+  fi
+  cp ${PATH_TO_RELEASE}/dashboards/install-dashboards.jsonnet kube-prometheus/install-dashboards.jsonnet
+  if [ $? -ne 0 ]; then
+    echo "dashboards installer not found"
+    exit 1
+  fi
+
+  cd kube-prometheus
+
+  # Set up kube-prometheus.
+  kubectl create -f manifests/setup
+  until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
+  kubectl create -f manifests/
+
+  # Modify the prometheus operator to allow access to all namespaces.
+  # Prometheus runs in the monitoring namespace.
+  jb update || { echo "failed to update jsonnet config"; exit 1; }
+
+  ${PWD}/build.sh install-dashboards.jsonnet
+  apply_manifests
+}
+
+function uninstall {
+  git clone -b release-0.7 https://github.com/prometheus-operator/kube-prometheus && echo "kube-prometheus staged" || { echo "kube-prometheus staging failed."; exit 1; }
+  cd kube-prometheus
+  kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
+}
+
+
+if [ $# -eq 1 ]; then
+  envcheck
+  case $1 in
+    "install")
+      install
+      ;;
+    "uninstall")
+      uninstall
+      ;;
+    *)
+      echo "Unrecognized parameter $1."
+      help_msg
+      ;;
+  esac
+else
+  help_msg
fi
diff --git a/oracle/ui.yaml b/oracle/ui.yaml
new file mode 100644
index 0000000..57a5e45
--- /dev/null
+++ b/oracle/ui.yaml
@@ -0,0 +1,176 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ui
+## RBAC
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ui
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - snapshot.storage.k8s.io
+  resources:
+  - volumesnapshotclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - backups
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - backups/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - configs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - configs/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - databases
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - databases/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - instances
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - oracle.db.anthosapis.com
+  resources:
+  - instances/status
+  verbs:
+  - get
+  - patch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: ui
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ui
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: ui
+## Workload
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ui
+  namespace: ui
+spec:
+  selector:
+    matchLabels:
+      app: ui
+  template:
+    metadata:
+      labels:
+        app: ui
+    spec:
+      containers:
+      - name: ui
+        image: gcr.io/elcarro/oracle.db.anthosapis.com/ui:latest
+        imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ui
+  namespace: ui
+spec:
+  selector:
+    app: ui
+  
ports: + - name: http + port: 80 + targetPort: 8080 + diff --git a/third_party/dashboards/README.md b/third_party/dashboards/README.md new file mode 100644 index 0000000..6d93e0f --- /dev/null +++ b/third_party/dashboards/README.md @@ -0,0 +1,2 @@ +This dashboard is a fork of the opensource dashboard at https://grafana.com/grafana/dashboards/3333. +This dashboard has been modified to represent a managed DB. diff --git a/third_party/dashboards/db-dashboard.json b/third_party/dashboards/db-dashboard.json new file mode 100644 index 0000000..8ef1430 --- /dev/null +++ b/third_party/dashboards/db-dashboard.json @@ -0,0 +1,1219 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.5.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "editable": true, + "gnetId": 3333, + "graphTooltip": 1, + "hideControls": false, + "id": null, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "tags": [], + "type": "dashboards" + } + ], + "refresh": false, + "rows": [ + { + "collapse": false, + "height": "125px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "125px", + "id": 12, + "interval": "$interval", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "80%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "calculatedInterval": "10m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_up{instance=\"$host\"}", + "format": "time_series", + "interval": "5m", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "0,1", + "title": "db status", + "transparent": false, + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "DEAD", + "value": "0" + }, + { + "op": "=", + "text": "ALIVE", + "value": "1" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "125px", + "id": 13, + "interval": "$interval", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + 
"value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "calculatedInterval": "10m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_sessions_active{instance=\"$host\"}", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": "", + "title": "active sessions", + "transparent": false, + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "125px", + "id": 51, + "interval": "$interval", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "calculatedInterval": "10m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_activity_user_commits{instance=\"$host\"}", + "format": "time_series", + "interval": "5m", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "90,95", + "title": "user commits", + "transparent": false, + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "125px", + "id": 52, + "interval": "$interval", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": null, + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + 
"calculatedInterval": "10m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_activity_execute_count{instance=\"$host\"}", + "format": "time_series", + "interval": "5m", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "", + "title": "execute count", + "transparent": false, + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Widgets", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "id": 57, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "db_exporter_last_scrape_duration_seconds{instance=\"$host\"}\t", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "last scrape duration seconds", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "db_exporter_scrapes_total{instance=\"$host\"}", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "total scrapes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Exporter Status", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 
53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_concurrency{instance=\"$host\"}", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time concurrency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_commit{instance=\"$host\"}", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time commit", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 55, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_system_io{instance=\"$host\"}", + 
"format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time system io", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 56, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_user_io{instance=\"$host\"}\t", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time user io", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 59, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_application{instance=\"$host\"}\t", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time application", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + 
"logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "editable": true, + "error": false, + "fill": 2, + "grid": {}, + "id": 60, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "calculatedInterval": "2m", + "datasourceErrors": {}, + "errors": {}, + "expr": "db_wait_time_network{instance=\"$host\"}\t", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "wait time network", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Table Locks", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "oracle" + ], + "templating": { + "list": [ + { + "allFormat": "glob", + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "multiFormat": "regex values", + "name": "host", + "options": [], + "query": "label_values(db_up, instance)", + "refresh": 1, + "refresh_on_load": false, + "regex": "", + "sort": 1, + "tagValuesQuery": null, + "tags": [], + "tagsQuery": null, + "type": "query", + "useTags": false + }, + { + "allFormat": "glob", + "auto": true, + "auto_count": 200, + "auto_min": "1s", + "current": { + "text": "1m", + "value": "1m" + }, + "datasource": "Prometheus", + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "multiFormat": "glob", + "name": "interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval" + }, + { + "selected": false, + "text": "1s", + "value": "1s" + }, + { + "selected": false, + "text": "5s", + "value": "5s" + }, + { + "selected": true, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "1s,5s,1m,5m,1h,6h,1d", + "refresh": 2, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "collapse": false, + "enable": true, + "notice": false, + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "type": "timepicker" + }, + 
"timezone": "browser", + "title": "db", + "version": 18, + "description": "Database dashboard monitors db status, active sessions, user commits, wait times and more." +} diff --git a/third_party/monitoring/BUILD.bazel b/third_party/monitoring/BUILD.bazel new file mode 100644 index 0000000..e681791 --- /dev/null +++ b/third_party/monitoring/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "monitoring", + srcs = ["monitoring.go"], + importpath = "github.com/GoogleCloudPlatform/elcarro-oracle-operator/third_party/monitoring", + visibility = ["//visibility:public"], + deps = [ + "//oracle/pkg/agents/common", + "//oracle/pkg/agents/oracle", + "@com_github_prometheus_client_golang//prometheus", + "@in_gopkg_yaml_v2//:yaml_v2", + "@io_k8s_klog_v2//:klog", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/third_party/monitoring/LICENSE b/third_party/monitoring/LICENSE new file mode 100644 index 0000000..1d7a8a4 --- /dev/null +++ b/third_party/monitoring/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2016 Seth Miller + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/third_party/monitoring/README.md b/third_party/monitoring/README.md new file mode 100644 index 0000000..e244a9d --- /dev/null +++ b/third_party/monitoring/README.md @@ -0,0 +1,2 @@ +This module is a fork of the https://github.com/iamseth/oracledb_exporter. +This module is modified from the original version to include Oracle DB support. diff --git a/third_party/monitoring/monitoring.go b/third_party/monitoring/monitoring.go new file mode 100644 index 0000000..de21ac6 --- /dev/null +++ b/third_party/monitoring/monitoring.go @@ -0,0 +1,534 @@ +// Copyright 2021 Google LLC +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file or at +// https://opensource.org/licenses/MIT. + +// Package monitoring is used for monitoring agent. +// This is based off iamseth/oracledb_exporter. +// The significant difference is the reliance on yaml instead of toml +// and the use of gRPC via dbdaemon instead of oracle client and tcp. 
+package monitoring
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+	"gopkg.in/yaml.v2"
+	"k8s.io/klog/v2"
+
+	"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common"
+	dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
+)
+
+// Metric name parts.
+const (
+	namespace = "db"
+	exporter  = "exporter"
+)
+
+// Metric describes labels, type, and other information of a metric.
+type Metric struct {
+	Context          string                       `yaml:"context"`
+	Labels           []string                     `yaml:"labels"`
+	MetricsDesc      map[string]string            `yaml:"metricsdesc"`
+	MetricsType      map[string]string            `yaml:"metricstype"`
+	MetricsBuckets   map[string]map[string]string `yaml:"metricsbuckets"`
+	FieldToAppend    string                       `yaml:"fieldtoappend"`
+	Request          string                       `yaml:"request"`
+	IgnoreZeroResult bool                         `yaml:"ignorezeroresult"`
+}
+
+// Metrics are used to load multiple metrics from file.
+type Metrics struct {
+	Metric []Metric `yaml:"metric"`
+}
+
+// Metrics to scrape, loaded from an external file (the default metrics file,
+// plus custom metrics if provided).
+var (
+	metricsToScrap    Metrics
+	additionalMetrics Metrics
+	hashMap           map[int][]byte
+	queryTimeout      = "5"
+)
+
+// Exporter collects Oracle DB metrics. It implements prometheus.Collector.
+type Exporter struct {
+	duration, error    prometheus.Gauge
+	totalScrapes       prometheus.Counter
+	scrapeErrors       *prometheus.CounterVec
+	up                 prometheus.Gauge
+	dbdClient          dbdpb.DatabaseDaemonClient
+	closeConn          func() error
+	customMetrics      string
+	defaultFileMetrics string
+	dbservice          string
+	dbport             int
+}
+
+// getEnv returns the value of an environment variable, or returns the provided fallback value.
+func getEnv(key, fallback string) string {
+	if value, ok := os.LookupEnv(key); ok {
+		return value
+	}
+	return fallback
+}
+
+func atoi(stringValue string) int {
+	intValue, err := strconv.Atoi(stringValue)
+	if err != nil {
+		klog.Fatalf("error while converting to int: %v", err)
+		panic(err)
+	}
+	return intValue
+}
+
+func createDBDClient(ctx context.Context, service string, port int) (dbdpb.DatabaseDaemonClient, func() error, error) {
+	klog.Infof("connecting to DB: %s, port: %d", service, port)
+	conn, err := common.DatabaseDaemonDialService(ctx, fmt.Sprintf("%s:%d", service, port), grpc.WithBlock())
+	if err != nil {
+		return nil, func() error { return nil }, err
+	}
+	return dbdpb.NewDatabaseDaemonClient(conn), conn.Close, nil
+}
+
+// NewExporter returns a new Oracle DB exporter for the provided dbdaemon
+// service and port.
+func NewExporter(ctx context.Context, defaultFileMetrics, customMetrics, service string, port int, qt string) (*Exporter, error) {
+	// Load default and custom metrics.
+ hashMap = make(map[int][]byte) + if qt != "" { + queryTimeout = qt + } + reloadMetrics(defaultFileMetrics, customMetrics) + dbdClient, closeConn, err := createDBDClient(ctx, service, port) + if err != nil { + return nil, err + } + + return &Exporter{ + duration: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "last_scrape_duration_seconds", + Help: "Duration of the last scrape of metrics from Oracle DB.", + }), + totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "scrapes_total", + Help: "Total number of times Oracle DB was scraped for metrics.", + }), + scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "scrape_errors_total", + Help: "Total number of times an error occurred scraping an Oracle database.", + }, []string{"collector"}), + error: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "last_scrape_error", + Help: "Whether the last scrape of metrics from Oracle DB resulted in an error (1 for error, 0 for success).", + }), + up: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "up", + Help: "Whether the Oracle database server is up.", + }), + dbdClient: dbdClient, + closeConn: closeConn, + defaultFileMetrics: defaultFileMetrics, + customMetrics: customMetrics, + dbservice: service, + dbport: port, + }, nil +} + +// Describe describes all the metrics exported by the Oracle DB exporter. +func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { + // We cannot know in advance what metrics the exporter will generate + // So we use the poor man's describe method: Run a collect + // and send the descriptors of all the collected metrics. The problem + // here is that we need to connect to the Oracle DB. If it is currently + // unavailable, the descriptors will be incomplete. Since this is a + // stand-alone exporter and not used as a library within other code + // implementing additional metrics, the worst that can happen is that we + // don't detect inconsistent metrics created by this exporter itself. + // Also, a change in the monitored Oracle instance may change the + // exported metrics during the runtime of the exporter. + + metricCh := make(chan prometheus.Metric) + doneCh := make(chan struct{}) + + go func() { + for m := range metricCh { + ch <- m.Desc() + } + close(doneCh) + }() + + e.Collect(metricCh) + close(metricCh) + <-doneCh + +} + +// Collect implements prometheus.Collector. 
+func (e *Exporter) Collect(ch chan<- prometheus.Metric) { + e.scrape(ch) + ch <- e.duration + ch <- e.totalScrapes + ch <- e.error + e.scrapeErrors.Collect(ch) + ch <- e.up +} + +func (e *Exporter) scrape(ch chan<- prometheus.Metric) { + e.totalScrapes.Inc() + var err error + defer func(begun time.Time) { + e.duration.Set(time.Since(begun).Seconds()) + if err == nil { + e.error.Set(0) + } else { + e.error.Set(1) + } + }(time.Now()) + + if _, err = e.dbdClient.RunSQLPlus(context.Background(), &dbdpb.RunSQLPlusCMDRequest{Commands: []string{"select sysdate from dual"}}); err != nil { + if strings.Contains(err.Error(), "ORA-") { + klog.Infoln("Reconnecting to DB") + e.dbdClient, e.closeConn, err = createDBDClient(context.Background(), e.dbservice, e.dbport) + if err != nil { + klog.Errorln("Error pinging oracle:", err) + e.closeConn() + e.up.Set(0) + return + } + } + } else { + klog.Infoln("Successfully pinged Oracle database: ") + e.up.Set(1) + } + + if checkIfMetricsChanged(e.defaultFileMetrics, e.customMetrics) { + reloadMetrics(e.defaultFileMetrics, e.customMetrics) + } + + wg := sync.WaitGroup{} + + for _, metric := range metricsToScrap.Metric { + wg.Add(1) + metric := metric //https://golang.org/doc/faq#closures_and_goroutines + + go func() { + defer wg.Done() + + if len(metric.Request) == 0 { + klog.Errorln("Error scraping for ", metric.MetricsDesc, ". Did you forget to define request in your toml file?") + return + } + + if len(metric.MetricsDesc) == 0 { + klog.Errorln("Error scraping for query", metric.Request, ". Did you forget to define metrics desc in your toml file?") + return + } + + for column, metricType := range metric.MetricsType { + if metricType == "histogram" { + _, ok := metric.MetricsBuckets[column] + if !ok { + klog.Errorln("Unable to find MetricsBuckets configuration key for metric. (metric=" + column + ")") + return + } + } + } + + scrapeStart := time.Now() + if err = ScrapeMetric(e.dbdClient, ch, metric); err != nil { + klog.Errorln("Error scraping for", metric.Context, "_", metric.MetricsDesc, ":", err) + e.scrapeErrors.WithLabelValues(metric.Context).Inc() + } else { + klog.Infoln("Successfully scraped metric: ", metric.Context, metric.MetricsDesc, time.Since(scrapeStart)) + } + }() + } + wg.Wait() +} + +// GetMetricType returns the prometheus type of a metric. +func GetMetricType(metricType string, metricsType map[string]string) prometheus.ValueType { + var strToPromType = map[string]prometheus.ValueType{ + "gauge": prometheus.GaugeValue, + "counter": prometheus.CounterValue, + "histogram": prometheus.UntypedValue, + } + + strType, ok := metricsType[strings.ToLower(metricType)] + if !ok { + return prometheus.GaugeValue + } + valueType, ok := strToPromType[strings.ToLower(strType)] + if !ok { + panic(errors.New("Error while getting prometheus type " + strings.ToLower(strType))) + } + return valueType +} + +// ScrapeMetric calls ScrapeGenericValues using Metric struct values. +func ScrapeMetric(dbdClient dbdpb.DatabaseDaemonClient, ch chan<- prometheus.Metric, metricDefinition Metric) error { + klog.InfoS("Calling function ScrapeGenericValues(): %v", metricDefinition) + return ScrapeGenericValues(dbdClient, ch, metricDefinition.Context, metricDefinition.Labels, + metricDefinition.MetricsDesc, metricDefinition.MetricsType, metricDefinition.MetricsBuckets, + metricDefinition.FieldToAppend, metricDefinition.IgnoreZeroResult, + metricDefinition.Request) +} + +// ScrapeGenericValues is a generic method for retrieving metrics. 
+func ScrapeGenericValues(dbdClient dbdpb.DatabaseDaemonClient, ch chan<- prometheus.Metric, context string, labels []string, + metricsDesc map[string]string, metricsType map[string]string, metricsBuckets map[string]map[string]string, fieldToAppend string, ignoreZeroResult bool, request string) error { + metricsCount := 0 + genericParser := func(row map[string]string) error { + // Construct labels and values. + labelsValues := []string{} + for _, label := range labels { + labelsValues = append(labelsValues, row[label]) + } + // Construct Prometheus values to sent back. + for metric, metricHelp := range metricsDesc { + value, err := strconv.ParseFloat(strings.TrimSpace(row[strings.ToUpper(metric)]), 64) + // If not a float, skip current metric. + if err != nil { + klog.Errorln("Unable to convert current value to float (metric=" + metric + + ",metricHelp=" + metricHelp + ",value=<" + row[strings.ToUpper(metric)] + ">)") + continue + } + // If metric does not use a field, content in metric's name. + if strings.Compare(fieldToAppend, "") == 0 { + desc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, context, metric), + metricHelp, + labels, nil, + ) + if metricsType[strings.ToLower(metric)] == "histogram" { + count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64) + if err != nil { + klog.Errorln("Unable to convert count value to int (metric=" + metric + + ",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)") + continue + } + buckets := make(map[float64]uint64) + for field, le := range metricsBuckets[metric] { + lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64) + if err != nil { + klog.Errorln("Unable to convert bucket limit value to float (metric=" + metric + + ",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)") + continue + } + counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64) + if err != nil { + klog.Errorln("Unable to convert ", field, " value to int (metric="+metric+ + ",metricHelp="+metricHelp+",value=<"+row[field]+">)") + continue + } + buckets[lelimit] = counter + } + ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...) + } else { + ch <- prometheus.MustNewConstMetric(desc, GetMetricType(metric, metricsType), value, labelsValues...) 
+	err := GeneratePrometheusMetrics(dbdClient, genericParser, request)
+	klog.Infoln("ScrapeGenericValues() - metricsCount:", metricsCount)
+	if err != nil {
+		return err
+	}
+	if !ignoreZeroResult && metricsCount == 0 {
+		return errors.New("no metrics found while parsing")
+	}
+	return nil
+}
+
+// ParseSQLResponse parses the JSON result-set (returned by the runSQLPlus API)
+// and returns a list of rows with column-value mapping.
+func ParseSQLResponse(resp *dbdpb.RunCMDResponse) ([]map[string]string, error) {
+	var rows []map[string]string
+	for _, msg := range resp.GetMsg() {
+		row := make(map[string]string)
+		if err := json.Unmarshal([]byte(msg), &row); err != nil {
+			return nil, fmt.Errorf("failed to parse %s: %v", msg, err)
+		}
+
+		rows = append(rows, row)
+	}
+	return rows, nil
+}
+
+// GeneratePrometheusMetrics runs a metric query and feeds each result row to
+// the supplied parse callback.
+// Inspired by https://kylewbanks.com/blog/query-result-to-map-in-golang
+func GeneratePrometheusMetrics(dbdClient dbdpb.DatabaseDaemonClient, parse func(row map[string]string) error, query string) error {
+	// Add a timeout. Return the error instead of killing the exporter on a
+	// malformed flag value.
+	timeout, err := strconv.Atoi(queryTimeout)
+	if err != nil {
+		return fmt.Errorf("error while converting timeout option value %q: %v", queryTimeout, err)
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
+	defer cancel()
+
+	resp, err := dbdClient.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{query}, Suppress: true, Quiet: true})
+	if err != nil {
+		return err
+	}
+
+	if ctx.Err() == context.DeadlineExceeded {
+		return errors.New("Oracle query timed out")
+	}
+	rows, err := ParseSQLResponse(resp)
+	if err != nil {
+		return err
+	}
+	for _, r := range rows {
+		// Feed each row to the caller-supplied parser.
+		if err := parse(r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
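+// A hedged illustration of the contract assumed above: RunSQLPlusFormatted is
+// expected to return one JSON object per result row in the response messages,
+// e.g.
+//
+//	{"STATUS": "ACTIVE", "VALUE": "42"}
+//
+// which ParseSQLResponse unmarshals into one map[string]string per row.
+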
+// Oracle gives back names with special characters; cleanName
+// sanitizes them into valid Prometheus metric names.
+func cleanName(s string) string {
+	s = strings.Replace(s, " ", "_", -1) // Replace spaces with underscores
+	s = strings.Replace(s, "(", "", -1)  // Remove open parentheses
+	s = strings.Replace(s, ")", "", -1)  // Remove close parentheses
+	s = strings.Replace(s, "/", "", -1)  // Remove forward slashes
+	s = strings.Replace(s, "*", "", -1)  // Remove asterisks
+	s = strings.ToLower(s)
+	return s
+}
+
+func hashFile(h hash.Hash, fn string) error {
+	f, err := os.Open(fn)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if _, err := io.Copy(h, f); err != nil {
+		return err
+	}
+	return nil
+}
+
+func checkIfMetricsChanged(defaultFileMetrics, customMetrics string) bool {
+	for i, metricsFile := range strings.Split(customMetrics, ",") {
+		if len(metricsFile) == 0 {
+			continue
+		}
+		klog.Infoln("Checking for modifications in metrics definition file:", metricsFile)
+		h := sha256.New()
+		if err := hashFile(h, metricsFile); err != nil {
+			klog.Errorln("Unable to get file hash", err)
+			return false
+		}
+		// If any of the files has changed, reload metrics.
+		if !bytes.Equal(hashMap[i], h.Sum(nil)) {
+			klog.Infoln(metricsFile, "has been changed. Reloading metrics...")
+			hashMap[i] = h.Sum(nil)
+			return true
+		}
+	}
+	return false
+}
+
+func reloadMetrics(defaultFileMetrics, customMetrics string) {
+	// Truncate metricsToScrap.
+	metricsToScrap.Metric = []Metric{}
+
+	defYAMLFile, err := ioutil.ReadFile(defaultFileMetrics)
+	if err != nil {
+		klog.Errorln(err)
+		panic(errors.New("Error while loading " + defaultFileMetrics))
+	}
+
+	// Load default metrics.
+	if err := yaml.Unmarshal(defYAMLFile, &metricsToScrap); err != nil {
+		klog.Errorln(err)
+	} else {
+		klog.Infoln("Successfully loaded default metrics from: " + defaultFileMetrics)
+	}
+
+	// Load custom metrics.
+	if customMetrics != "" {
+		for _, metricsFile := range strings.Split(customMetrics, ",") {
+			cusYAMLFile, err := ioutil.ReadFile(metricsFile)
+			if err != nil {
+				klog.Errorln(err)
+				panic(errors.New("Error while loading " + metricsFile))
+			}
+			if err := yaml.Unmarshal(cusYAMLFile, &additionalMetrics); err != nil {
+				klog.Errorln(err)
+			} else {
+				klog.Infoln("Successfully loaded custom metrics from: " + metricsFile)
+			}
+			metricsToScrap.Metric = append(metricsToScrap.Metric, additionalMetrics.Metric...)
+		}
+	} else {
+		klog.Infoln("No custom metrics defined.")
+	}
+}
diff --git a/tools.go b/tools.go
new file mode 100644
index 0000000..ee03d07
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,25 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build tools
+
+package tools
+
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+import (
+	_ "github.com/golang/protobuf/protoc-gen-go"
+	_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
+	_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
+	_ "sigs.k8s.io/kustomize/kustomize/v4"
+)
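+
+// Example usage (hedged sketch): because these tools are tracked in go.mod,
+// they can be run at their pinned versions without a separate install step,
+// e.g.
+//
+//	go run sigs.k8s.io/controller-tools/cmd/controller-gen object paths=./...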