diff --git a/applicationset/controllers/applicationset_controller_test.go b/applicationset/controllers/applicationset_controller_test.go index bc581b09cbf21..0f4ae35d47be2 100644 --- a/applicationset/controllers/applicationset_controller_test.go +++ b/applicationset/controllers/applicationset_controller_test.go @@ -1719,7 +1719,7 @@ func TestValidateGeneratedApplications(t *testing.T) { kubeclientset := kubefake.NewSimpleClientset(objects...) argoDBMock := dbmocks.ArgoDB{} - argoDBMock.On("GetCluster", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil) + argoDBMock.On("GetClusterByUrl", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil) argoDBMock.On("ListClusters", mock.Anything).Return(&argov1alpha1.ClusterList{Items: []argov1alpha1.Cluster{ myCluster, }}, nil) @@ -1825,8 +1825,8 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) { client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() goodCluster := argov1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"} badCluster := argov1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"} - argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil) - argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil) + argoDBMock.On("GetClusterByUrl", mock.Anything, "https://good-cluster").Return(&goodCluster, nil) + argoDBMock.On("GetClusterByUrl", mock.Anything, "https://bad-cluster").Return(&badCluster, nil) argoDBMock.On("ListClusters", mock.Anything).Return(&argov1alpha1.ClusterList{Items: []argov1alpha1.Cluster{ goodCluster, }}, nil) diff --git a/cmd/argocd/commands/admin/cluster.go b/cmd/argocd/commands/admin/cluster.go index 04f16340a70fe..e747933a3314c 100644 --- a/cmd/argocd/commands/admin/cluster.go +++ b/cmd/argocd/commands/admin/cluster.go @@ -346,7 +346,7 @@ func NewClusterEnableNamespacedMode() *cobra.Command { continue } - cluster, err := 
argoDB.GetCluster(ctx, server) + cluster, err := argoDB.GetClusterByUrl(ctx, server) if err != nil { return fmt.Errorf("error getting cluster from server: %w", err) } @@ -401,7 +401,7 @@ func NewClusterDisableNamespacedMode() *cobra.Command { continue } - cluster, err := argoDB.GetCluster(ctx, server) + cluster, err := argoDB.GetClusterByUrl(ctx, server) if err != nil { return fmt.Errorf("error getting cluster from server: %w", err) } @@ -502,7 +502,7 @@ func NewClusterConfig() *cobra.Command { kubeclientset, err := kubernetes.NewForConfig(conf) errors.CheckError(err) - cluster, err := db.NewDB(namespace, settings.NewSettingsManager(ctx, kubeclientset, namespace), kubeclientset).GetCluster(ctx, serverUrl) + cluster, err := db.NewDB(namespace, settings.NewSettingsManager(ctx, kubeclientset, namespace), kubeclientset).GetClusterByUrl(ctx, serverUrl) errors.CheckError(err) err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output) errors.CheckError(err) diff --git a/controller/appcontroller.go b/controller/appcontroller.go index 89353d755076b..ac7c5b890cea3 100644 --- a/controller/appcontroller.go +++ b/controller/appcontroller.go @@ -992,7 +992,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic // Attempt to validate the destination via its URL if validDestination { - if cluster, err = ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server); err != nil { + if cluster, err = ctrl.db.GetClusterByUrl(context.Background(), app.Spec.Destination.Server); err != nil { log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err) validDestination = false } @@ -1720,7 +1720,7 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool { return false } if ctrl.clusterFilter != nil { - cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server) + cluster, err := ctrl.db.GetClusterByUrl(context.Background(), app.Spec.Destination.Server) if err != nil { return 
ctrl.clusterFilter(nil) } diff --git a/controller/cache/cache.go b/controller/cache/cache.go index c0a798b43d28a..e24e9b23b06d8 100644 --- a/controller/cache/cache.go +++ b/controller/cache/cache.go @@ -380,7 +380,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e return clusterCache, nil } - cluster, err := c.db.GetCluster(context.Background(), server) + cluster, err := c.db.GetClusterByUrl(context.Background(), server) if err != nil { return nil, fmt.Errorf("error getting cluster: %w", err) } diff --git a/controller/sync.go b/controller/sync.go index 1c592f5c0a733..ba43907c213e4 100644 --- a/controller/sync.go +++ b/controller/sync.go @@ -131,7 +131,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha return } - clst, err := m.db.GetCluster(context.Background(), app.Spec.Destination.Server) + clst, err := argo.GetClusterByDestination(m.db, app.Spec.Destination) if err != nil { state.Phase = common.OperationError state.Message = err.Error() diff --git a/docs/developer-guide/code-contributions.md b/docs/developer-guide/code-contributions.md index b57d9df6d8ae6..7da2da37d18dd 100644 --- a/docs/developer-guide/code-contributions.md +++ b/docs/developer-guide/code-contributions.md @@ -2,12 +2,19 @@ ## Preface -The Argo CD project continuously grows, both in terms of features and community size. It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and discussed before it gets accepted into the code base. +The Argo CD project continuously grows, both in terms of features and community size. +It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. 
+Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability +and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and +discussed before it gets accepted into the code base. -We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept each and every contribution from the community, for various reasons. +We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept +each and every contribution from the community, for various reasons. If you want to submit code for a great new feature or enhancement, we kindly ask you to take a look at the -enhancement process outlined below before you start to write code or submit a PR. This will ensure that your idea is well aligned with the project's strategy and technical requirements, and it will help greatly in getting your code merged into our code base. +enhancement process outlined below before you start to write code or submit a PR. +This will ensure that your idea is well aligned with the project's strategy and technical requirements, +and it will help greatly in getting your code merged into our code base. Before submitting code for a new feature (and also, to some extent, for more complex bug fixes) please [raise an Enhancement Proposal or Bug Issue](https://github.com/argoproj/argo-cd/issues/new/choose) @@ -18,7 +25,9 @@ Each enhancement proposal needs to go through our before we accept code contributions. To facilitate triage and to provide transparency, we use [this GitHub project](https://github.com/orgs/argoproj/projects/18) to keep track of this process' outcome. -_Please_ do not spend too much time on larger features or refactorings before the corresponding enhancement has been triaged. 
This may save everyone some amount of frustration and time, as the enhancement proposal might be rejected, and the code would never get merged. However, sometimes it's helpful to have some PoC code along with a proposal. +_Please_ do not spend too much time on larger features or refactorings before the corresponding enhancement has been +triaged. This may save everyone some amount of frustration and time, as the enhancement proposal might be rejected, +and the code would never get merged. However, sometimes it's helpful to have some PoC code along with a proposal. We will do our best to triage incoming enhancement proposals quickly, with one of the following outcomes: @@ -30,7 +39,8 @@ Depending on how many enhancement proposals we receive at given times, it may ta Also, please make sure you have read our [Toolchain Guide](toolchain-guide.md) -to understand our toolchain and our continuous integration processes. It contains some invaluable information to get started with the complex code base that makes up Argo CD. +to understand our toolchain and our continuous integration processes. +It contains some invaluable information to get started with the complex code base that makes up Argo CD. ## Quick start @@ -45,68 +55,99 @@ If the issue is already attached to next [version milestone](https://github.com/argoproj/argo-cd/milestones), we have decided to also dedicate some of our time on reviews to PRs received for these issues. -We encourage our community to pick up issues that are labeled in this way *and* are attached to the next version's milestone, with a promise for them to get a proper review with the clear intention for the incoming PRs to get merged. +We encourage our community to pick up issues that are labeled in this way *and* are attached to the next version's +milestone, with a promise for them to get a proper review with the clear intention for the incoming PRs to get merged. 
## Triage process ### Overview -Our triage process for enhancements proposals ensures that we take a look at all incoming enhancements to determine whether we will accept code submissions to implement them. +Our triage process for enhancement proposals ensures that we take a look at all incoming enhancements +to determine whether we will accept code submissions to implement them. The process works as follows: -* New Enhancement Proposals raised on our GitHub issue tracker are moved to the _Incoming_ column of the project's board. These are the proposals that are in the queue for triage. +* New Enhancement Proposals raised on our GitHub issue tracker are moved to the _Incoming_ column of the project's board. +These are the proposals that are in the queue for triage. * The _Active_ column holds the issues that are currently being triaged, or will be triaged next. -* The _Accepted_ column holds the issues that have been triaged and are considered good to be implemented (e.g. the project agreed that the feature would be great to have) -* The _Declined_ column holds the issues that were rejected during triage. The issue will be updated with information about why the proposal has been rejected. -* The _Needs discussion_ column holds the issues that were found to require additional information, or even a design document, during triage. +* The _Accepted_ column holds the issues that have been triaged and are considered good to be implemented +(e.g. the project agreed that the feature would be great to have) +* The _Declined_ column holds the issues that were rejected during triage. +The issue will be updated with information about why the proposal has been rejected. +* The _Needs discussion_ column holds the issues that were found to require additional information, +or even a design document, during triage. ### Triage cadence -Triage of enhancement proposals is performed transparently, offline using issue comments and online in our weekly 
_Everyone_ is invited to participate in triaging, the process is not limited to participation only by maintainers. +Triage of enhancement proposals is performed transparently, offline using issue comments and online in our weekly +contributor's meeting. _Everyone_ is invited to participate in triaging, the process is not limited +to participation only by maintainers. -Usually, we will triage enhancement proposals in a First-In-First-Out order, which mean that oldest proposals will be triaged first. +Usually, we will triage enhancement proposals in a First-In-First-Out order, which mean that oldest +proposals will be triaged first. -We aim to triage at least 10 proposals a week. Depending on our available time, we may be triaging a higher or lower number of proposals in any given week. +We aim to triage at least 10 proposals a week. Depending on our available time, we may be triaging a higher or +lower number of proposals in any given week. ## Proposal states ### Accepted proposals -When a proposal is considered _Accepted_, it was decided that this enhancement would be valuable to the community at large and fits into the overall strategic roadmap of the project. +When a proposal is considered _Accepted_, it was decided that this enhancement would be valuable to the community +at large and fits into the overall strategic roadmap of the project. -Implementation of the issue may be started, either by the proposal's creator or another community member (including maintainers of the project). +Implementation of the issue may be started, either by the proposal's creator or another community member +(including maintainers of the project). -The issue should be refined enough by now to contain any concerns and guidelines to be taken into consideration during implementation. +The issue should be refined enough by now to contain any concerns and guidelines to be taken into consideration +during implementation. 
### Declined proposals -We don't decline proposals lightly, and we will do our best to give a proper reasoning why we think that the proposal does not fit with the future of the project. Reasons for declining proposals may be - amongst others - that the change would be breaking for many, or that it does not meet the strategic direction of the project. Usually, discussion will be facilitated with the enhancement's creator before declining a proposal. +We don't decline proposals lightly, and we will do our best to give a proper reasoning why we think that the proposal +does not fit with the future of the project. Reasons for declining proposals may be - amongst others - that the change +would be breaking for many, or that it does not meet the strategic direction of the project. Usually, discussion will +be facilitated with the enhancement's creator before declining a proposal. Once a proposal is in _Declined_ state it's unlikely that we will accept code contributions for its implementation. ### Proposals that need discussion -Sometimes, we can't completely understand a proposal from its GitHub issue and require more information on the original intent or on more details about the implementation. If we are confronted with such an issue during the triage, we move this issue to the _Needs discussion_ column to indicate that we expect the issue's creator to supply more information on their idea. We may ask you to provide this information, either by adding that information to the issue itself or by joining one of our -[regular contributor's meeting](#regular-contributor-meeting) +Sometimes, we can't completely understand a proposal from its GitHub issue and require more +information on the original intent or on more details about the implementation. If we are confronted with such an +issue during the triage, we move this issue to the _Needs discussion_ column to indicate that we expect the issue's +creator to supply more information on their idea. 
We may ask you to provide this information, either by adding that +information to the issue itself or by joining one of our [regular contributor's meeting](#regular-contributor-meeting) to discuss the proposal with us. Also, issues that we find to require a more formal design document will be moved to this column. ## Design documents -For some enhancement proposals (especially those that will change behavior of Argo CD substantially, are attached with some caveats or where upgrade/downgrade paths are not clear), a more formal design document will be required in order to fully discuss and understand the enhancement in the broader community. This requirement is usually determined during triage. If you submitted an enhancement proposal, we may ask you to provide this more formal write down, along with some concerns or topics that need to be addressed. +For some enhancement proposals (especially those that will change behavior of Argo CD substantially, +are attached with some caveats or where upgrade/downgrade paths are not clear), a more formal design document will be +required in order to fully discuss and understand the enhancement in the broader community. +This requirement is usually determined during triage. If you submitted an enhancement proposal, we may ask you to +provide this more formal write down, along with some concerns or topics that need to be addressed. -Design documents are usually submitted as PR and use [this template](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/001-proposal-template.md) as a guide what kind of information we're looking for. Discussion will take place in the review process. When a design document gets merged, we consider it as approved and code can be written and submitted to implement this specific design. 
+Design documents are usually submitted as PR and use +[this template](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/001-proposal-template.md) as a guide +what kind of information we're looking for. Discussion will take place in the review process. +When a design document gets merged, we consider it as approved and code can be written and submitted to implement +this specific design. ## Regular contributor meeting -Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. We do invite you to join this virtual meetings if you want to bring up certain things (including your enhancement proposals), participate in our triaging or just want to get to know other contributors. +Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. +We do invite you to join these virtual meetings if you want to bring up certain things +(including your enhancement proposals), participate in our triaging or just want to get to know other contributors. -The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC (8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). We use Zoom to conduct these meetings. +The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC +(8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). +We use Zoom to conduct these meetings. * [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) If you want to discuss something, we kindly ask you to put your item on the [agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) -for one of the upcoming meetings so that we can plan in the time for discussing about it. \ No newline at end of file +for one of the upcoming meetings so that we can plan in the time for discussing it. 
\ No newline at end of file diff --git a/docs/developer-guide/debugging-remote-environment.md b/docs/developer-guide/debugging-remote-environment.md index 7fe7f18260205..1911eb2c710c5 100644 --- a/docs/developer-guide/debugging-remote-environment.md +++ b/docs/developer-guide/debugging-remote-environment.md @@ -2,15 +2,19 @@ In this guide, we will describe how to debug a remote ArgoCD environment with [Telepresence](https://telepresence.io/). -Telepresence allows you to connect & debug a service deployed in a remote environment and to "cherry-pick" one service to run locally, staying connected to the remote cluster. This will: +Telepresence allows you to connect & debug a service deployed in a remote environment and to "cherry-pick" +one service to run locally, staying connected to the remote cluster. This will: * Reduce resource footprint on the local machine * Decrease the feedback loop time * Result in more confidence about the delivered code. -To read more about it, refer to the official documentation at [telepresence.io](https://telepresence.io/) or [Medium](https://medium.com/containers-101/development-environment-using-telepresence-634bd7210c26). +To read more about it, refer to the official documentation at +[telepresence.io](https://telepresence.io/) or +[Medium](https://medium.com/containers-101/development-environment-using-telepresence-634bd7210c26). ## Install ArgoCD + First of all, install ArgoCD on your cluster ```shell kubectl create ns argocd @@ -18,6 +22,7 @@ curl -sSfL https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/i ``` ## Connect + Connect to one of the services, for example, to debug the main ArgoCD server run: ```shell kubectl config set-context --current --namespace argocd @@ -31,6 +36,7 @@ telepresence intercept argocd-server --port 8080:http --env-file .envrc.remote # With this, any traffic that hits your argocd-server service in the cluster (e.g through a LB / ingress) will be forwarded to your laptop on port 8080. 
So that you can now start argocd-server locally to debug or test new code. If you launch argocd-server using the environment variables in `.envrc.remote`, it is able to fetch all the configmaps, secrets and so on from the cluster and transparently connect to the other microservices so that no further configuration should be necessary and it behaves exactly the same as in the cluster. List current status of Telepresence using: + ```shell telepresence status ``` diff --git a/docs/developer-guide/running-locally.md b/docs/developer-guide/running-locally.md index f4f5dd646da20..cadf4f2d87875 100644 --- a/docs/developer-guide/running-locally.md +++ b/docs/developer-guide/running-locally.md @@ -2,11 +2,14 @@ ## Run Argo CD outside of Kubernetes -During development, it might be viable to run Argo CD outside of a Kubernetes cluster. This will greatly speed up development, as you don't have to constantly build, push and install new Argo CD Docker images with your latest changes. +During development, it might be viable to run Argo CD outside a Kubernetes cluster. +This will greatly speed up development, as you don't have to constantly build, push and install new Argo CD Docker images with your latest changes. -You will still need a working Kubernetes cluster, as described in the [Toolchain Guide](toolchain-guide.md), where Argo CD will store all of its resources and configuration. +You will still need a working Kubernetes cluster, as described in the [Toolchain Guide](toolchain-guide.md), +where Argo CD will store all of its resources and configuration. 
-If you followed the [Toolchain Guide](toolchain-guide.md) in setting up your toolchain, you can run Argo CD locally with these simple steps: +If you followed the [Toolchain Guide](toolchain-guide.md) in setting up your toolchain, +you can run Argo CD locally with these simple steps: ### Install Argo CD resources to your cluster @@ -125,7 +128,8 @@ $ goreman run status If not all critical processes run (marked with `*`), check logs to see why they terminated. -In case of an error like `gpg: key generation failed: Unknown elliptic curve` (a [gnupg bug](https://dev.gnupg.org/T5444)), disable GPG verification before running `make start-local`: +In case of an error like `gpg: key generation failed: Unknown elliptic curve` +(a [gnupg bug](https://dev.gnupg.org/T5444)), disable GPG verification before running `make start-local`: ```sh export ARGOCD_GPG_ENABLED=false @@ -158,7 +162,8 @@ goreman run restart repo-server Clean up when you're done: ```sh -kind delete cluster; rm -f ~/.kube/config-kind +kind delete cluster; +rm -f ~/.kube/config-kind ``` ### Scale up Argo CD in your cluster diff --git a/pkg/apis/application/v1alpha1/cluster_constants.go b/pkg/apis/application/v1alpha1/cluster_constants.go index d9fc0052de2d9..4200fe23a4ae9 100644 --- a/pkg/apis/application/v1alpha1/cluster_constants.go +++ b/pkg/apis/application/v1alpha1/cluster_constants.go @@ -25,7 +25,7 @@ const ( // EnvK8sTCPTimeout is the duration for TCP timeouts when communicating with K8s API servers EnvK8sTCPTimeout = "ARGOCD_K8S_TCP_TIMEOUT" - // EnvK8sTCPKeepalive is the interval for TCP keep alive probes to be sent when communicating with K8s API servers + // EnvK8sTCPKeepAlive is the interval for TCP keep alive probes to be sent when communicating with K8s API servers EnvK8sTCPKeepAlive = "ARGOCD_K8S_TCP_KEEPALIVE" // EnvK8sTLSHandshakeTimeout is the duration for TLS handshake timeouts when establishing connections to K8s API servers diff --git a/server/application/application.go 
b/server/application/application.go index 686b971d05017..6365b32a877bb 100644 --- a/server/application/application.go +++ b/server/application/application.go @@ -1158,7 +1158,7 @@ func (s *Server) getApplicationClusterConfig(ctx context.Context, a *appv1.Appli if err := argo.ValidateDestination(ctx, &a.Spec.Destination, s.db); err != nil { return nil, fmt.Errorf("error validating destination: %w", err) } - clst, err := s.db.GetCluster(ctx, a.Spec.Destination.Server) + clst, err := argo.GetClusterByDestination(s.db, a.Spec.Destination) if err != nil { return nil, fmt.Errorf("error getting cluster: %w", err) } diff --git a/server/application/terminal.go b/server/application/terminal.go index 4644facee8880..32c464c788608 100644 --- a/server/application/terminal.go +++ b/server/application/terminal.go @@ -55,7 +55,7 @@ func (s *terminalHandler) getApplicationClusterRawConfig(ctx context.Context, a if err := argo.ValidateDestination(ctx, &a.Spec.Destination, s.db); err != nil { return nil, err } - clst, err := s.db.GetCluster(ctx, a.Spec.Destination.Server) + clst, err := s.db.GetClusterByUrl(ctx, a.Spec.Destination.Server) if err != nil { return nil, err } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 2225a5d6ff9fc..c36238089ab7e 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -88,7 +88,7 @@ func (s *Server) Create(ctx context.Context, q *cluster.ClusterCreateRequest) (* if err != nil { if status.Convert(err).Code() == codes.AlreadyExists { // act idempotent if existing spec matches new spec - existing, getErr := s.db.GetCluster(ctx, c.Server) + existing, getErr := s.db.GetClusterByUrl(ctx, c.Server) if getErr != nil { return nil, status.Errorf(codes.Internal, "unable to check existing cluster details: %v", getErr) } @@ -158,7 +158,7 @@ func (s *Server) getCluster(ctx context.Context, q *cluster.ClusterQuery) (*appv } if q.Server != "" { - c, err := s.db.GetCluster(ctx, q.Server) + c, err := s.db.GetClusterByUrl(ctx, 
q.Server) if err != nil { return nil, err } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index dec7d97b8d263..970f64b8051e0 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -150,7 +150,7 @@ func TestUpdateCluster_NoFieldsPaths(t *testing.T) { func TestUpdateCluster_FieldsPathSet(t *testing.T) { db := &dbmocks.ArgoDB{} var updated *v1alpha1.Cluster - db.On("GetCluster", mock.Anything, "https://127.0.0.1").Return(&v1alpha1.Cluster{ + db.On("GetClusterByUrl", mock.Anything, "https://127.0.0.1").Return(&v1alpha1.Cluster{ Name: "minikube", Server: "https://127.0.0.1", Namespaces: []string{"default", "kube-system"}, @@ -261,7 +261,7 @@ func TestDeleteClusterByName(t *testing.T) { }) assert.Nil(t, err) - _, err = db.GetCluster(context.Background(), "https://my-cluster-server") + _, err = db.GetClusterByUrl(context.Background(), "https://my-cluster-server") assert.EqualError(t, err, `rpc error: code = NotFound desc = cluster "https://my-cluster-server" not found`) }) } diff --git a/server/server.go b/server/server.go index 8336348c86911..1e63ba60f77d7 100644 --- a/server/server.go +++ b/server/server.go @@ -850,7 +850,7 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWebHandl // NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from // golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. 
Therefore - // we use our own Marshaler + // we use our own Marshaller gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(grpc_util.JSONMarshaler)) gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) diff --git a/ui/src/app/settings/components/cluster-details/cluster-details.tsx b/ui/src/app/settings/components/cluster-details/cluster-details.tsx index 9e1a7a7923e9f..5a9cb8eb5f52e 100644 --- a/ui/src/app/settings/components/cluster-details/cluster-details.tsx +++ b/ui/src/app/settings/components/cluster-details/cluster-details.tsx @@ -25,7 +25,7 @@ export const ClusterDetails = (props: RouteComponentProps<{server: string}>) => const loaderRef = React.useRef(); const [updating, setUpdating] = React.useState(false); return ( - timer(0, 1000).pipe(mergeMap(() => from(services.clusters.get(url, ''))))}> + timer(0, 5000).pipe(mergeMap(() => from(services.clusters.getByName(name))))}> {(cluster: Cluster) => ( ) => action: async () => { setUpdating(true); try { - const updated = await services.clusters.invalidateCache(props.match.params.server); + const updated = await services.clusters.invalidateCacheByName(props.match.params.server); loaderRef.current.setData(updated); } finally { setUpdating(false); @@ -56,18 +56,19 @@ export const ClusterDetails = (props: RouteComponentProps<{server: string}>) => { - const item = await services.clusters.get(updated.server, ''); + const item = await services.clusters.getByName(updated.name); item.name = updated.name; item.namespaces = updated.namespaces; item.labels = updated.labels; item.annotations = updated.annotations; - loaderRef.current.setData(await services.clusters.update(item, 'name', 'namespaces', 'labels', 'annotations')); + loaderRef.current.setData(await services.clusters.updateByName(item, 'name', 'server', 'namespaces', 'labels', 'annotations')); }} title='GENERAL' items={[ { title: 'SERVER', - view: cluster.server + view: 
cluster.server, + edit: formApi => }, { title: 'CREDENTIALS TYPE', diff --git a/ui/src/app/settings/components/clusters-list/clusters-list.tsx b/ui/src/app/settings/components/clusters-list/clusters-list.tsx index c6dea9ab372aa..b7f6510a2d31d 100644 --- a/ui/src/app/settings/components/clusters-list/clusters-list.tsx +++ b/ui/src/app/settings/components/clusters-list/clusters-list.tsx @@ -34,7 +34,7 @@ export const ClustersList = (props: RouteComponentProps<{}>) => {
ctx.navigation.goto(`./${encodeURIComponent(cluster.server)}`)}> + onClick={() => ctx.navigation.goto(`./${encodeURIComponent(cluster.name)}`)}>
diff --git a/ui/src/app/shared/services/clusters-service.ts b/ui/src/app/shared/services/clusters-service.ts index b806a5fb646ba..2ecf083022563 100644 --- a/ui/src/app/shared/services/clusters-service.ts +++ b/ui/src/app/shared/services/clusters-service.ts @@ -9,29 +9,29 @@ export class ClustersService { .then(list => list.items || []); } - public get(url: string, name: string): Promise { - const requestUrl = `/clusters/${url ? encodeURIComponent(url) : encodeURIComponent(name)}?id.type=${url ? 'url' : 'name_escaped'}`; + public getByName(name: string): Promise { + const requestUrl = `/clusters/${encodeURIComponent(name)}?id.type=name`; return requests.get(requestUrl).then(res => res.body as models.Cluster); } - public update(cluster: models.Cluster, ...paths: string[]): Promise { + public updateByName(cluster: models.Cluster, ...paths: string[]): Promise { return requests - .put(`/clusters/${encodeURIComponent(cluster.server)}`) + .put(`/clusters/${encodeURIComponent(cluster.name)}`) .query({updatedFields: paths}) .send(cluster) .then(res => res.body as models.Cluster); } - public invalidateCache(url: string): Promise { + public invalidateCacheByName(name: string): Promise { return requests - .post(`/clusters/${encodeURIComponent(url)}/invalidate-cache`) + .post(`/clusters/${encodeURIComponent(name)}/invalidate-cache`) .send({}) .then(res => res.body as models.Cluster); } - public delete(server: string): Promise { + public deleteByName(name: string): Promise { return requests - .delete(`/clusters/${encodeURIComponent(server)}`) + .delete(`/clusters/${encodeURIComponent(name)}`) .send() .then(res => res.body as models.Cluster); } diff --git a/ui/src/app/webpack.config.js b/ui/src/app/webpack.config.js index 2b65f832c0151..32b05399cbd5b 100644 --- a/ui/src/app/webpack.config.js +++ b/ui/src/app/webpack.config.js @@ -12,7 +12,8 @@ console.log(`Bundling in ${isProd ? 
'production' : 'development'}...`); const proxyConf = { target: process.env.ARGOCD_API_URL || 'http://localhost:8080', - secure: false + secure: false, + changeOrigin: process.env.ARGOCD_API_CHANGE_ORIGIN === 'true', }; const config = { diff --git a/util/argo/argo.go b/util/argo/argo.go index 1c5a2c2040ca0..576f8aac73ba5 100644 --- a/util/argo/argo.go +++ b/util/argo/argo.go @@ -245,7 +245,7 @@ func ValidateRepo( return nil, fmt.Errorf("error getting permitted repo creds: %w", err) } - cluster, err := db.GetCluster(context.Background(), spec.Destination.Server) + cluster, err := GetClusterByDestination(db, spec.Destination) if err != nil { conditions = append(conditions, argoappv1.ApplicationCondition{ Type: argoappv1.ApplicationConditionInvalidSpecError, @@ -361,7 +361,7 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p }) } // Ensure the k8s cluster the app is referencing, is configured in Argo CD - _, err = db.GetCluster(ctx, spec.Destination.Server) + _, err = db.GetClusterByUrl(ctx, spec.Destination.Server) if err != nil { if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound { conditions = append(conditions, argoappv1.ApplicationCondition{ @@ -467,6 +467,19 @@ func GetAppProject(app *argoappv1.Application, projLister applicationsv1.AppProj return proj, nil } +// GetClusterByDestination returns a Cluster based on the specified spec.Destination +// by calling db. +func GetClusterByDestination(db db.ArgoDB, dest argoappv1.ApplicationDestination) (*argoappv1.Cluster, error) { + if dest.Name != "" { + // User wants to use a specific cluster, we can't solely rely on the + // app.Spec.Destination.Server URL since a user might specify + // multiple Kubernetes clusters with the same API URL but different + // tokens to access different namespaces. 
+ return db.GetClusterByName(context.Background(), dest.Name) + } + return db.GetClusterByUrl(context.Background(), dest.Server) +} + // verifyGenerateManifests verifies a repo path can generate manifests func verifyGenerateManifests(ctx context.Context, repoRes *argoappv1.Repository, helmRepos argoappv1.Repositories, helmOptions *argoappv1.HelmOptions, app *argoappv1.Application, repoClient apiclient.RepoServerServiceClient, kustomizeOptions *argoappv1.KustomizeOptions, plugins []*argoappv1.ConfigManagementPlugin, kubeVersion string, apiVersions []string, repositoryCredentials []*argoappv1.RepoCreds, enableGenerateManifests map[string]bool, settingsMgr *settings.SettingsManager) []argoappv1.ApplicationCondition { spec := &app.Spec @@ -727,7 +740,7 @@ func parseAppName(appName string, defaultNs string, delim string) (string, strin return name, ns } -// ParseAppNamespacedName parses a namespaced name in the format namespace/name +// ParseAppQualifiedName parses a namespaced name in the format namespace/name // and returns the components. If name wasn't namespaced, defaultNs will be // returned as namespace component. 
func ParseAppQualifiedName(appName string, defaultNs string) (string, string) { diff --git a/util/argo/argo_test.go b/util/argo/argo_test.go index 9bf707d126c22..c59d65535262c 100644 --- a/util/argo/argo_test.go +++ b/util/argo/argo_test.go @@ -241,7 +241,7 @@ func TestValidateRepo(t *testing.T) { db.On("GetRepository", context.Background(), app.Spec.Source.RepoURL).Return(repo, nil) db.On("ListHelmRepositories", context.Background()).Return(helmRepos, nil) - db.On("GetCluster", context.Background(), app.Spec.Destination.Server).Return(cluster, nil) + db.On("GetClusterByUrl", context.Background(), app.Spec.Destination.Server).Return(cluster, nil) db.On("GetAllHelmRepositoryCredentials", context.Background()).Return(nil, nil) var receivedRequest *apiclient.ManifestRequest @@ -464,7 +464,7 @@ func TestValidatePermissions(t *testing.T) { } cluster := &argoappv1.Cluster{Server: "https://127.0.0.1:6443"} db := &dbmocks.ArgoDB{} - db.On("GetCluster", context.Background(), spec.Destination.Server).Return(cluster, nil) + db.On("GetClusterByUrl", context.Background(), spec.Destination.Server).Return(cluster, nil) conditions, err := ValidatePermissions(context.Background(), &spec, &proj, db) assert.NoError(t, err) assert.Len(t, conditions, 1) @@ -497,7 +497,7 @@ func TestValidatePermissions(t *testing.T) { } cluster := &argoappv1.Cluster{Server: "https://127.0.0.1:6443"} db := &dbmocks.ArgoDB{} - db.On("GetCluster", context.Background(), spec.Destination.Server).Return(cluster, nil) + db.On("GetClusterByUrl", context.Background(), spec.Destination.Server).Return(cluster, nil) conditions, err := ValidatePermissions(context.Background(), &spec, &proj, db) assert.NoError(t, err) assert.Len(t, conditions, 1) @@ -529,7 +529,7 @@ func TestValidatePermissions(t *testing.T) { }, } db := &dbmocks.ArgoDB{} - db.On("GetCluster", context.Background(), spec.Destination.Server).Return(nil, status.Errorf(codes.NotFound, "Cluster does not exist")) + db.On("GetClusterByUrl", 
context.Background(), spec.Destination.Server).Return(nil, status.Errorf(codes.NotFound, "Cluster does not exist")) conditions, err := ValidatePermissions(context.Background(), &spec, &proj, db) assert.NoError(t, err) assert.Len(t, conditions, 1) @@ -593,7 +593,7 @@ func TestValidatePermissions(t *testing.T) { }, } db := &dbmocks.ArgoDB{} - db.On("GetCluster", context.Background(), spec.Destination.Server).Return(nil, fmt.Errorf("Unknown error occurred")) + db.On("GetClusterByUrl", context.Background(), spec.Destination.Server).Return(nil, fmt.Errorf("Unknown error occurred")) _, err := ValidatePermissions(context.Background(), &spec, &proj, db) assert.Error(t, err) }) @@ -628,7 +628,7 @@ func TestValidatePermissions(t *testing.T) { Server: "https://127.0.0.1:6443", } db.On("GetClusterServersByName", context.Background(), "does-exist").Return([]string{"https://127.0.0.1:6443"}, nil) - db.On("GetCluster", context.Background(), "https://127.0.0.1:6443").Return(&cluster, nil) + db.On("GetClusterByUrl", context.Background(), "https://127.0.0.1:6443").Return(&cluster, nil) conditions, err := ValidatePermissions(context.Background(), &spec, &proj, db) assert.NoError(t, err) assert.Len(t, conditions, 0) @@ -763,7 +763,6 @@ func TestValidateDestination(t *testing.T) { assert.Equal(t, "unable to find destination server: there are 2 clusters with the same name: [https://127.0.0.1:2443 https://127.0.0.1:8443]", err.Error()) assert.False(t, dest.IsServerInferred()) }) - } func TestFilterByName(t *testing.T) { diff --git a/util/db/cluster.go b/util/db/cluster.go index c3d104d89239a..d1e01cd5af125 100644 --- a/util/db/cluster.go +++ b/util/db/cluster.go @@ -26,9 +26,11 @@ import ( var ( localCluster = appv1.Cluster{ - Name: "in-cluster", - Server: appv1.KubernetesInternalAPIServerAddr, - ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}, + Name: "in-cluster", + Server: appv1.KubernetesInternalAPIServerAddr, + Info: appv1.ClusterInfo{ + 
ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}, + }, } initLocalCluster sync.Once ) @@ -37,10 +39,10 @@ func (db *db) getLocalCluster() *appv1.Cluster { initLocalCluster.Do(func() { info, err := db.kubeclientset.Discovery().ServerVersion() if err == nil { - localCluster.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor) - localCluster.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful} + localCluster.Info.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor) + localCluster.Info.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful} } else { - localCluster.ConnectionState = appv1.ConnectionState{ + localCluster.Info.ConnectionState = appv1.ConnectionState{ Status: appv1.ConnectionStatusFailed, Message: err.Error(), } @@ -48,7 +50,7 @@ func (db *db) getLocalCluster() *appv1.Cluster { }) cluster := localCluster.DeepCopy() now := metav1.Now() - cluster.ConnectionState.ModifiedAt = &now + cluster.Info.ConnectionState.ModifiedAt = &now return cluster } @@ -139,7 +141,7 @@ func (db *db) WatchClusters(ctx context.Context, handleAddEvent func(cluster *appv1.Cluster), handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster), handleDeleteEvent func(clusterServer string)) error { - localCls, err := db.GetCluster(ctx, appv1.KubernetesInternalAPIServerAddr) + localCls, err := db.GetClusterByUrl(ctx, appv1.KubernetesInternalAPIServerAddr) if err != nil { return err } @@ -209,24 +211,45 @@ func (db *db) getClusterSecret(server string) (*apiv1.Secret, error) { return nil, status.Errorf(codes.NotFound, "cluster %q not found", server) } -// GetCluster returns a cluster from a query -func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) { +// GetClusterByUrl returns a cluster from a query +func (db *db) GetClusterByUrl(_ context.Context, serverUrl string) (*appv1.Cluster, error) { informer, err := db.settingsMgr.GetSecretsInformer() 
if err != nil { return nil, err } - res, err := informer.GetIndexer().ByIndex(settings.ByClusterURLIndexer, server) + res, err := informer.GetIndexer().ByIndex(settings.ByClusterURLIndexer, serverUrl) if err != nil { return nil, err } if len(res) > 0 { return secretToCluster(res[0].(*apiv1.Secret)) } - if server == appv1.KubernetesInternalAPIServerAddr { + if serverUrl == appv1.KubernetesInternalAPIServerAddr { return db.getLocalCluster(), nil } - return nil, status.Errorf(codes.NotFound, "cluster %q not found", server) + return nil, status.Errorf(codes.NotFound, "cluster %q not found", serverUrl) +} + +// GetClusterByName returns a cluster from a query +func (db *db) GetClusterByName(_ context.Context, serverName string) (*appv1.Cluster, error) { + if serverName == "in-cluster" { + return db.getLocalCluster(), nil + } + + informer, err := db.settingsMgr.GetSecretsInformer() + if err != nil { + return nil, err + } + res, err := informer.GetIndexer().ByIndex(settings.ByClusterNameIndexer, serverName) + if err != nil { + return nil, err + } + if len(res) > 0 { + return secretToCluster(res[0].(*apiv1.Secret)) + } + + return nil, status.Errorf(codes.NotFound, "cluster with name %q not found", serverName) } // GetProjectClusters return project scoped clusters by given project name diff --git a/util/db/cluster_test.go b/util/db/cluster_test.go index c3b273b4fe5ef..94b3b24bb7750 100644 --- a/util/db/cluster_test.go +++ b/util/db/cluster_test.go @@ -302,15 +302,29 @@ func TestListClusters(t *testing.T) { }, }, Data: map[string][]byte{ - "server": []byte("http://mycluster2"), + "server": []byte("http://mycluster-addr"), "name": []byte("mycluster2"), }, } - invalidSecret := &v1.Secret{ + secretForServerWithExternalClusterAddrCopied := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "mycluster3", Namespace: fakeNamespace, + Labels: map[string]string{ + common.LabelKeySecretType: common.LabelValueSecretTypeCluster, + }, + }, + Data: map[string][]byte{ + "server": 
[]byte("http://mycluster-addr"), + "name": []byte("mycluster3"), + }, + } + + invalidSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mycluster4", + Namespace: fakeNamespace, }, Data: map[string][]byte{ "name": []byte("test"), @@ -320,13 +334,19 @@ func TestListClusters(t *testing.T) { } t.Run("Valid clusters", func(t *testing.T) { - kubeclientset := fake.NewSimpleClientset(secretForServerWithInClusterAddr, secretForServerWithExternalClusterAddr, emptyArgoCDConfigMap, argoCDSecret) + kubeclientset := fake.NewSimpleClientset( + secretForServerWithInClusterAddr, + secretForServerWithExternalClusterAddr, + secretForServerWithExternalClusterAddrCopied, + emptyArgoCDConfigMap, + argoCDSecret, + ) settingsManager := settings.NewSettingsManager(context.Background(), kubeclientset, fakeNamespace) db := NewDB(fakeNamespace, settingsManager, kubeclientset) clusters, err := db.ListClusters(context.TODO()) require.NoError(t, err) - assert.Len(t, clusters.Items, 2) + assert.Len(t, clusters.Items, 3) }) t.Run("Cluster list with invalid cluster", func(t *testing.T) { @@ -369,4 +389,20 @@ func TestListClusters(t *testing.T) { require.NoError(t, err) assert.Len(t, clusters.Items, 1) }) + + t.Run("Get cluster with same API Address", func(t *testing.T) { + kubeclientset := fake.NewSimpleClientset( + secretForServerWithInClusterAddr, + secretForServerWithExternalClusterAddr, + secretForServerWithExternalClusterAddrCopied, + emptyArgoCDConfigMap, + argoCDSecret, + ) + settingsManager := settings.NewSettingsManager(context.Background(), kubeclientset, fakeNamespace) + db := NewDB(fakeNamespace, settingsManager, kubeclientset) + + clusterServer, err := db.GetClusterServersByName(context.TODO(), "mycluster3") + require.NoError(t, err) + assert.Len(t, clusterServer, 1) + }) } diff --git a/util/db/db.go b/util/db/db.go index 05ae38e75bb84..29ffcddc1513e 100644 --- a/util/db/db.go +++ b/util/db/db.go @@ -11,8 +11,8 @@ import ( "github.com/argoproj/argo-cd/v2/util/settings" ) 
-// SecretMaperValidation determine whether the secret should be transformed(i.e. trailing CRLF characters trimmed) -type SecretMaperValidation struct { +// SecretMapperValidation determine whether the secret should be transformed(i.e. trailing CRLF characters trimmed) +type SecretMapperValidation struct { Dest *string Transform func(string) string } @@ -27,8 +27,10 @@ type ArgoDB interface { handleAddEvent func(cluster *appv1.Cluster), handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster), handleDeleteEvent func(clusterServer string)) error - // GetCluster returns a cluster by given server url - GetCluster(ctx context.Context, server string) (*appv1.Cluster, error) + // GetClusterByUrl returns a cluster by given server url + GetClusterByUrl(ctx context.Context, serverUrl string) (*appv1.Cluster, error) + // GetClusterByName returns a cluster by its unique name + GetClusterByName(ctx context.Context, serverName string) (*appv1.Cluster, error) // GetClusterServersByName returns a cluster server urls by given cluster name GetClusterServersByName(ctx context.Context, name string) ([]string, error) // GetProjectClusters return project scoped clusters by given project name @@ -54,22 +56,22 @@ type ArgoDB interface { // DeleteRepository deletes a repository from config DeleteRepository(ctx context.Context, name string) error - // ListRepoCredentials list all repo credential sets URL patterns + // ListRepositoryCredentials list all repo credential sets URL patterns ListRepositoryCredentials(ctx context.Context) ([]string, error) - // GetRepoCredentials gets repo credentials for given URL + // GetRepositoryCredentials gets repo credentials for given URL GetRepositoryCredentials(ctx context.Context, name string) (*appv1.RepoCreds, error) - // CreateRepoCredentials creates a repository credential set + // CreateRepositoryCredentials creates a repository credential set CreateRepositoryCredentials(ctx context.Context, r *appv1.RepoCreds) (*appv1.RepoCreds, 
error) - // UpdateRepoCredentials updates a repository credential set + // UpdateRepositoryCredentials updates a repository credential set UpdateRepositoryCredentials(ctx context.Context, r *appv1.RepoCreds) (*appv1.RepoCreds, error) - // DeleteRepoCredentials deletes a repository credential set from config + // DeleteRepositoryCredentials deletes a repository credential set from config DeleteRepositoryCredentials(ctx context.Context, name string) error // ListRepoCertificates lists all configured certificates ListRepoCertificates(ctx context.Context, selector *CertificateListSelector) (*appv1.RepositoryCertificateList, error) // CreateRepoCertificate creates a new certificate entry CreateRepoCertificate(ctx context.Context, certificate *appv1.RepositoryCertificateList, upsert bool) (*appv1.RepositoryCertificateList, error) - // CreateRepoCertificate creates a new certificate entry + // RemoveRepoCertificates removes a certificate entry RemoveRepoCertificates(ctx context.Context, selector *CertificateListSelector) (*appv1.RepositoryCertificateList, error) // GetAllHelmRepositoryCredentials gets all repo credentials GetAllHelmRepositoryCredentials(ctx context.Context) ([]*appv1.RepoCreds, error) @@ -119,7 +121,7 @@ func (db *db) getSecret(name string, cache map[string]*v1.Secret) (*v1.Secret, e return secret, nil } -func (db *db) unmarshalFromSecretsStr(secrets map[*SecretMaperValidation]*v1.SecretKeySelector, cache map[string]*v1.Secret) error { +func (db *db) unmarshalFromSecretsStr(secrets map[*SecretMapperValidation]*v1.SecretKeySelector, cache map[string]*v1.Secret) error { for dst, src := range secrets { if src != nil { secret, err := db.getSecret(src.Name, cache) diff --git a/util/db/db_test.go b/util/db/db_test.go index feb204e7d5cf7..0dfce813e77fd 100644 --- a/util/db/db_test.go +++ b/util/db/db_test.go @@ -464,7 +464,7 @@ func TestGetClusterSuccessful(t *testing.T) { }) db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, 
testNamespace), clientset) - cluster, err := db.GetCluster(context.Background(), server) + cluster, err := db.GetClusterByUrl(context.Background(), server) assert.NoError(t, err) assert.Equal(t, server, cluster.Server) assert.Equal(t, name, cluster.Name) @@ -475,7 +475,7 @@ func TestGetNonExistingCluster(t *testing.T) { clientset := getClientset(nil) db := NewDB(testNamespace, settings.NewSettingsManager(context.Background(), clientset, testNamespace), clientset) - _, err := db.GetCluster(context.Background(), server) + _, err := db.GetClusterByUrl(context.Background(), server) assert.NotNil(t, err) status, ok := status.FromError(err) assert.True(t, ok) diff --git a/util/db/helmrepository.go b/util/db/helmrepository.go index f369ef3da6bc5..792d2c98509e8 100644 --- a/util/db/helmrepository.go +++ b/util/db/helmrepository.go @@ -34,11 +34,11 @@ func (db *db) getHelmRepo(repoURL string, helmRepositories []settings.HelmRepoCr Type: "helm", Name: repoInfo.Name, } - err := db.unmarshalFromSecretsStr(map[*SecretMaperValidation]*v1.SecretKeySelector{ - &SecretMaperValidation{Dest: &repo.Username, Transform: StripCRLFCharacter}: repoInfo.UsernameSecret, - &SecretMaperValidation{Dest: &repo.Password, Transform: StripCRLFCharacter}: repoInfo.PasswordSecret, - &SecretMaperValidation{Dest: &repo.TLSClientCertData, Transform: StripCRLFCharacter}: repoInfo.CertSecret, - &SecretMaperValidation{Dest: &repo.TLSClientCertKey, Transform: StripCRLFCharacter}: repoInfo.KeySecret, + err := db.unmarshalFromSecretsStr(map[*SecretMapperValidation]*v1.SecretKeySelector{ + &SecretMapperValidation{Dest: &repo.Username, Transform: StripCRLFCharacter}: repoInfo.UsernameSecret, + &SecretMapperValidation{Dest: &repo.Password, Transform: StripCRLFCharacter}: repoInfo.PasswordSecret, + &SecretMapperValidation{Dest: &repo.TLSClientCertData, Transform: StripCRLFCharacter}: repoInfo.CertSecret, + &SecretMapperValidation{Dest: &repo.TLSClientCertKey, Transform: StripCRLFCharacter}: repoInfo.KeySecret, 
}, make(map[string]*v1.Secret)) return repo, err } diff --git a/util/db/mocks/ArgoDB.go b/util/db/mocks/ArgoDB.go index eed84975d9080..52d0a9ba993ca 100644 --- a/util/db/mocks/ArgoDB.go +++ b/util/db/mocks/ArgoDB.go @@ -1,14 +1,14 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks import ( context "context" + db "github.com/argoproj/argo-cd/v2/util/db" mock "github.com/stretchr/testify/mock" v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - db "github.com/argoproj/argo-cd/v2/util/db" ) // ArgoDB is an autogenerated mock type for the ArgoDB type @@ -219,13 +219,36 @@ func (_m *ArgoDB) GetAllHelmRepositoryCredentials(ctx context.Context) ([]*v1alp return r0, r1 } -// GetCluster provides a mock function with given fields: ctx, server -func (_m *ArgoDB) GetCluster(ctx context.Context, server string) (*v1alpha1.Cluster, error) { - ret := _m.Called(ctx, server) +// GetClusterByName provides a mock function with given fields: ctx, serverName +func (_m *ArgoDB) GetClusterByName(ctx context.Context, serverName string) (*v1alpha1.Cluster, error) { + ret := _m.Called(ctx, serverName) var r0 *v1alpha1.Cluster if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.Cluster); ok { - r0 = rf(ctx, server) + r0 = rf(ctx, serverName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1alpha1.Cluster) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, serverName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetClusterByUrl provides a mock function with given fields: ctx, serverUrl +func (_m *ArgoDB) GetClusterByUrl(ctx context.Context, serverUrl string) (*v1alpha1.Cluster, error) { + ret := _m.Called(ctx, serverUrl) + + var r0 *v1alpha1.Cluster + if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.Cluster); ok { + r0 = rf(ctx, serverUrl) } else { if ret.Get(0) != nil { r0 = 
ret.Get(0).(*v1alpha1.Cluster) @@ -234,7 +257,7 @@ func (_m *ArgoDB) GetCluster(ctx context.Context, server string) (*v1alpha1.Clus var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, server) + r1 = rf(ctx, serverUrl) } else { r1 = ret.Error(1) } @@ -621,3 +644,18 @@ func (_m *ArgoDB) WatchClusters(ctx context.Context, handleAddEvent func(*v1alph return r0 } + +type mockConstructorTestingTNewArgoDB interface { + mock.TestingT + Cleanup(func()) +} + +// NewArgoDB creates a new instance of ArgoDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewArgoDB(t mockConstructorTestingTNewArgoDB) *ArgoDB { + mock := &ArgoDB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/util/db/repository_legacy.go b/util/db/repository_legacy.go index 0b5ffd84154f7..4d77187d4fe47 100644 --- a/util/db/repository_legacy.go +++ b/util/db/repository_legacy.go @@ -383,13 +383,13 @@ func (l *legacyRepositoryBackend) credentialsToRepository(repoInfo settings.Repo GitHubAppEnterpriseBaseURL: repoInfo.GithubAppEnterpriseBaseURL, Proxy: repoInfo.Proxy, } - err := l.db.unmarshalFromSecretsStr(map[*SecretMaperValidation]*apiv1.SecretKeySelector{ - &SecretMaperValidation{Dest: &repo.Username, Transform: StripCRLFCharacter}: repoInfo.UsernameSecret, - &SecretMaperValidation{Dest: &repo.Password, Transform: StripCRLFCharacter}: repoInfo.PasswordSecret, - &SecretMaperValidation{Dest: &repo.SSHPrivateKey, Transform: StripCRLFCharacter}: repoInfo.SSHPrivateKeySecret, - &SecretMaperValidation{Dest: &repo.TLSClientCertData, Transform: StripCRLFCharacter}: repoInfo.TLSClientCertDataSecret, - &SecretMaperValidation{Dest: &repo.TLSClientCertKey, Transform: StripCRLFCharacter}: repoInfo.TLSClientCertKeySecret, - &SecretMaperValidation{Dest: &repo.GithubAppPrivateKey, Transform: StripCRLFCharacter}: repoInfo.GithubAppPrivateKeySecret, + err := 
l.db.unmarshalFromSecretsStr(map[*SecretMapperValidation]*apiv1.SecretKeySelector{ + &SecretMapperValidation{Dest: &repo.Username, Transform: StripCRLFCharacter}: repoInfo.UsernameSecret, + &SecretMapperValidation{Dest: &repo.Password, Transform: StripCRLFCharacter}: repoInfo.PasswordSecret, + &SecretMapperValidation{Dest: &repo.SSHPrivateKey, Transform: StripCRLFCharacter}: repoInfo.SSHPrivateKeySecret, + &SecretMapperValidation{Dest: &repo.TLSClientCertData, Transform: StripCRLFCharacter}: repoInfo.TLSClientCertDataSecret, + &SecretMapperValidation{Dest: &repo.TLSClientCertKey, Transform: StripCRLFCharacter}: repoInfo.TLSClientCertKeySecret, + &SecretMapperValidation{Dest: &repo.GithubAppPrivateKey, Transform: StripCRLFCharacter}: repoInfo.GithubAppPrivateKeySecret, }, make(map[string]*apiv1.Secret)) return repo, err } @@ -402,13 +402,13 @@ func (l *legacyRepositoryBackend) credentialsToRepositoryCredentials(repoInfo se GitHubAppEnterpriseBaseURL: repoInfo.GithubAppEnterpriseBaseURL, EnableOCI: repoInfo.EnableOCI, } - err := l.db.unmarshalFromSecretsStr(map[*SecretMaperValidation]*apiv1.SecretKeySelector{ - &SecretMaperValidation{Dest: &creds.Username}: repoInfo.UsernameSecret, - &SecretMaperValidation{Dest: &creds.Password}: repoInfo.PasswordSecret, - &SecretMaperValidation{Dest: &creds.SSHPrivateKey}: repoInfo.SSHPrivateKeySecret, - &SecretMaperValidation{Dest: &creds.TLSClientCertData}: repoInfo.TLSClientCertDataSecret, - &SecretMaperValidation{Dest: &creds.TLSClientCertKey}: repoInfo.TLSClientCertKeySecret, - &SecretMaperValidation{Dest: &creds.GithubAppPrivateKey}: repoInfo.GithubAppPrivateKeySecret, + err := l.db.unmarshalFromSecretsStr(map[*SecretMapperValidation]*apiv1.SecretKeySelector{ + &SecretMapperValidation{Dest: &creds.Username}: repoInfo.UsernameSecret, + &SecretMapperValidation{Dest: &creds.Password}: repoInfo.PasswordSecret, + &SecretMapperValidation{Dest: &creds.SSHPrivateKey}: repoInfo.SSHPrivateKeySecret, + &SecretMapperValidation{Dest: 
&creds.TLSClientCertData}: repoInfo.TLSClientCertDataSecret, + &SecretMapperValidation{Dest: &creds.TLSClientCertKey}: repoInfo.TLSClientCertKeySecret, + &SecretMapperValidation{Dest: &creds.GithubAppPrivateKey}: repoInfo.GithubAppPrivateKeySecret, }, make(map[string]*apiv1.Secret)) return creds, err } diff --git a/util/settings/settings.go b/util/settings/settings.go index 80a1c6da46fd4..97b521ab9f643 100644 --- a/util/settings/settings.go +++ b/util/settings/settings.go @@ -252,45 +252,45 @@ func (ks *KustomizeSettings) GetOptions(source v1alpha1.ApplicationSource) (*v1a }, nil } -// Credentials for accessing a Git repository +// Repository contains the configuration to access a repository (Git / Helm) type Repository struct { - // The URL to the repository + // The URL to the Git repository / Helm Chart URL string `json:"url,omitempty"` - // the type of the repo, "git" or "helm", assumed to be "git" if empty or absent + // Type is type of the repo, can be either "git" or "helm", assumed to be "git" if empty or absent Type string `json:"type,omitempty"` - // helm only + // Name of the Helm Chart (Helm Only) Name string `json:"name,omitempty"` - // Name of the secret storing the username used to access the repo + // UsernameSecret is the name of the secret storing the username used to access the repo UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty"` - // Name of the secret storing the password used to access the repo + // PasswordSecret is the name of the secret storing the password used to access the repo PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty"` - // Name of the secret storing the SSH private key used to access the repo. Git only + // SSHPrivateKeySecret is the name of the secret storing the SSH private key used to access the repo. 
Git only SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty"` - // Whether to connect the repository in an insecure way (deprecated) + // InsecureIgnoreHostKey defines whether to connect the repository in an insecure way (deprecated) InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty"` - // Whether to connect the repository in an insecure way + // Insecure defines whether to connect the repository in an insecure way Insecure bool `json:"insecure,omitempty"` - // Whether the repo is git-lfs enabled. Git only. + // EnableLFS defines whether the repo is git-lfs enabled. Git only. EnableLFS bool `json:"enableLfs,omitempty"` - // Name of the secret storing the TLS client cert data + // TLSClientCertDataSecret is the name of the secret storing the TLS client cert data TLSClientCertDataSecret *apiv1.SecretKeySelector `json:"tlsClientCertDataSecret,omitempty"` - // Name of the secret storing the TLS client cert's key data + // TLSClientCertKeySecret is the name of the secret storing the TLS client cert's key data TLSClientCertKeySecret *apiv1.SecretKeySelector `json:"tlsClientCertKeySecret,omitempty"` - // Whether the repo is helm-oci enabled. Git only. + // EnableOci Whether the repo is helm-oci enabled. Git only. 
EnableOci bool `json:"enableOci,omitempty"` - // Github App Private Key PEM data + // GithubAppPrivateKeySecret Private Key PEM data for GitHub GithubAppPrivateKeySecret *apiv1.SecretKeySelector `json:"githubAppPrivateKeySecret,omitempty"` - // Github App ID of the app used to access the repo + // GithubAppId APP ID of the app used to access the repo GithubAppId int64 `json:"githubAppID,omitempty"` - // Github App Installation ID of the installed GitHub App + // GithubAppInstallationId Installation ID of the installed GitHub App GithubAppInstallationId int64 `json:"githubAppInstallationID,omitempty"` - // Github App Enterprise base url if empty will default to https://api.github.com + // GithubAppEnterpriseBaseURL the base url to use for GitHub Enterprise: if empty will default to https://api.github.com GithubAppEnterpriseBaseURL string `json:"githubAppEnterpriseBaseUrl,omitempty"` // Proxy specifies the HTTP/HTTPS proxy used to access the repo Proxy string `json:"proxy,omitempty"` } -// Credential template for accessing repositories +// RepositoryCredentials defines the credentials used to access a Repository type RepositoryCredentials struct { // The URL pattern the repository URL has to match URL string `json:"url,omitempty"`