From 73b3d2ed683da704ca99ce22008764631fecf09d Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 22:05:55 -0500 Subject: [PATCH] [V2] Enable support for shippers (#1527) * Work on adding shipper support. * Fix fmt. * Fix reference to spec. Allow shipper to be null but still enabled if key exists. * Move supported shippers into its own key in the input specification. * Fix issue in merge. * Implement fake shipper and add fake shipper output to the fake component. * Add protoc to the test target. * Don't generate fake shipper protocol in test. * Commit fake GRPC into code. * Add unit test for running with shipper, with sending event between running componentn and running shipper. * Add docstring for shipper test. * Add changelog fragement. * Adjust paths for shipper to work on windows and better on unix. * Update changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml Co-authored-by: Craig MacKenzie * Fix fake/component to connect over npipe on windows. Co-authored-by: Craig MacKenzie --- .gitignore | 9 +- ...for-running-the-elastic-agent-shipper.yaml | 31 + control.proto | 2 + .../handlers/handler_action_application.go | 10 +- .../application/coordinator/coordinator.go | 13 +- .../application/fleet_server_bootstrap.go | 4 +- .../gateway/fleet/fleet_gateway.go | 10 +- .../pkg/agent/application/managed_mode.go | 2 +- internal/pkg/agent/application/paths/paths.go | 7 +- .../agent/application/paths/paths_darwin.go | 7 +- .../agent/application/paths/paths_windows.go | 7 +- internal/pkg/agent/cmd/inspect.go | 6 +- internal/pkg/agent/control/addr.go | 2 +- internal/pkg/agent/control/addr_windows.go | 2 +- internal/pkg/agent/control/client/client.go | 11 +- .../pkg/agent/control/cproto/control.pb.go | 166 ++--- internal/pkg/agent/control/server/server.go | 19 +- internal/pkg/agent/install/uninstall.go | 2 +- internal/pkg/fleetapi/checkin_cmd.go | 19 +- magefile.go | 35 +- pkg/component/component.go | 202 +++++- pkg/component/component_test.go | 621 +++++++++++++++++- pkg/component/fake/common/common.go | 34 + pkg/component/fake/common/event.pb.go | 235 +++++++ pkg/component/fake/common/event.proto | 29 + pkg/component/fake/common/event_grpc.pb.go | 112 ++++ pkg/component/fake/{ => component}/README.md | 0 pkg/component/fake/component/dialer.go | 27 + .../fake/component/dialer_windows.go | 27 + pkg/component/fake/component/main.go | 580 ++++++++++++++++ pkg/component/fake/main.go | 342 ---------- pkg/component/fake/shipper/README.md | 3 + pkg/component/fake/shipper/listener.go | 29 + .../fake/shipper/listener_windows.go | 38 ++ pkg/component/fake/shipper/main.go | 514 +++++++++++++++ pkg/component/input_spec.go | 8 + pkg/component/load.go | 108 ++- pkg/component/output_spec.go | 31 - pkg/component/outputs.go | 21 - pkg/component/runtime/command.go | 73 +- pkg/component/runtime/manager.go | 53 +- pkg/component/runtime/manager_shipper.go | 127 ++++ pkg/component/runtime/manager_shipper_unix.go | 33 + .../runtime/manager_shipper_windows.go | 29 + pkg/component/runtime/manager_test.go | 358 +++++++++- pkg/component/runtime/runtime.go | 21 +- pkg/component/runtime/service.go | 42 +- pkg/component/runtime/shipper.go | 13 + pkg/component/shipper_spec.go | 33 + pkg/component/spec.go | 25 +- specs/filebeat.spec.yml | 363 +++++----- specs/metricbeat.spec.yml | 350 +++++----- specs/shipper.spec.yml | 18 + 53 files changed, 3847 insertions(+), 1016 deletions(-) create mode 100644 changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml 
create mode 100644 pkg/component/fake/common/common.go create mode 100644 pkg/component/fake/common/event.pb.go create mode 100644 pkg/component/fake/common/event.proto create mode 100644 pkg/component/fake/common/event_grpc.pb.go rename pkg/component/fake/{ => component}/README.md (100%) create mode 100644 pkg/component/fake/component/dialer.go create mode 100644 pkg/component/fake/component/dialer_windows.go create mode 100644 pkg/component/fake/component/main.go delete mode 100644 pkg/component/fake/main.go create mode 100644 pkg/component/fake/shipper/README.md create mode 100644 pkg/component/fake/shipper/listener.go create mode 100644 pkg/component/fake/shipper/listener_windows.go create mode 100644 pkg/component/fake/shipper/main.go delete mode 100644 pkg/component/output_spec.go delete mode 100644 pkg/component/outputs.go create mode 100644 pkg/component/runtime/manager_shipper.go create mode 100644 pkg/component/runtime/manager_shipper_unix.go create mode 100644 pkg/component/runtime/manager_shipper_windows.go create mode 100644 pkg/component/runtime/shipper.go create mode 100644 pkg/component/shipper_spec.go create mode 100644 specs/shipper.spec.yml diff --git a/.gitignore b/.gitignore index 9940bf5068e..476cfd50764 100644 --- a/.gitignore +++ b/.gitignore @@ -45,7 +45,6 @@ fleet.enc.lock # Files generated with the bump version automations *.bck - # agent build/ elastic-agent @@ -54,9 +53,5 @@ elastic-agent.yml.* fleet.yml fleet.yml.lock fleet.yml.old -internal/pkg/agent/application/fleet.yml -internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec -pkg/component/fake/fake - -# VSCode -/.vscode +pkg/component/fake/component/component +pkg/component/fake/shipper/shipper diff --git a/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml new file mode 100644 index 00000000000..26430b05741 --- /dev/null +++ b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Add experimental support for running the elastic-agent-shipper + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. 
+pr: 1527 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: 219 diff --git a/control.proto b/control.proto index 4bcef0ea3ed..25eef00de4c 100644 --- a/control.proto +++ b/control.proto @@ -193,6 +193,8 @@ message DiagnosticAgentResponse { // DiagnosticUnitRequest specifies a specific unit to gather diagnostics from. message DiagnosticUnitRequest { + // ID of the component. + string component_id = 1; // Type of unit. UnitType unit_type = 2; // ID of the unit. diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index 552427a16b0..462340476eb 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -49,7 +49,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A } state := h.coord.State(false) - unit, ok := findUnitFromInputType(state, action.InputType) + comp, unit, ok := findUnitFromInputType(state, action.InputType) if !ok { // If the matching action is not found ack the action with the error for action result document action.StartedAt = time.Now().UTC().Format(time.RFC3339Nano) @@ -78,7 +78,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - res, err = h.coord.PerformAction(ctx, unit, action.InputType, params) + res, err = h.coord.PerformAction(ctx, comp, unit, action.InputType, params) } end := time.Now().UTC() @@ -151,13 +151,13 @@ func readMapString(m map[string]interface{}, key string, def string) string { return def } -func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) { +func findUnitFromInputType(state coordinator.State, inputType string) (component.Component, component.Unit, bool) { for _, comp := range state.Components { for _, unit := range comp.Component.Units { if unit.Type == client.UnitTypeInput && unit.Config != nil && unit.Config.Type == inputType { - return unit, true + return comp.Component, unit, true } } } - return component.Unit{}, false + return component.Component{}, component.Unit{}, false } diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index d244f85b531..48a476a5164 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -91,14 +91,14 @@ type RuntimeManager interface { State() []runtime.ComponentComponentState // PerformAction executes an action on a unit. - PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) + PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) // SubscribeAll provides an interface to watch for changes in all components. SubscribeAll(context.Context) *runtime.SubscriptionAll // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then // it performs diagnostics for all current units. 
- PerformDiagnostics(context.Context, ...component.Unit) []runtime.ComponentUnitDiagnostic + PerformDiagnostics(context.Context, ...runtime.ComponentUnitDiagnosticRequest) []runtime.ComponentUnitDiagnostic } // ConfigChange provides an interface for receiving a new configuration. @@ -285,19 +285,20 @@ func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI str return nil } +// AckUpgrade performs acknowledgement for upgrade. func (c *Coordinator) AckUpgrade(ctx context.Context, acker acker.Acker) error { return c.upgradeMgr.Ack(ctx, acker) } // PerformAction executes an action on a unit. -func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { - return c.runtimeMgr.PerformAction(ctx, unit, name, params) +func (c *Coordinator) PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { + return c.runtimeMgr.PerformAction(ctx, comp, unit, name, params) } // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then // it performs diagnostics for all current units. -func (c *Coordinator) PerformDiagnostics(ctx context.Context, units ...component.Unit) []runtime.ComponentUnitDiagnostic { - return c.runtimeMgr.PerformDiagnostics(ctx, units...) +func (c *Coordinator) PerformDiagnostics(ctx context.Context, req ...runtime.ComponentUnitDiagnosticRequest) []runtime.ComponentUnitDiagnostic { + return c.runtimeMgr.PerformDiagnostics(ctx, req...) } // Run runs the coordinator. diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 808feee0af0..62106c30aea 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -46,7 +46,7 @@ var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) coordinator.ComponentsModifier { return func(comps []component.Component, _ map[string]interface{}) ([]component.Component, error) { for i, comp := range comps { - if comp.Spec.InputType == fleetServer { + if comp.InputSpec != nil && comp.InputSpec.InputType == fleetServer { for j, unit := range comp.Units { if unit.Type == client.UnitTypeOutput && unit.Config.Type == elasticsearch { unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), &serverCfg.Output.Elasticsearch) @@ -89,7 +89,7 @@ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) co func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordinator.ComponentsModifier { return func(comps []component.Component, cfg map[string]interface{}) ([]component.Component, error) { for i, comp := range comps { - if comp.Spec.InputType == endpoint { + if comp.InputSpec != nil && comp.InputSpec.InputType == endpoint { for j, unit := range comp.Units { if unit.Type == client.UnitTypeInput && unit.Config.Type == endpoint { unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), map[string]interface{}{"fleet": fleetCfg}) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index e7b994acfc6..9b31e7dcf12 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ 
b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -267,11 +267,19 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component component := item.Component state := item.State + var shipperReference *fleetapi.CheckinShipperReference + if component.Shipper != nil { + shipperReference = &fleetapi.CheckinShipperReference{ + ComponentID: component.Shipper.ComponentID, + UnitID: component.Shipper.UnitID, + } + } checkinComponent := fleetapi.CheckinComponent{ ID: component.ID, - Type: component.Spec.InputType, + Type: component.Type(), Status: stateString(state.State), Message: state.Message, + Shipper: shipperReference, } if state.Units != nil { diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 32cff92f8e4..af53e150888 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -271,7 +271,7 @@ func (m *managedConfigManager) waitForFleetServer(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case compState := <-sub.Ch(): - if compState.Component.Spec.InputType == "fleet-server" { + if compState.Component.InputSpec != nil && compState.Component.InputSpec.InputType == "fleet-server" { if fleetServerRunning(compState.State) { m.log.With("state", compState.State).Debugf("Fleet Server is running") return nil diff --git a/internal/pkg/agent/application/paths/paths.go b/internal/pkg/agent/application/paths/paths.go index cc0487bb2a8..2001f0be616 100644 --- a/internal/pkg/agent/application/paths/paths.go +++ b/internal/pkg/agent/application/paths/paths.go @@ -14,8 +14,11 @@ const ( // InstallPath is the installation path using for install command. InstallPath = "/opt/Elastic/Agent" - // SocketPath is the socket path used when installed. - SocketPath = "unix:///run/elastic-agent.sock" + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = "unix:///run/elastic-agent.sock" + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = "unix:///run/elastic-agent-%s-pipe.sock" // ServiceName is the service name when installed. ServiceName = "elastic-agent" diff --git a/internal/pkg/agent/application/paths/paths_darwin.go b/internal/pkg/agent/application/paths/paths_darwin.go index 1a60c53ff8d..64aeaa7d127 100644 --- a/internal/pkg/agent/application/paths/paths_darwin.go +++ b/internal/pkg/agent/application/paths/paths_darwin.go @@ -14,8 +14,11 @@ const ( // InstallPath is the installation path using for install command. InstallPath = "/Library/Elastic/Agent" - // SocketPath is the socket path used when installed. - SocketPath = "unix:///var/run/elastic-agent.sock" + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = "unix:///var/run/elastic-agent.sock" + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = "unix:///var/run/elastic-agent-%s-pipe.sock" // ServiceName is the service name when installed. ServiceName = "co.elastic.elastic-agent" diff --git a/internal/pkg/agent/application/paths/paths_windows.go b/internal/pkg/agent/application/paths/paths_windows.go index 0b81aa2061b..700634a134c 100644 --- a/internal/pkg/agent/application/paths/paths_windows.go +++ b/internal/pkg/agent/application/paths/paths_windows.go @@ -19,8 +19,11 @@ const ( // InstallPath is the installation path using for install command. 
InstallPath = `C:\Program Files\Elastic\Agent` - // SocketPath is the socket path used when installed. - SocketPath = `\\.\pipe\elastic-agent-system` + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = `\\.\pipe\elastic-agent-system` + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = `\\.\pipe\elastic-agent-%s-pipe.sock` // ServiceName is the service name when installed. ServiceName = "Elastic Agent" diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index 6d9b884aea4..d933a8fe1bf 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -287,7 +287,8 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return fmt.Errorf("unable to find unit with ID: %s/%s", compID, unitID) } if !opts.showSpec { - comp.Spec = component.InputRuntimeSpec{} + comp.InputSpec = nil + comp.ShipperSpec = nil } if !opts.showConfig { for key, unit := range comp.Units { @@ -314,7 +315,8 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen // Hide runtime specification unless toggled on. if !opts.showSpec { for i, comp := range comps { - comp.Spec = component.InputRuntimeSpec{} + comp.InputSpec = nil + comp.ShipperSpec = nil comps[i] = comp } } diff --git a/internal/pkg/agent/control/addr.go b/internal/pkg/agent/control/addr.go index 7008fc754e7..31344ed69c1 100644 --- a/internal/pkg/agent/control/addr.go +++ b/internal/pkg/agent/control/addr.go @@ -20,7 +20,7 @@ import ( func Address() string { // when installed the control address is fixed if info.RunningInstalled() { - return paths.SocketPath + return paths.ControlSocketPath } // unix socket path must be less than 104 characters diff --git a/internal/pkg/agent/control/addr_windows.go b/internal/pkg/agent/control/addr_windows.go index 0b5dc711a7d..4eed0f26596 100644 --- a/internal/pkg/agent/control/addr_windows.go +++ b/internal/pkg/agent/control/addr_windows.go @@ -19,7 +19,7 @@ import ( func Address() string { // when installed the control address is fixed if info.RunningInstalled() { - return paths.SocketPath + return paths.ControlSocketPath } // not install, adjust the path based on data path diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index d876ba4aca1..87440c54141 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -58,6 +58,7 @@ type Version struct { Snapshot bool `json:"snapshot" yaml:"snapshot"` } +// ComponentVersionInfo is the version information for the component. type ComponentVersionInfo struct { // Name of the component. Name string `json:"name" yaml:"name"` @@ -115,8 +116,9 @@ type DiagnosticFileResult struct { // DiagnosticUnitRequest allows a specific unit to be targeted for diagnostics. type DiagnosticUnitRequest struct { - UnitID string - UnitType UnitType + ComponentID string + UnitID string + UnitType UnitType } // DiagnosticUnitResult is a set of results for a unit. 
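[Editor's illustration, not part of the patch] The hunk above extends the control client's DiagnosticUnitRequest with a ComponentID, mirroring the new component_id field added to DiagnosticUnitRequest in control.proto, so diagnostics callers now identify both the owning component and the unit. Below is a minimal sketch of how a caller might build such a request after this change; the exported client.Client interface name and the ([]DiagnosticUnitResult, error) return shape of DiagnosticUnits are assumptions based on the surrounding code and are not shown in this excerpt.

// Illustrative sketch only (not part of this patch). Assumes the control
// client exposes an exported Client interface and that DiagnosticUnits
// returns ([]DiagnosticUnitResult, error); neither is shown in this excerpt.
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
)

// requestUnitDiagnostics builds a per-unit diagnostics request, supplying the
// new ComponentID alongside the existing UnitID and UnitType fields.
func requestUnitDiagnostics(ctx context.Context, c client.Client, componentID, unitID string, unitType client.UnitType) error {
	results, err := c.DiagnosticUnits(ctx, client.DiagnosticUnitRequest{
		ComponentID: componentID, // new: identifies which component owns the unit
		UnitID:      unitID,
		UnitType:    unitType,
	})
	if err != nil {
		return err
	}
	fmt.Printf("received diagnostics for %d unit(s)\n", len(results))
	return nil
}

On the server side, the server.go hunk further down converts each incoming component_id/unit_id pair into a runtime.ComponentUnitDiagnosticRequest before calling PerformDiagnostics, which is how the new field reaches the runtime manager.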
@@ -308,8 +310,9 @@ func (c *client) DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitReq reqs := make([]*cproto.DiagnosticUnitRequest, 0, len(units)) for _, u := range units { reqs = append(reqs, &cproto.DiagnosticUnitRequest{ - UnitType: u.UnitType, - UnitId: u.UnitID, + ComponentId: u.ComponentID, + UnitType: u.UnitType, + UnitId: u.UnitID, }) } diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index ed681fd38ef..01588cfa4c1 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -1155,6 +1155,8 @@ type DiagnosticUnitRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // ID of the component. + ComponentId string `protobuf:"bytes,1,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // Type of unit. UnitType UnitType `protobuf:"varint,2,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` // ID of the unit. @@ -1193,6 +1195,13 @@ func (*DiagnosticUnitRequest) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{13} } +func (x *DiagnosticUnitRequest) GetComponentId() string { + if x != nil { + return x.ComponentId + } + return "" +} + func (x *DiagnosticUnitRequest) GetUnitType() UnitType { if x != nil { return x.UnitType @@ -1501,85 +1510,88 @@ var file_control_proto_rawDesc = []byte{ 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x22, 0x5f, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x75, - 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, - 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, - 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, - 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, - 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 
0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, - 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, - 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, - 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, - 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, - 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, - 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, - 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, - 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, - 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, - 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, - 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, - 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, - 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, - 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, + 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 
0x74, 0x6f, + 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, + 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, + 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, + 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, + 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, + 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, + 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, + 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, + 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, + 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 
0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, - 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, + 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 03e19618aeb..67fe85fab2b 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -11,6 +11,8 @@ import ( "net" "time" + "github.com/elastic/elastic-agent/pkg/component/runtime" + "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" @@ -129,7 +131,7 @@ func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateRespons } 
components = append(components, &cproto.ComponentState{ Id: comp.Component.ID, - Name: comp.Component.Spec.BinaryName, + Name: comp.Component.Type(), State: cproto.State(comp.State.State), Message: comp.State.Message, Units: units, @@ -202,15 +204,20 @@ func (s *Server) DiagnosticAgent(ctx context.Context, _ *cproto.DiagnosticAgentR // DiagnosticUnits returns diagnostic information for the specific units (or all units if non-provided). func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnitsRequest) (*cproto.DiagnosticUnitsResponse, error) { - units := make([]component.Unit, 0, len(req.Units)) + reqs := make([]runtime.ComponentUnitDiagnosticRequest, 0, len(req.Units)) for _, u := range req.Units { - units = append(units, component.Unit{ - ID: u.UnitId, - Type: client.UnitType(u.UnitType), + reqs = append(reqs, runtime.ComponentUnitDiagnosticRequest{ + Component: component.Component{ + ID: u.ComponentId, + }, + Unit: component.Unit{ + ID: u.UnitId, + Type: client.UnitType(u.UnitType), + }, }) } - diag := s.coord.PerformDiagnostics(ctx, units...) + diag := s.coord.PerformDiagnostics(ctx, reqs...) res := make([]*cproto.DiagnosticUnitResponse, 0, len(diag)) for _, d := range diag { r := &cproto.DiagnosticUnitResponse{ diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index ef62524455f..9e3eb56e5cd 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -173,7 +173,7 @@ func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Confi } var serviceComps []component.Component for _, comp := range allComps { - if comp.Err == nil && comp.Spec.Spec.Service != nil { + if comp.Err == nil && comp.InputSpec != nil && comp.InputSpec.Spec.Service != nil { // non-error and service based component serviceComps = append(serviceComps, comp) } diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index 33bcd3dab55..f1b048188c3 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -20,6 +20,7 @@ import ( const checkingPath = "/api/fleet/agents/%s/checkin" +// CheckinUnit provides information about a unit during checkin. type CheckinUnit struct { ID string `json:"id"` Type string `json:"type"` @@ -28,12 +29,20 @@ type CheckinUnit struct { Payload map[string]interface{} `json:"payload,omitempty"` } +// CheckinShipperReference provides information about a component shipper connection during checkin. +type CheckinShipperReference struct { + ComponentID string `json:"component_id"` + UnitID string `json:"unit_id"` +} + +// CheckinComponent provides information about a component during checkin. type CheckinComponent struct { - ID string `json:"id"` - Type string `json:"type"` - Status string `json:"status"` - Message string `json:"message"` - Units []CheckinUnit `json:"units,omitempty"` + ID string `json:"id"` + Type string `json:"type"` + Status string `json:"status"` + Message string `json:"message"` + Units []CheckinUnit `json:"units,omitempty"` + Shipper *CheckinShipperReference `json:"shipper,omitempty"` } // CheckinRequest consists of multiple events reported to fleet ui. diff --git a/magefile.go b/magefile.go index ed633505e49..4c4a082a432 100644 --- a/magefile.go +++ b/magefile.go @@ -226,17 +226,23 @@ func (Build) Clean() { // TestBinaries build the required binaries for the test suite. 
func (Build) TestBinaries() error { - p := filepath.Join("pkg", "component") - fakeBinary := "fake" - if runtime.GOOS == "windows" { - fakeBinary += ".exe" - } - outputName := filepath.Join(p, "fake", fakeBinary) - err := RunGo("build", "-o", outputName, filepath.Join(p, "fake", "main.go")) - if err != nil { - return err + p := filepath.Join("pkg", "component", "fake") + for _, name := range []string{"component", "shipper"} { + binary := name + if runtime.GOOS == "windows" { + binary += ".exe" + } + outputName := filepath.Join(p, name, binary) + err := RunGo("build", "-o", outputName, filepath.Join("github.com/elastic/elastic-agent", p, name, "...")) + if err != nil { + return err + } + err = os.Chmod(outputName, 0755) + if err != nil { + return err + } } - return os.Chmod(outputName, 0755) + return nil } // All run all the code checks. @@ -462,6 +468,15 @@ func ControlProto() error { "control.proto") } +// FakeShipperProto generates pkg/component/fake/common event protocol. +func FakeShipperProto() error { + return sh.RunV( + "protoc", + "--go_out=.", "--go_opt=paths=source_relative", + "--go-grpc_out=.", "--go-grpc_opt=paths=source_relative", + "pkg/component/fake/common/event.proto") +} + func BuildPGP() error { // go run elastic-agent/dev-tools/cmd/buildpgp/build_pgp.go --in agent/spec/GPG-KEY-elasticsearch --out elastic-agent/pkg/release/pgp.go goF := filepath.Join("dev-tools", "cmd", "buildpgp", "build_pgp.go") diff --git a/pkg/component/component.go b/pkg/component/component.go index 467fede9e40..d65fcfec9a4 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/pkg/utils" ) +// GenerateMonitoringCfgFn is a function that can inject information into the model generation process. type GenerateMonitoringCfgFn func(map[string]interface{}, map[string]string) (map[string]interface{}, error) const ( @@ -24,11 +25,6 @@ const ( defaultUnitLogLevel = client.UnitLogLevelInfo ) -var ( - // ErrOutputNotSupported is returned when an input does not support an output type - ErrOutputNotSupported = newError("input doesn't support output type") -) - // ErrInputRuntimeCheckFail error is used when an input specification runtime prevention check occurs. type ErrInputRuntimeCheckFail struct { // message is the reason defined in the check @@ -45,6 +41,15 @@ func (e *ErrInputRuntimeCheckFail) Error() string { return e.message } +// ShipperReference provides a reference to the shipper component/unit that a component is connected to. +type ShipperReference struct { + // ComponentID is the ID of the component that this component is connected to. + ComponentID string `yaml:"component_id"` + + // UnitID is the ID of the unit inside of the component that this component is connected to. + UnitID string `yaml:"unit_id"` +} + // Unit is a single input or output that a component must run. type Unit struct { // ID is the unique ID of the unit. @@ -73,11 +78,27 @@ type Component struct { // the reason that all of these units are failed. Err error `yaml:"error,omitempty"` - // Spec on how the input should run. - Spec InputRuntimeSpec `yaml:"spec,omitempty"` + // InputSpec on how the input should run. (not set when ShipperSpec set) + InputSpec *InputRuntimeSpec `yaml:"input_spec,omitempty"` + + // ShipperSpec on how the shipper should run. (not set when InputSpec set) + ShipperSpec *ShipperRuntimeSpec `yaml:"shipper_spec,omitempty"` // Units that should be running inside this component. 
Units []Unit `yaml:"units"` + + // Shipper references the component/unit that this component used as its output. (not set when ShipperSpec) + Shipper *ShipperReference `yaml:"shipper,omitempty"` +} + +// Type returns the type of the component. +func (c *Component) Type() string { + if c.InputSpec != nil { + return c.InputSpec.InputType + } else if c.ShipperSpec != nil { + return c.ShipperSpec.ShipperType + } + return "" } // ToComponents returns the components that should be running based on the policy and the current runtime specification. @@ -107,6 +128,8 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInj return components, nil } +// PolicyToComponents takes the policy and generated a component model along with providing a mapping between component +// and the running binary. func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Component, map[string]string, error) { const revision = "revision" outputsMap, err := toIntermediate(policy) @@ -160,18 +183,27 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp } } + shipperMap := make(map[string][]string) for inputType, inputs := range inputsMap { + var supportedShipper ShipperRuntimeSpec + var usingShipper bool + inputSpec, err := r.GetInput(inputType) if err == nil { // update the inputType to match the spec; as it could have been alias inputType = inputSpec.InputType - if !containsStr(inputSpec.Spec.Outputs, output.outputType) { - inputSpec = InputRuntimeSpec{} // empty the spec - err = ErrOutputNotSupported - } else { - err = validateRuntimeChecks(&inputSpec.Spec, vars) - if err != nil { + + // determine if we are operating with shipper support + supportedShipper, usingShipper = getSupportedShipper(r, output, inputSpec, vars) + if !usingShipper { + if !containsStr(inputSpec.Spec.Outputs, output.outputType) { inputSpec = InputRuntimeSpec{} // empty the spec + err = ErrOutputNotSupported + } else { + err = validateRuntimeChecks(&inputSpec.Spec.Runtime, vars) + if err != nil { + inputSpec = InputRuntimeSpec{} // empty the spec + } } } } @@ -200,21 +232,84 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp } if len(units) > 0 { componentID := fmt.Sprintf("%s-%s", inputType, outputName) + if usingShipper { + // using shipper for this component + connected, _ := shipperMap[supportedShipper.ShipperType] + connected = append(connected, componentID) + shipperMap[supportedShipper.ShipperType] = connected + } else { + // using output inside the component + cfg, cfgErr := ExpectedConfig(output.output) + units = append(units, Unit{ + ID: componentID, + Type: client.UnitTypeOutput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + }) + } + components = append(components, Component{ + ID: componentID, + Err: err, + InputSpec: &inputSpec, + Units: units, + }) + componentIdsInputMap[componentID] = inputSpec.BinaryName + } + } + + // create the shipper components and units + for shipperType, connected := range shipperMap { + shipperSpec, _ := r.GetShipper(shipperType) // type always exists at this point + shipperCompID := fmt.Sprintf("%s-%s", shipperType, outputName) + + var shipperUnits []Unit + for _, componentID := range connected { + for i, component := range components { + if component.ID == componentID && component.Err == nil { + cfg, cfgErr := componentToShipperConfig(component) + shipperUnit := Unit{ + ID: componentID, + Type: client.UnitTypeInput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + } + 
shipperUnits = append(shipperUnits, shipperUnit) + component.Shipper = &ShipperReference{ + ComponentID: shipperCompID, + UnitID: shipperUnit.ID, + } + cfg, cfgErr = ExpectedConfig(map[string]interface{}{ + "type": shipperType, + }) + component.Units = append(component.Units, Unit{ + ID: componentID, + Type: client.UnitTypeOutput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + }) + components[i] = component + break + } + } + } + + if len(shipperUnits) > 0 { cfg, cfgErr := ExpectedConfig(output.output) - units = append(units, Unit{ - ID: componentID, + shipperUnits = append(shipperUnits, Unit{ + ID: shipperCompID, Type: client.UnitTypeOutput, LogLevel: output.logLevel, Config: cfg, Err: cfgErr, }) components = append(components, Component{ - ID: componentID, - Err: err, - Spec: inputSpec, - Units: units, + ID: shipperCompID, + ShipperSpec: &shipperSpec, + Units: shipperUnits, }) - componentIdsInputMap[componentID] = inputSpec.BinaryName } } } @@ -222,6 +317,69 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp return components, componentIdsInputMap, nil } +func componentToShipperConfig(comp Component) (*proto.UnitExpectedConfig, error) { + cfgUnits := make([]interface{}, 0, len(comp.Units)) + for _, unit := range comp.Units { + if unit.Err == nil && unit.Type == client.UnitTypeInput { + cfgUnits = append(cfgUnits, map[string]interface{}{ + "id": unit.ID, + "config": unit.Config.Source.AsMap(), + }) + } + } + cfg := map[string]interface{}{ + "id": comp.ID, + "units": cfgUnits, + } + return ExpectedConfig(cfg) +} + +func getSupportedShipper(r *RuntimeSpecs, output outputI, inputSpec InputRuntimeSpec, vars *transpiler.Vars) (ShipperRuntimeSpec, bool) { + const ( + enabledKey = "enabled" + ) + + shippers, err := r.GetShippers(output.outputType) + if err != nil { + return ShipperRuntimeSpec{}, false + } + supportedShippers := make([]ShipperRuntimeSpec, 0, len(shippers)) + for _, shipper := range shippers { + if containsStr(inputSpec.Spec.Shippers, shipper.ShipperType) { + // validate the runtime specification to determine if it can even run + err = validateRuntimeChecks(&shipper.Spec.Runtime, vars) + if err != nil { + // shipper cannot run + continue + } + // beta-mode the shipper is not on by default, so we need to ensure that this shipper type + // is enabled in the output configuration + shipperConfigRaw, ok := output.output[shipper.ShipperType] + if ok { + // key exists enabled by default unless explicitly disabled + enabled := true + if shipperConfig, ok := shipperConfigRaw.(map[string]interface{}); ok { + if enabledRaw, ok := shipperConfig[enabledKey]; ok { + if enabledVal, ok := enabledRaw.(bool); ok { + enabled = enabledVal + } + } + } + if enabled { + // inputs supports this shipper (and it's enabled) + supportedShippers = append(supportedShippers, shipper) + } + } + } + } + if len(supportedShippers) == 0 { + return ShipperRuntimeSpec{}, false + } + // in the case of multiple shippers the first is taken from the input specification (this allows an input to + // prefer another shipper over a different shipper) + return supportedShippers[0], true +} + // toIntermediate takes the policy and returns it into an intermediate representation that is easier to map into a set // of components. 
func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { @@ -377,8 +535,8 @@ type outputI struct { inputs map[string][]inputI } -func validateRuntimeChecks(spec *InputSpec, store eql.VarStore) error { - for _, prevention := range spec.Runtime.Preventions { +func validateRuntimeChecks(runtime *RuntimeSpec, store eql.VarStore) error { + for _, prevention := range runtime.Preventions { expression, err := eql.New(prevention.Condition) if err != nil { // this should not happen because the specification already validates that this diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index 0d9e97d4c94..c4b83ca6eca 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -291,9 +291,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "unknown-default", - Spec: InputRuntimeSpec{}, - Err: ErrInputNotSupported, + ID: "unknown-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrInputNotSupported, Units: []Unit{ { ID: "unknown-default", @@ -343,9 +343,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: ErrInputNotSupportedOnPlatform, + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrInputNotSupportedOnPlatform, Units: []Unit{ { ID: "endpoint-default", @@ -386,9 +386,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: ErrOutputNotSupported, + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrOutputNotSupported, Units: []Unit{ { ID: "endpoint-default", @@ -441,9 +441,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), Units: []Unit{ { ID: "endpoint-default", @@ -499,7 +499,7 @@ func TestToComponents(t *testing.T) { Result: []Component{ { ID: "filestream-default", - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -595,7 +595,7 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -697,7 +697,7 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -732,7 +732,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -767,7 +767,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -802,7 +802,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -828,7 +828,7 @@ func TestToComponents(t *testing.T) { 
}, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -854,7 +854,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -880,7 +880,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "apm", BinaryName: "apm-server", BinaryPath: filepath.Join("..", "..", "specs", "apm-server"), @@ -907,6 +907,564 @@ func TestToComponents(t *testing.T) { }, }, }, + { + Name: "Simple w/ shipper", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + "shipper": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": true, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "enabled": false, + }, + }, + }, + Result: []Component{ + { + ID: "filestream-default", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-default", + UnitID: "filestream-default", + }, + }, + { + ID: "shipper-default", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "shipper-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": true, + }, + }), + }, + { + ID: "filestream-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "filestream-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "filestream-default-filestream-0", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + }, + }), + }, + }, + }, + }, + }, + { + Name: "Complex w/ shipper", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }, + "other": map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }, + "stashit": map[string]interface{}{ + "type": "logstash", + "shipper": map[string]interface{}{}, + }, + "redis": map[string]interface{}{ + "type": "redis", + "shipper": map[string]interface{}{}, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + map[string]interface{}{ + "type": "filestream", + "id": 
"filestream-1", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-2", + "enabled": false, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + "use_output": "other", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + "use_output": "default", + }, + map[string]interface{}{ + "type": "log", + "id": "logfile-1", + "use_output": "default", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + "use_output": "stashit", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + "use_output": "redis", + }, + map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }, + }, + }, + Result: []Component{ + { + ID: "filestream-default", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + { + ID: "filestream-default-filestream-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-default", + UnitID: "filestream-default", + }, + }, + { + ID: "filestream-other", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }), + }, + { + ID: "filestream-other-filestream-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + }), + }, + { + ID: "filestream-other-filestream-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + }), + }, + }, + }, + { + ID: "log-default", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-default-logfile-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + }, "log"), + }, + { + ID: "log-default-logfile-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "log", + "id": "logfile-1", + }), + }, + }, + Shipper: 
&ShipperReference{ + ComponentID: "shipper-default", + UnitID: "log-default", + }, + }, + { + ID: "shipper-default", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "filestream-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "filestream-default-filestream-0", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + map[string]interface{}{ + "id": "filestream-default-filestream-1", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }, + }, + }, + }), + }, + { + ID: "log-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-default-logfile-0", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + }, + }, + map[string]interface{}{ + "id": "log-default-logfile-1", + "config": map[string]interface{}{ + "type": "log", + "id": "logfile-1", + }, + }, + }, + }), + }, + { + ID: "shipper-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "log-other", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }), + }, + { + ID: "log-other-logfile-2", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + }, "log"), + }, + }, + }, + { + ID: "log-stashit", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-stashit", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-stashit-logfile-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + }, "log"), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-stashit", + UnitID: "log-stashit", + }, + }, + { + ID: "shipper-stashit", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "log-stashit", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-stashit", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-stashit-logfile-3", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + }, + }, + }, + }), + }, + { + ID: "shipper-stashit", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: 
MustExpectedConfig(map[string]interface{}{ + "type": "logstash", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "log-redis", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-redis", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-redis-logfile-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + }, "log"), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-redis", + UnitID: "log-redis", + }, + }, + { + ID: "shipper-redis", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "log-redis", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-redis", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-redis-logfile-4", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + }, + }, + }, + }), + }, + { + ID: "shipper-redis", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "redis", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "apm-default", + InputSpec: &InputRuntimeSpec{ + InputType: "apm", + BinaryName: "apm-server", + BinaryPath: filepath.Join("..", "..", "specs", "apm-server"), + }, + Units: []Unit{ + { + ID: "apm-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }), + }, + { + ID: "apm-default-apm-server-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }), + }, + }, + }, + }, + }, } for _, scenario := range scenarios { @@ -927,11 +1485,24 @@ func TestToComponents(t *testing.T) { if expected.Err != nil { assert.Equal(t, expected.Err, actual.Err) assert.EqualValues(t, expected.Units, actual.Units) - } else { - assert.Equal(t, expected.Spec.InputType, actual.Spec.InputType) - assert.Equal(t, expected.Spec.BinaryName, actual.Spec.BinaryName) - assert.Equal(t, expected.Spec.BinaryPath, actual.Spec.BinaryPath) + } else if expected.InputSpec != nil { + assert.Nil(t, actual.ShipperSpec) + assert.Equal(t, expected.InputSpec.InputType, actual.InputSpec.InputType) + assert.Equal(t, expected.InputSpec.BinaryName, actual.InputSpec.BinaryName) + assert.Equal(t, expected.InputSpec.BinaryPath, actual.InputSpec.BinaryPath) + assert.EqualValues(t, expected.Units, actual.Units) + if expected.Shipper != nil { + assert.Equal(t, *expected.Shipper, *actual.Shipper) + } else { + assert.Nil(t, actual.Shipper) + } + } else if expected.ShipperSpec != nil { + assert.Nil(t, actual.InputSpec) + assert.Equal(t, expected.ShipperSpec.ShipperType, actual.ShipperSpec.ShipperType) + assert.Equal(t, expected.ShipperSpec.BinaryName, actual.ShipperSpec.BinaryName) + assert.Equal(t, expected.ShipperSpec.BinaryPath, actual.ShipperSpec.BinaryPath) assert.EqualValues(t, expected.Units, actual.Units) + assert.Nil(t, actual.Shipper) } } } diff --git 
a/pkg/component/fake/common/common.go b/pkg/component/fake/common/common.go new file mode 100644 index 00000000000..e3fc075547a --- /dev/null +++ b/pkg/component/fake/common/common.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package common + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" +) + +// FakeShipperConfigTLS is basic TLS configuration for a shipper. +type FakeShipperConfigTLS struct { + CAs []string `mapstructure:"certificate_authorities"` + Cert string `mapstructure:"certificate"` + Key string `mapstructure:"key"` +} + +// FakeShipperConfig is basic configuration for a shipper. +type FakeShipperConfig struct { + Server string `mapstructure:"server"` + TLS *FakeShipperConfigTLS `mapstructure:"ssl"` +} + +// ParseFakeShipperConfig parses the shipper GRPC server and ssl configuration information. +func ParseFakeShipperConfig(cfg *proto.UnitExpectedConfig) (FakeShipperConfig, error) { + var r FakeShipperConfig + err := mapstructure.Decode(cfg.Source.AsMap(), &r) + if err != nil { + return FakeShipperConfig{}, err + } + return r, nil +} diff --git a/pkg/component/fake/common/event.pb.go b/pkg/component/fake/common/event.pb.go new file mode 100644 index 00000000000..855ed0822f3 --- /dev/null +++ b/pkg/component/fake/common/event.pb.go @@ -0,0 +1,235 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: pkg/component/fake/common/event.proto + +package common + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Event message. +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Timestamp the event was generated. + Generated *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=generated,proto3" json:"generated,omitempty"` + // Content of the event. 
+ Content *structpb.Struct `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_pkg_component_fake_common_event_proto_rawDescGZIP(), []int{0} +} + +func (x *Event) GetGenerated() *timestamppb.Timestamp { + if x != nil { + return x.Generated + } + return nil +} + +func (x *Event) GetContent() *structpb.Struct { + if x != nil { + return x.Content + } + return nil +} + +// Event response message. +type EventResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *EventResponse) Reset() { + *x = EventResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventResponse) ProtoMessage() {} + +func (x *EventResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventResponse.ProtoReflect.Descriptor instead. 
+func (*EventResponse) Descriptor() ([]byte, []int) { + return file_pkg_component_fake_common_event_proto_rawDescGZIP(), []int{1} +} + +var File_pkg_component_fake_common_event_proto protoreflect.FileDescriptor + +var file_pkg_component_fake_common_event_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x2f, + 0x66, 0x61, 0x6b, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x74, + 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x31, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x22, 0x0f, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x46, 0x0a, 0x11, 0x46, 0x61, 0x6b, 0x65, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x09, 0x53, 0x65, + 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1e, 0x5a, + 0x19, 0x70, 0x6b, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x2f, 0x66, + 0x61, 0x6b, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_component_fake_common_event_proto_rawDescOnce sync.Once + file_pkg_component_fake_common_event_proto_rawDescData = file_pkg_component_fake_common_event_proto_rawDesc +) + +func file_pkg_component_fake_common_event_proto_rawDescGZIP() []byte { + file_pkg_component_fake_common_event_proto_rawDescOnce.Do(func() { + file_pkg_component_fake_common_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_component_fake_common_event_proto_rawDescData) + }) + return file_pkg_component_fake_common_event_proto_rawDescData +} + +var file_pkg_component_fake_common_event_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_component_fake_common_event_proto_goTypes = []interface{}{ + (*Event)(nil), // 0: common.Event + (*EventResponse)(nil), // 1: common.EventResponse + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_pkg_component_fake_common_event_proto_depIdxs = []int32{ + 2, // 0: common.Event.generated:type_name -> google.protobuf.Timestamp + 3, // 1: 
common.Event.content:type_name -> google.protobuf.Struct + 0, // 2: common.FakeEventProtocol.SendEvent:input_type -> common.Event + 1, // 3: common.FakeEventProtocol.SendEvent:output_type -> common.EventResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_component_fake_common_event_proto_init() } +func file_pkg_component_fake_common_event_proto_init() { + if File_pkg_component_fake_common_event_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_component_fake_common_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_component_fake_common_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_component_fake_common_event_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_component_fake_common_event_proto_goTypes, + DependencyIndexes: file_pkg_component_fake_common_event_proto_depIdxs, + MessageInfos: file_pkg_component_fake_common_event_proto_msgTypes, + }.Build() + File_pkg_component_fake_common_event_proto = out.File + file_pkg_component_fake_common_event_proto_rawDesc = nil + file_pkg_component_fake_common_event_proto_goTypes = nil + file_pkg_component_fake_common_event_proto_depIdxs = nil +} diff --git a/pkg/component/fake/common/event.proto b/pkg/component/fake/common/event.proto new file mode 100644 index 00000000000..35510edc364 --- /dev/null +++ b/pkg/component/fake/common/event.proto @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +syntax = "proto3"; + +package common; + +option cc_enable_arenas = true; +option go_package = "pkg/component/fake/common"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +// Event message. +message Event { + // Timestamp the event was generated. + google.protobuf.Timestamp generated = 1; + // Content of the event. + google.protobuf.Struct content = 2; +} + +// Event response message. +message EventResponse { +} + +service FakeEventProtocol { + // SendEvent sends an event over the protocol. + rpc SendEvent(Event) returns (EventResponse); +} diff --git a/pkg/component/fake/common/event_grpc.pb.go b/pkg/component/fake/common/event_grpc.pb.go new file mode 100644 index 00000000000..532841874a1 --- /dev/null +++ b/pkg/component/fake/common/event_grpc.pb.go @@ -0,0 +1,112 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.5 +// source: pkg/component/fake/common/event.proto + +package common + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// FakeEventProtocolClient is the client API for FakeEventProtocol service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FakeEventProtocolClient interface { + // SendEvent sends an event over the protocol. + SendEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventResponse, error) +} + +type fakeEventProtocolClient struct { + cc grpc.ClientConnInterface +} + +func NewFakeEventProtocolClient(cc grpc.ClientConnInterface) FakeEventProtocolClient { + return &fakeEventProtocolClient{cc} +} + +func (c *fakeEventProtocolClient) SendEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventResponse, error) { + out := new(EventResponse) + err := c.cc.Invoke(ctx, "/common.FakeEventProtocol/SendEvent", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FakeEventProtocolServer is the server API for FakeEventProtocol service. +// All implementations must embed UnimplementedFakeEventProtocolServer +// for forward compatibility +type FakeEventProtocolServer interface { + // SendEvent sends an event over the protocol. + SendEvent(context.Context, *Event) (*EventResponse, error) + mustEmbedUnimplementedFakeEventProtocolServer() +} + +// UnimplementedFakeEventProtocolServer must be embedded to have forward compatible implementations. +type UnimplementedFakeEventProtocolServer struct { +} + +func (UnimplementedFakeEventProtocolServer) SendEvent(context.Context, *Event) (*EventResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendEvent not implemented") +} +func (UnimplementedFakeEventProtocolServer) mustEmbedUnimplementedFakeEventProtocolServer() {} + +// UnsafeFakeEventProtocolServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FakeEventProtocolServer will +// result in compilation errors. +type UnsafeFakeEventProtocolServer interface { + mustEmbedUnimplementedFakeEventProtocolServer() +} + +func RegisterFakeEventProtocolServer(s grpc.ServiceRegistrar, srv FakeEventProtocolServer) { + s.RegisterService(&FakeEventProtocol_ServiceDesc, srv) +} + +func _FakeEventProtocol_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Event) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeEventProtocolServer).SendEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/common.FakeEventProtocol/SendEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeEventProtocolServer).SendEvent(ctx, req.(*Event)) + } + return interceptor(ctx, in, info, handler) +} + +// FakeEventProtocol_ServiceDesc is the grpc.ServiceDesc for FakeEventProtocol service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FakeEventProtocol_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "common.FakeEventProtocol", + HandlerType: (*FakeEventProtocolServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendEvent", + Handler: _FakeEventProtocol_SendEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/component/fake/common/event.proto", +} diff --git a/pkg/component/fake/README.md b/pkg/component/fake/component/README.md similarity index 100% rename from pkg/component/fake/README.md rename to pkg/component/fake/component/README.md diff --git a/pkg/component/fake/component/dialer.go b/pkg/component/fake/component/dialer.go new file mode 100644 index 00000000000..8f55407123a --- /dev/null +++ b/pkg/component/fake/component/dialer.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows +// +build !windows + +package main + +import ( + "context" + "crypto/x509" + "net" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func dialContext(ctx context.Context, addr string, cp *x509.CertPool, serverName string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, strings.TrimPrefix(addr, "unix://"), grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(cp, serverName))) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", addr) +} diff --git a/pkg/component/fake/component/dialer_windows.go b/pkg/component/fake/component/dialer_windows.go new file mode 100644 index 00000000000..98793bc09e4 --- /dev/null +++ b/pkg/component/fake/component/dialer_windows.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +package main + +import ( + "context" + "crypto/x509" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/elastic/elastic-agent-libs/api/npipe" +) + +func dialContext(ctx context.Context, addr string, cp *x509.CertPool, serverName string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, addr, grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(cp, serverName))) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + return npipe.DialContext(addr)(ctx, "", "") +} diff --git a/pkg/component/fake/component/main.go b/pkg/component/fake/component/main.go new file mode 100644 index 00000000000..2f10148357f --- /dev/null +++ b/pkg/component/fake/component/main.go @@ -0,0 +1,580 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
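// Editorial sketch, not part of the patch: a minimal illustration of how the
// platform-specific dialContext above (dialer.go / dialer_windows.go) and the
// generated FakeEventProtocol client are expected to fit together on the
// component side. The name sendOneEvent and its parameters are hypothetical,
// and the package clause assumes the sketch sits alongside dialer.go; the real
// wiring lives in fakeShipperOutput later in this file.
package main

import (
	"context"
	"crypto/x509"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/structpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/elastic/elastic-agent/pkg/component/fake/common"
)

// sendOneEvent dials the fake shipper at addr (a unix:// socket path on
// Unix-like systems, a named pipe on Windows), authenticates the server with
// the given CA pool and server name, and sends a single event.
func sendOneEvent(ctx context.Context, addr string, cas *x509.CertPool, serverName string, fields map[string]interface{}) error {
	conn, err := dialContext(ctx, addr, cas, serverName) // platform-specific dialer above
	if err != nil {
		return err
	}
	defer conn.Close()

	content, err := structpb.NewStruct(fields)
	if err != nil {
		return err
	}
	sendCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
	defer cancel()
	_, err = common.NewFakeEventProtocolClient(conn).SendEvent(sendCtx, &common.Event{
		Generated: timestamppb.Now(),
		Content:   content,
	}, grpc.WaitForReady(true)) // block until the shipper side is listening
	return err
}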
+ +package main + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" + "time" + + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/pkg/component/fake/common" +) + +const ( + fake = "fake" + fakeShipper = "fake-shipper" + + configuringMsg = "Configuring" + stoppingMsg = "Stopping" + stoppedMsg = "Stopped" +) + +func main() { + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} + +func run() error { + logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + ver := client.VersionInfo{ + Name: fake, + Version: "1.0", + Meta: map[string]string{ + "input": fake, + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager(logger) + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) + } + } + } +} + +type stateManager struct { + logger zerolog.Logger + inputs map[string]runningUnit + output runningUnit +} + +func newStateManager(logger zerolog.Logger) *stateManager { + return &stateManager{logger: logger, inputs: make(map[string]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output != nil { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate output unit; only supports one", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.output = r + return + } + + _, ok := s.inputs[unit.ID()] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate input unit", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.inputs[unit.ID()] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output == nil { + _ = unit.UpdateState(client.UnitStateFailed, "Error: modified a non-existing output unit", nil) + return + } + err := s.output.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } + return + } + + existing, ok := s.inputs[unit.ID()] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) 
+ if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output != nil { + s.output = nil + } + return + } + + _, ok := s.inputs[unit.ID()] + if !ok { + return + } + delete(s.inputs, unit.ID()) +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type sendEvent struct { + evt *common.Event + timeout time.Duration + doneCh chan error +} + +type fakeShipperOutput struct { + logger zerolog.Logger + unit *client.Unit + cfg *proto.UnitExpectedConfig + + evtCh chan sendEvent + + runner errgroup.Group + canceller context.CancelFunc +} + +func newFakeShipperOutput(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeShipperOutput, error) { + logger = logger.Level(toZerologLevel(logLevel)) + + f := &fakeShipperOutput{ + logger: logger, + unit: unit, + cfg: cfg, + evtCh: make(chan sendEvent), + } + + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{f.logger}) + + f.start(unit, cfg) + + return f, nil +} + +func (f *fakeShipperOutput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeShipperOutput) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this input to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + go func() { + f.stop() + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + }() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fakeShipper { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + + f.stop() + f.cfg = config + f.start(u, config) + + return nil +} + +func (f *fakeShipperOutput) sendEvent(event map[string]interface{}, timeout time.Duration) error { + content, err := structpb.NewStruct(event) + if err != nil { + return err + } + evt := &common.Event{ + Generated: timestamppb.Now(), + Content: content, + } + doneCh := make(chan error) + f.evtCh <- sendEvent{ + evt: evt, + timeout: timeout, + doneCh: doneCh, + } + return <-doneCh +} + +func (f *fakeShipperOutput) start(unit *client.Unit, cfg *proto.UnitExpectedConfig) { + ctx, cancel := context.WithCancel(context.Background()) + f.canceller = cancel + f.runner.Go(func() error { + for { + err := f.run(ctx, unit, cfg) + if err != nil { + if errors.Is(err, context.Canceled) { + // don't restart + return err + } + // restartable error + f.logger.Error().Err(err) + _ = unit.UpdateState(client.UnitStateFailed, err.Error(), nil) + // delay restart + <-time.After(time.Second) + } + } + }) +} + +func (f *fakeShipperOutput) stop() { + if f.canceller != nil { + f.canceller() + f.canceller = nil + f.runner.Wait() + } +} + +func (f *fakeShipperOutput) run(ctx context.Context, unit *client.Unit, cfg *proto.UnitExpectedConfig) error { + f.logger.Debug().Str("state", client.UnitStateConfiguring.String()).Str("message", configuringMsg).Msg("restarting shipper output") + _ = unit.UpdateState(client.UnitStateConfiguring, configuringMsg, nil) + + shipperCfg, err := common.ParseFakeShipperConfig(cfg) + if 
err != nil { + return fmt.Errorf("failed to parse fake shipper config: %w", err) + } + if shipperCfg.TLS == nil || len(shipperCfg.TLS.CAs) == 0 { + return fmt.Errorf("fake shipper ssl configuration missing") + } + certPool := x509.NewCertPool() + for _, certPEM := range shipperCfg.TLS.CAs { + if ok := certPool.AppendCertsFromPEM([]byte(certPEM)); !ok { + return errors.New("failed to append CA for shipper connection") + } + } + conn, err := dialContext(ctx, shipperCfg.Server, certPool, unit.ID()) + if err != nil { + return fmt.Errorf("grpc client failed to connect: %w", err) + } + defer conn.Close() + + connectedMsg := fmt.Sprintf("GRPC fake event pipe connected %q", shipperCfg.Server) + f.logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", connectedMsg).Msg("connected to output") + _ = unit.UpdateState(client.UnitStateHealthy, connectedMsg, nil) + + client := common.NewFakeEventProtocolClient(conn) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case evt := <-f.evtCh: + evtCtx, evtCanceller := context.WithTimeout(ctx, evt.timeout) + _, err := client.SendEvent(evtCtx, evt.evt, grpc.WaitForReady(true)) + evtCanceller() + evt.doneCh <- err + } + } +} + +type fakeInput struct { + logger zerolog.Logger + manager *stateManager + unit *client.Unit + cfg *proto.UnitExpectedConfig + + state client.UnitState + stateMsg string + + canceller context.CancelFunc +} + +func newFakeInput(logger zerolog.Logger, logLevel client.UnitLogLevel, manager *stateManager, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeInput, error) { + logger = logger.Level(toZerologLevel(logLevel)) + state, msg, err := getStateFromConfig(cfg) + if err != nil { + return nil, err + } + + i := &fakeInput{ + logger: logger, + manager: manager, + unit: unit, + cfg: cfg, + state: state, + stateMsg: msg, + } + + logger.Trace().Msg("registering set_state action for unit") + unit.RegisterAction(&stateSetterAction{i}) + logger.Trace().Msg("registering send_event action for unit") + unit.RegisterAction(&sendEventAction{i}) + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{i.logger}) + logger.Debug().Str("state", i.state.String()).Str("message", i.stateMsg).Msg("updating unit state") + _ = unit.UpdateState(i.state, i.stateMsg, nil) + + logTimer := 10 * time.Second + if logTimerValue, ok := cfg.Source.Fields["log_timer"]; ok { + logTimeStr := logTimerValue.GetStringValue() + if logTimeStr != "" { + logTimer, err = time.ParseDuration(logTimeStr) + if err != nil { + return nil, err + } + } + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + t := time.NewTicker(logTimer) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + logger.Trace().Dur("log_timer", logTimer).Msg("trace log ticker") + } + } + }() + i.canceller = cancel + + return i, nil +} + +func (f *fakeInput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeInput) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this input to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + f.canceller() + go func() { + <-time.After(1 * time.Second) + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + 
}() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fake { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + + state, stateMsg, err := getStateFromConfig(config) + if err != nil { + return fmt.Errorf("unit config parsing error: %w", err) + } + f.state = state + f.stateMsg = stateMsg + f.logger.Debug().Str("state", f.state.String()).Str("message", f.stateMsg).Msg("updating unit state") + _ = u.UpdateState(f.state, f.stateMsg, nil) + return nil +} + +type stateSetterAction struct { + input *fakeInput +} + +func (s *stateSetterAction) Name() string { + return "set_state" +} + +func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + s.input.logger.Trace().Msg("executing set_state action") + state, stateMsg, err := getStateFromMap(params) + if err != nil { + return nil, err + } + s.input.state = state + s.input.stateMsg = stateMsg + s.input.logger.Debug().Str("state", s.input.state.String()).Str("message", s.input.stateMsg).Msg("updating unit state") + _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) + return nil, nil +} + +type sendEventAction struct { + input *fakeInput +} + +func (s *sendEventAction) Name() string { + return "send_event" +} + +func (s *sendEventAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + const ( + timeoutField = "timeout" + timeoutDefault = 3 * time.Second + ) + + s.input.logger.Trace().Msg("executing send_event action") + + // timeout is taken from the action to define the timeout + timeout := timeoutDefault + if timeoutRaw, ok := params[timeoutField]; ok { + if timeoutStr, ok := timeoutRaw.(string); ok { + dur, err := time.ParseDuration(timeoutStr) + if err != nil { + return nil, fmt.Errorf("failed to parse timeout duration: %w", err) + } + timeout = dur + } + } + + if s.input.manager.output != nil { + output, ok := s.input.manager.output.(*fakeShipperOutput) + if !ok { + return nil, fmt.Errorf("output is not fake-shipper output, cannot send event, got type %T", s.input.manager.output) + } + err := output.sendEvent(params, timeout) + if err != nil { + return nil, err + } + return nil, nil + } + return nil, errors.New("no output configured to send event") +} + +type killAction struct { + logger zerolog.Logger +} + +func (s *killAction) Name() string { + return "kill" +} + +func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { + s.logger.Trace().Msg("executing kill action") + os.Exit(1) + return nil, nil +} + +func newRunningUnit(logger zerolog.Logger, manager *stateManager, unit *client.Unit) (runningUnit, error) { + _, logLevel, config := unit.Expected() + if config.Type == "" { + return nil, fmt.Errorf("unit config type empty") + } + if unit.Type() == client.UnitTypeOutput { + switch config.Type { + case fakeShipper: + return newFakeShipperOutput(logger, logLevel, unit, config) + } + return nil, fmt.Errorf("unknown output unit config type: %s", config.Type) + } + switch config.Type { + case fake: + return newFakeInput(logger, logLevel, manager, unit, config) + } + return nil, fmt.Errorf("unknown input unit config type: %s", config.Type) +} + +func getStateFromConfig(cfg *proto.UnitExpectedConfig) (client.UnitState, string, error) { + return getStateFromMap(cfg.Source.AsMap()) +} + +func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { + state, ok 
:= cfg["state"] + if !ok { + return client.UnitStateStarting, "", errors.New("missing required state parameter") + } + stateTypeI, ok := state.(int) + if !ok { + // try float64 (JSON) does it differently than YAML + stateTypeF, ok := state.(float64) + if !ok { + return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state) + } + stateTypeI = int(stateTypeF) + } + stateType := client.UnitState(stateTypeI) + stateMsgStr := "" + stateMsg, ok := cfg["message"] + if ok { + stateMsgStr, _ = stateMsg.(string) + } + return stateType, stateMsgStr, nil +} + +func toZerologLevel(level client.UnitLogLevel) zerolog.Level { + switch level { + case client.UnitLogLevelError: + return zerolog.ErrorLevel + case client.UnitLogLevelWarn: + return zerolog.WarnLevel + case client.UnitLogLevelInfo: + return zerolog.InfoLevel + case client.UnitLogLevelDebug: + return zerolog.DebugLevel + case client.UnitLogLevelTrace: + return zerolog.TraceLevel + } + return zerolog.InfoLevel +} diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go deleted file mode 100644 index 0464d55b8ef..00000000000 --- a/pkg/component/fake/main.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/signal" - "syscall" - "time" - - "github.com/rs/zerolog" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" -) - -const ( - fake = "fake" - - stoppingMsg = "Stopping" - stoppedMsg = "Stopped" -) - -func main() { - err := run() - if err != nil { - fmt.Fprintf(os.Stderr, "%s\n", err) - os.Exit(1) - } -} - -func run() error { - logger := zerolog.New(os.Stderr).With().Timestamp().Logger() - ver := client.VersionInfo{ - Name: fake, - Version: "1.0", - Meta: map[string]string{ - "input": fake, - }, - } - c, _, err := client.NewV2FromReader(os.Stdin, ver) - if err != nil { - return fmt.Errorf("failed to create GRPC client: %w", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - n := make(chan os.Signal, 1) - signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - defer func() { - signal.Stop(n) - cancel() - }() - go func() { - select { - case <-n: - cancel() - case <-ctx.Done(): - } - }() - - err = c.Start(ctx) - if err != nil { - return fmt.Errorf("failed to start GRPC client: %w", err) - } - - s := newStateManager(logger) - for { - select { - case <-ctx.Done(): - return nil - case change := <-c.UnitChanges(): - switch change.Type { - case client.UnitChangedAdded: - s.added(change.Unit) - case client.UnitChangedModified: - s.modified(change.Unit) - case client.UnitChangedRemoved: - s.removed(change.Unit) - } - case err := <-c.Errors(): - if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { - fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) - } - } - } -} - -type unitKey struct { - unitType client.UnitType - unitID string -} - -type stateManager struct { - logger zerolog.Logger - units map[unitKey]runningUnit -} - -func newStateManager(logger zerolog.Logger) *stateManager { - return &stateManager{logger: logger, units: make(map[unitKey]runningUnit)} -} - -func (s *stateManager) added(unit *client.Unit) { - k := newUnitKey(unit) - _, ok := s.units[k] - if ok { - _ = 
unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) - return - } - r, err := newRunningUnit(s.logger, unit) - if err != nil { - _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) - return - } - s.units[k] = r -} - -func (s *stateManager) modified(unit *client.Unit) { - existing, ok := s.units[newUnitKey(unit)] - if !ok { - _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) - return - } - err := existing.Update(unit) - if err != nil { - _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) - } -} - -func (s *stateManager) removed(unit *client.Unit) { - k := newUnitKey(unit) - _, ok := s.units[k] - if !ok { - return - } - delete(s.units, k) -} - -type runningUnit interface { - Unit() *client.Unit - Update(u *client.Unit) error -} - -type fakeInput struct { - logger zerolog.Logger - unit *client.Unit - cfg *proto.UnitExpectedConfig - - state client.UnitState - stateMsg string - - canceller context.CancelFunc -} - -func newFakeInput(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeInput, error) { - logger = logger.Level(toZerologLevel(logLevel)) - state, msg, err := getStateFromConfig(cfg) - if err != nil { - return nil, err - } - - i := &fakeInput{ - logger: logger, - unit: unit, - cfg: cfg, - state: state, - stateMsg: msg, - } - - logger.Trace().Msg("registering set_state action for unit") - unit.RegisterAction(&stateSetterAction{i}) - logger.Trace().Msg("registering kill action for unit") - unit.RegisterAction(&killAction{i}) - logger.Debug().Str("state", i.state.String()).Str("message", i.stateMsg).Msg("updating unit state") - _ = unit.UpdateState(i.state, i.stateMsg, nil) - - logTimer := 10 * time.Second - if logTimerValue, ok := cfg.Source.Fields["log_timer"]; ok { - logTimeStr := logTimerValue.GetStringValue() - if logTimeStr != "" { - logTimer, err = time.ParseDuration(logTimeStr) - if err != nil { - return nil, err - } - } - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - t := time.NewTicker(logTimer) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - logger.Trace().Dur("log_timer", logTimer).Msg("trace log ticker") - } - } - }() - i.canceller = cancel - - return i, nil -} - -func (f *fakeInput) Unit() *client.Unit { - return f.unit -} - -func (f *fakeInput) Update(u *client.Unit) error { - expected, _, config := u.Expected() - if expected == client.UnitStateStopped { - // agent is requesting this input to stop - f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") - _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) - f.canceller() - go func() { - <-time.After(1 * time.Second) - f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") - _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) - }() - return nil - } - - if config.Type == "" { - return fmt.Errorf("unit missing config type") - } - if config.Type != fake { - return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) - } - - state, stateMsg, err := getStateFromConfig(config) - if err != nil { - return fmt.Errorf("unit config parsing error: %w", err) - } - f.state = state - f.stateMsg = stateMsg - f.logger.Debug().Str("state", f.state.String()).Str("message", f.stateMsg).Msg("updating unit state") - _ = u.UpdateState(f.state, f.stateMsg, nil) - return 
nil -} - -type stateSetterAction struct { - input *fakeInput -} - -func (s *stateSetterAction) Name() string { - return "set_state" -} - -func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { - s.input.logger.Trace().Msg("executing set_state action") - state, stateMsg, err := getStateFromMap(params) - if err != nil { - return nil, err - } - s.input.state = state - s.input.stateMsg = stateMsg - s.input.logger.Debug().Str("state", s.input.state.String()).Str("message", s.input.stateMsg).Msg("updating unit state") - _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) - return nil, nil -} - -type killAction struct { - input *fakeInput -} - -func (s *killAction) Name() string { - return "kill" -} - -func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { - s.input.logger.Trace().Msg("executing kill action") - os.Exit(1) - return nil, nil -} - -func newRunningUnit(logger zerolog.Logger, unit *client.Unit) (runningUnit, error) { - _, logLevel, config := unit.Expected() - if config.Type == "" { - return nil, fmt.Errorf("unit config type empty") - } - switch config.Type { - case fake: - return newFakeInput(logger, logLevel, unit, config) - } - return nil, fmt.Errorf("unknown unit config type: %s", config.Type) -} - -func newUnitKey(unit *client.Unit) unitKey { - return unitKey{ - unitType: unit.Type(), - unitID: unit.ID(), - } -} - -func getStateFromConfig(cfg *proto.UnitExpectedConfig) (client.UnitState, string, error) { - return getStateFromMap(cfg.Source.AsMap()) -} - -func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { - state, ok := cfg["state"] - if !ok { - return client.UnitStateStarting, "", errors.New("missing required state parameter") - } - stateTypeI, ok := state.(int) - if !ok { - // try float64 (JSON) does it differently than YAML - stateTypeF, ok := state.(float64) - if !ok { - return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state) - } - stateTypeI = int(stateTypeF) - } - stateType := client.UnitState(stateTypeI) - stateMsgStr := "" - stateMsg, ok := cfg["message"] - if ok { - stateMsgStr, _ = stateMsg.(string) - } - return stateType, stateMsgStr, nil -} - -func toZerologLevel(level client.UnitLogLevel) zerolog.Level { - switch level { - case client.UnitLogLevelError: - return zerolog.ErrorLevel - case client.UnitLogLevelWarn: - return zerolog.WarnLevel - case client.UnitLogLevelInfo: - return zerolog.InfoLevel - case client.UnitLogLevelDebug: - return zerolog.DebugLevel - case client.UnitLogLevelTrace: - return zerolog.TraceLevel - } - return zerolog.InfoLevel -} diff --git a/pkg/component/fake/shipper/README.md b/pkg/component/fake/shipper/README.md new file mode 100644 index 00000000000..a1e9add5d7d --- /dev/null +++ b/pkg/component/fake/shipper/README.md @@ -0,0 +1,3 @@ +# Fake Shipper + +Fake shipper that can be controlled with actions through the GRPC control protocol. Allows unit tests to simulate control and communication with a shipper. diff --git a/pkg/component/fake/shipper/listener.go b/pkg/component/fake/shipper/listener.go new file mode 100644 index 00000000000..ce4d6a99a41 --- /dev/null +++ b/pkg/component/fake/shipper/listener.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows + +package main + +import ( + "fmt" + "net" + "os" + "strings" +) + +func createListener(path string) (net.Listener, error) { + if !strings.HasPrefix(path, "unix://") { + return nil, fmt.Errorf("listener path must start with unix://; got %s", path) + } + path = strings.TrimPrefix(path, "unix://") + if _, err := os.Stat(path); !os.IsNotExist(err) { + os.Remove(path) + } + lis, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + return lis, err +} diff --git a/pkg/component/fake/shipper/listener_windows.go b/pkg/component/fake/shipper/listener_windows.go new file mode 100644 index 00000000000..018fc3850ca --- /dev/null +++ b/pkg/component/fake/shipper/listener_windows.go @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package main + +import ( + "fmt" + "net" + "os/user" + + "github.com/elastic/elastic-agent-libs/api/npipe" +) + +// createListener creates a named pipe listener on Windows +func createListener(path string) (net.Listener, error) { + sd, err := securityDescriptor() + if err != nil { + return nil, err + } + return npipe.NewListener(path, sd) +} + +func securityDescriptor() (string, error) { + u, err := user.Current() + if err != nil { + return "", fmt.Errorf("failed to get current user: %w", err) + } + // Named pipe security and access rights. + // We create the pipe and the specific users should only be able to write to it. + // See docs: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipe-security-and-access-rights + // String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings + // Give generic read/write access to the specified user. + descriptor := "D:P(A;;GA;;;" + u.Uid + ")" + return descriptor, nil +} diff --git a/pkg/component/fake/shipper/main.go b/pkg/component/fake/shipper/main.go new file mode 100644 index 00000000000..347b9365090 --- /dev/null +++ b/pkg/component/fake/shipper/main.go @@ -0,0 +1,514 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
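// Editorial sketch, not part of the patch: the server-side counterpart,
// showing how createListener above (listener.go / listener_windows.go) is
// expected to be combined with the generated FakeEventProtocol registration.
// The name serveFakeEvents and its parameters are hypothetical, and the
// package clause assumes the sketch sits alongside listener.go; the real
// wiring is in newFakeShipperInput later in this file, driven by the parsed
// FakeShipperConfig.
package main

import (
	"crypto/tls"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	"github.com/elastic/elastic-agent/pkg/component/fake/common"
)

// serveFakeEvents listens on addr (a unix:// socket path on Unix-like
// systems, a named pipe on Windows) and serves the fake event protocol over
// TLS using the PEM-encoded certificate and key.
func serveFakeEvents(addr, certPEM, keyPEM string, handler common.FakeEventProtocolServer) (*grpc.Server, net.Listener, error) {
	lis, err := createListener(addr) // platform-specific listener above
	if err != nil {
		return nil, nil, err
	}
	cert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM))
	if err != nil {
		_ = lis.Close()
		return nil, nil, err
	}
	srv := grpc.NewServer(grpc.Creds(credentials.NewServerTLSFromCert(&cert)))
	common.RegisterFakeEventProtocolServer(srv, handler)
	go func() {
		// Serve returns once srv.Stop or srv.GracefulStop is called.
		_ = srv.Serve(lis)
	}()
	return srv, lis, nil
}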
+ +package main + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "os" + "os/signal" + "sync" + "syscall" + + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/rs/zerolog" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/pkg/component/fake/common" +) + +const ( + fakeActionOutput = "fake-action-output" + fakeShipper = "fake-shipper" + + healthyMsg = "Healthy" + stoppingMsg = "Stopping" + stoppedMsg = "Stopped" + + recordActionEventID = "id" +) + +func main() { + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} + +func run() error { + logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + ver := client.VersionInfo{ + Name: fakeShipper, + Version: "1.0", + Meta: map[string]string{ + "shipper": fakeShipper, + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager(logger) + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) + } + } + } +} + +type unitKey struct { + unitType client.UnitType + unitID string +} + +type stateManager struct { + logger zerolog.Logger + unitsMx sync.RWMutex + units map[unitKey]runningUnit +} + +func newStateManager(logger zerolog.Logger) *stateManager { + return &stateManager{logger: logger, units: make(map[unitKey]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + k := newUnitKey(unit) + _, ok := s.units[k] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.units[k] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + existing, ok := s.units[newUnitKey(unit)] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + k := newUnitKey(unit) + _, ok := s.units[k] + if !ok { + return + } + delete(s.units, k) +} + +func (s *stateManager) received(ctx context.Context, event *common.Event) error { + var cnt map[string]interface{} + if event.Content != nil { + cnt = event.Content.AsMap() + } + s.logger.Trace().Fields(map[string]interface{}{ + 
"timestamp": event.Generated.AsTime(), + "content": cnt, + }).Msg("received event") + idRaw, ok := cnt[recordActionEventID] + if !ok { + return nil + } + id, ok := idRaw.(string) + if !ok { + return nil + } + s.unitsMx.RLock() + defer s.unitsMx.RUnlock() + for k, u := range s.units { + if ctx.Err() != nil { + return ctx.Err() + } + if k.unitType == client.UnitTypeOutput { + actionOutput, ok := u.(*fakeActionOutputRuntime) + if ok { + if actionOutput.received(ctx, id, event) { + // caught by output + break + } + } + } + } + return nil +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type fakeActionOutputRuntime struct { + logger zerolog.Logger + unit *client.Unit + cfg *proto.UnitExpectedConfig + + subsMx sync.RWMutex + subs map[string]chan *common.Event + + previousMx sync.RWMutex + previous map[string]*common.Event +} + +func newFakeActionOutputRuntime(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeActionOutputRuntime, error) { + logger = logger.Level(toZerologLevel(logLevel)) + + i := &fakeActionOutputRuntime{ + logger: logger, + unit: unit, + cfg: cfg, + subs: make(map[string]chan *common.Event), + previous: make(map[string]*common.Event), + } + + logger.Trace().Msg("registering record event action for unit") + unit.RegisterAction(&recordEventAction{i}) + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{logger}) + logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", healthyMsg).Msg("updating unit state") + _ = unit.UpdateState(client.UnitStateHealthy, healthyMsg, nil) + + return i, nil +} + +func (f *fakeActionOutputRuntime) Unit() *client.Unit { + return f.unit +} + +func (f *fakeActionOutputRuntime) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + go func() { + f.cleanup() + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + }() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fakeActionOutput { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + // nothing to really do + return nil +} + +func (f *fakeActionOutputRuntime) subscribe(id string) <-chan *common.Event { + f.previousMx.RLock() + e, ok := f.previous[id] + if ok { + f.previousMx.RUnlock() + f.logger.Trace().Str(recordActionEventID, id).Msg("event already received; directly sending to subscription") + c := make(chan *common.Event, 1) + c <- e + return c + } + f.previousMx.RUnlock() + + f.subsMx.Lock() + defer f.subsMx.Unlock() + c, ok := f.subs[id] + if ok { + return c + } + c = make(chan *common.Event, 1) + f.subs[id] = c + f.logger.Trace().Str(recordActionEventID, id).Msg("subscribing for an event") + return c +} + +func (f *fakeActionOutputRuntime) unsubscribe(id string) { + f.subsMx.Lock() + defer f.subsMx.Unlock() + f.logger.Trace().Str(recordActionEventID, id).Msg("unsubscribing for an event") + delete(f.subs, id) +} + +func (f *fakeActionOutputRuntime) cleanup() { + f.subsMx.Lock() + defer f.subsMx.Unlock() + for k, c := range f.subs { + 
close(c)
+		delete(f.subs, k)
+	}
+}
+
+func (f *fakeActionOutputRuntime) received(ctx context.Context, id string, event *common.Event) bool {
+	f.subsMx.RLock()
+	defer f.subsMx.RUnlock()
+	c, ok := f.subs[id]
+	if ok {
+		f.logger.Trace().Str("id", id).Msg("subscription exists for event id")
+		f.previousMx.Lock()
+		f.previous[id] = event
+		f.previousMx.Unlock()
+		select {
+		case <-ctx.Done():
+			return false
+		case c <- event:
+			return true
+		}
+	}
+	f.logger.Trace().Str("id", id).Msg("no subscription exists for event id")
+	return false
+}
+
+// recordEventAction is an action that returns a result only once an event comes over the fake shipper protocol
+type recordEventAction struct {
+	f *fakeActionOutputRuntime
+}
+
+func (r *recordEventAction) Name() string {
+	return "record_event"
+}
+
+func (r *recordEventAction) Execute(ctx context.Context, params map[string]interface{}) (map[string]interface{}, error) {
+	eventIDRaw, ok := params[recordActionEventID]
+	if !ok {
+		return nil, fmt.Errorf("missing required 'id' parameter")
+	}
+	eventID, ok := eventIDRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("'id' parameter not string type, got %T", eventIDRaw)
+	}
+	r.f.logger.Trace().Str(recordActionEventID, eventID).Msg("registering record event action")
+	c := r.f.subscribe(eventID)
+	defer r.f.unsubscribe(eventID)
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case e, ok := <-c:
+		if !ok {
+			return nil, fmt.Errorf("never received event")
+		}
+		r.f.logger.Trace().Fields(map[string]interface{}{
+			"timestamp": e.Generated.AsTime(),
+			"content":   e.Content.AsMap(),
+		}).Msg("record_event action got subscribed event")
+		return map[string]interface{}{
+			"timestamp": e.Generated.String(),
+			"event":     e.Content.AsMap(),
+		}, nil
+	}
+}
+
+type fakeShipperInput struct {
+	common.UnimplementedFakeEventProtocolServer
+
+	logger  zerolog.Logger
+	manager *stateManager
+	unit    *client.Unit
+	cfg     *proto.UnitExpectedConfig
+
+	srv *grpc.Server
+	wg  errgroup.Group
+}
+
+func newFakeShipperInput(logger zerolog.Logger, logLevel client.UnitLogLevel, manager *stateManager, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeShipperInput, error) {
+	logger = logger.Level(toZerologLevel(logLevel))
+
+	i := &fakeShipperInput{
+		logger:  logger,
+		manager: manager,
+		unit:    unit,
+		cfg:     cfg,
+	}
+
+	srvCfg, err := common.ParseFakeShipperConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Info().Str("server", srvCfg.Server).Msg("starting GRPC fake shipper server")
+	lis, err := createListener(srvCfg.Server)
+	if err != nil {
+		return nil, err
+	}
+	if srvCfg.TLS == nil || srvCfg.TLS.Cert == "" || srvCfg.TLS.Key == "" {
+		return nil, fmt.Errorf("ssl configuration missing")
+	}
+	cert, err := tls.X509KeyPair([]byte(srvCfg.TLS.Cert), []byte(srvCfg.TLS.Key))
+	if err != nil {
+		return nil, err
+	}
+	srv := grpc.NewServer(grpc.Creds(credentials.NewServerTLSFromCert(&cert)))
+	i.srv = srv
+	common.RegisterFakeEventProtocolServer(srv, i)
+	i.wg.Go(func() error {
+		return srv.Serve(lis)
+	})
+
+	logger.Trace().Msg("registering kill action for unit")
+	unit.RegisterAction(&killAction{logger})
+	logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", healthyMsg).Msg("updating unit state")
+	_ = unit.UpdateState(client.UnitStateHealthy, healthyMsg, nil)
+
+	return i, nil
+}
+
+func (f *fakeShipperInput) Unit() *client.Unit {
+	return f.unit
+}
+
+func (f *fakeShipperInput) Update(u *client.Unit) error {
+	expected, _, config := u.Expected()
+	if expected == client.UnitStateStopped {
+
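+		// note: the GRPC server is stopped in the background so the Stopping state can be reported immediately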
// agent is requesting this to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + go func() { + if f.srv != nil { + f.srv.Stop() + f.wg.Wait() + f.srv = nil + } + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + }() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fakeActionOutput { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + // nothing to really do + return nil +} + +func (f *fakeShipperInput) SendEvent(ctx context.Context, event *common.Event) (*common.EventResponse, error) { + err := f.manager.received(ctx, event) + if err != nil { + return nil, err + } + return &common.EventResponse{}, nil +} + +// killAction is an action that causes the whole component to exit (used in testing to simulate crashes) +type killAction struct { + logger zerolog.Logger +} + +func (s *killAction) Name() string { + return "kill" +} + +func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { + s.logger.Trace().Msg("executing kill action") + os.Exit(1) + return nil, nil +} + +func newRunningUnit(logger zerolog.Logger, manager *stateManager, unit *client.Unit) (runningUnit, error) { + _, logLevel, config := unit.Expected() + if config.Type == "" { + return nil, fmt.Errorf("unit config type empty") + } + if unit.Type() == client.UnitTypeOutput { + switch config.Type { + case fakeActionOutput: + return newFakeActionOutputRuntime(logger, logLevel, unit, config) + } + return nil, fmt.Errorf("unknown output unit config type: %s", config.Type) + } else if unit.Type() == client.UnitTypeInput { + switch config.Type { + case fakeShipper: + return newFakeShipperInput(logger, logLevel, manager, unit, config) + } + return nil, fmt.Errorf("unknown input unit config type: %s", config.Type) + } + return nil, fmt.Errorf("unknown unit type: %+v", unit.Type()) +} + +func newUnitKey(unit *client.Unit) unitKey { + return unitKey{ + unitType: unit.Type(), + unitID: unit.ID(), + } +} + +func toZerologLevel(level client.UnitLogLevel) zerolog.Level { + switch level { + case client.UnitLogLevelError: + return zerolog.ErrorLevel + case client.UnitLogLevelWarn: + return zerolog.WarnLevel + case client.UnitLogLevelInfo: + return zerolog.InfoLevel + case client.UnitLogLevelDebug: + return zerolog.DebugLevel + case client.UnitLogLevelTrace: + return zerolog.TraceLevel + } + return zerolog.InfoLevel +} diff --git a/pkg/component/input_spec.go b/pkg/component/input_spec.go index 0a45a830cd7..335d3d60201 100644 --- a/pkg/component/input_spec.go +++ b/pkg/component/input_spec.go @@ -17,6 +17,7 @@ type InputSpec struct { Description string `config:"description" yaml:"description" validate:"required"` Platforms []string `config:"platforms" yaml:"platforms" validate:"required,min=1"` Outputs []string `config:"outputs" yaml:"outputs" validate:"required,min=1"` + Shippers []string `config:"shippers" yaml:"shippers"` Runtime RuntimeSpec `config:"runtime" yaml:"runtime"` Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"` @@ -45,6 +46,13 @@ func (s *InputSpec) Validate() error { } } } + for i, a := range s.Shippers { + for j, b := range s.Shippers { + if i != j && a == b { + return fmt.Errorf("input 
'%s' defines the shipper '%s' more than once", s.Name, a) + } + } + } for idx, prevention := range s.Runtime.Preventions { _, err := eql.New(prevention.Condition) if err != nil { diff --git a/pkg/component/load.go b/pkg/component/load.go index 4144a353172..be197bec288 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -24,6 +24,10 @@ var ( ErrInputNotSupported = newError("input not supported") // ErrInputNotSupportedOnPlatform is returned when the input is supported but not on this platform ErrInputNotSupportedOnPlatform = newError("input not supported on this platform") + // ErrOutputNotSupported is returned when the output is not supported on any platform + ErrOutputNotSupported = newError("output not supported") + // ErrOutputNotSupportedOnPlatform is returned when the input is supported but not on this platform + ErrOutputNotSupportedOnPlatform = newError("output not supported on this platform") ) // InputRuntimeSpec returns the specification for running this input on the current platform. @@ -34,6 +38,14 @@ type InputRuntimeSpec struct { Spec InputSpec `yaml:"spec"` } +// ShipperRuntimeSpec returns the specification for running this shipper on the current platform. +type ShipperRuntimeSpec struct { + ShipperType string `yaml:"shipper_type"` + BinaryName string `yaml:"binary_name"` + BinaryPath string `yaml:"binary_path"` + Spec ShipperSpec `yaml:"spec"` +} + // RuntimeSpecs return all the specifications for inputs that are supported on the current platform. type RuntimeSpecs struct { // platform that was loaded @@ -47,6 +59,12 @@ type RuntimeSpecs struct { // aliasMapping maps aliases to real input name aliasMapping map[string]string + + // shipperSpecs only the shipper specs for the current platform + shipperSpecs map[string]ShipperRuntimeSpec + + // shipperOutputs maps the supported outputs of a shipper to a shippers name + shipperOutputs map[string][]string } type loadRuntimeOpts struct { @@ -78,9 +96,11 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp if err != nil { return RuntimeSpecs{}, err } - var types []string - mapping := make(map[string]InputRuntimeSpec) - aliases := make(map[string]string) + var inputTypes []string + inputSpecs := make(map[string]InputRuntimeSpec) + inputAliases := make(map[string]string) + shipperSpecs := make(map[string]ShipperRuntimeSpec) + shipperOutputs := make(map[string][]string) for _, match := range matches { binaryName := filepath.Base(match[:len(match)-len(specGlobPattern)+1]) binaryPath := match[:len(match)-len(specGlobPattern)+1] @@ -106,43 +126,66 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp return RuntimeSpecs{}, fmt.Errorf("failed reading spec %s: %w", match, err) } for _, input := range spec.Inputs { - if !containsStr(types, input.Name) { - types = append(types, input.Name) + if !containsStr(inputTypes, input.Name) { + inputTypes = append(inputTypes, input.Name) } if !containsStr(input.Platforms, platform.String()) { // input spec doesn't support this platform continue } - if existing, exists := mapping[input.Name]; exists { + if existing, exists := inputSpecs[input.Name]; exists { return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' already exists in spec '%s'", match, input.Name, existing.BinaryName) } - if existing, exists := aliases[input.Name]; exists { + if existing, exists := inputAliases[input.Name]; exists { return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' collides with an alias from another input 
'%s'", match, input.Name, existing) } for _, alias := range input.Aliases { - if existing, exists := mapping[alias]; exists { + if existing, exists := inputSpecs[alias]; exists { return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input in spec '%s'", match, alias, existing.BinaryName) } - if existing, exists := aliases[alias]; exists { + if existing, exists := inputAliases[alias]; exists { return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input alias for input '%s'", match, alias, existing) } } - mapping[input.Name] = InputRuntimeSpec{ + inputSpecs[input.Name] = InputRuntimeSpec{ InputType: input.Name, BinaryName: binaryName, BinaryPath: binaryPath, Spec: input, } for _, alias := range input.Aliases { - aliases[alias] = input.Name + inputAliases[alias] = input.Name + } + } + for _, shipper := range spec.Shippers { + // map the native outputs that the shipper supports + for _, output := range shipper.Outputs { + shippers, _ := shipperOutputs[output] + shippers = append(shippers, shipper.Name) + shipperOutputs[output] = shippers + } + if !containsStr(shipper.Platforms, platform.String()) { + // input spec doesn't support this platform (but shipper is still mapped into shipperOutputs) + continue + } + if existing, exists := shipperSpecs[shipper.Name]; exists { + return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': shipper '%s' already exists in spec '%s'", match, shipper.Name, existing.BinaryName) + } + shipperSpecs[shipper.Name] = ShipperRuntimeSpec{ + ShipperType: shipper.Name, + BinaryName: binaryName, + BinaryPath: binaryPath, + Spec: shipper, } } } return RuntimeSpecs{ - platform: platform, - inputTypes: types, - inputSpecs: mapping, - aliasMapping: aliases, + platform: platform, + inputTypes: inputTypes, + inputSpecs: inputSpecs, + aliasMapping: inputAliases, + shipperSpecs: shipperSpecs, + shipperOutputs: shipperOutputs, }, nil } @@ -169,6 +212,41 @@ func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) { return InputRuntimeSpec{}, ErrInputNotSupported } +// GetShipper returns the shipper runtime specification for this shipper on this platform. +func (r *RuntimeSpecs) GetShipper(shipperType string) (ShipperRuntimeSpec, bool) { + runtime, ok := r.shipperSpecs[shipperType] + return runtime, ok +} + +// GetShippers returns the shippers that support the outputType. +func (r *RuntimeSpecs) GetShippers(outputType string) ([]ShipperRuntimeSpec, error) { + shipperNames, ok := r.shipperOutputs[outputType] + if !ok { + // no shippers support that outputType + return nil, nil + } + platformErr := false + shippers := make([]ShipperRuntimeSpec, 0, len(shipperNames)) + for _, name := range shipperNames { + shipper, ok := r.shipperSpecs[name] + if !ok { + // not supported on this platform + platformErr = true + continue + } + shippers = append(shippers, shipper) + } + if len(shippers) > 0 { + return shippers, nil + } + if platformErr { + // supported by at least one shipper, but not on this platform + return nil, ErrOutputNotSupportedOnPlatform + } + // not supported by any shippers + return nil, ErrOutputNotSupported +} + // ServiceSpecs returns only the input specification that are based on the service runtime. 
func (r *RuntimeSpecs) ServiceSpecs() []InputRuntimeSpec { var services []InputRuntimeSpec diff --git a/pkg/component/output_spec.go b/pkg/component/output_spec.go deleted file mode 100644 index d7d80b9d074..00000000000 --- a/pkg/component/output_spec.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package component - -import "fmt" - -// OutputSpec is the specification for an output type. -type OutputSpec struct { - Name string `config:"name" yaml:"name" validate:"required"` - Description string `config:"description" yaml:"description" validate:"required"` - Platforms []string `config:"platforms" yaml:"platforms" validate:"required,min=1"` - - Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"` -} - -// Validate ensures correctness of output specification. -func (s *OutputSpec) Validate() error { - if s.Command == nil { - return fmt.Errorf("input %s must define either command or service", s.Name) - } - for i, a := range s.Platforms { - for j, b := range s.Platforms { - if i != j && a == b { - return fmt.Errorf("input %s defines the platform %s more than once", s.Name, a) - } - } - } - return nil -} diff --git a/pkg/component/outputs.go b/pkg/component/outputs.go deleted file mode 100644 index faaddbdfd8c..00000000000 --- a/pkg/component/outputs.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package component - -const ( - // Elasticsearch represents the elasticsearch output - Elasticsearch = "elasticsearch" - // Kafka represents the kafka output - Kafka = "kafka" - // Logstash represents the logstash output - Logstash = "logstash" - // Redis represents the redis output - Redis = "redis" - // Shipper represents support for using the elastic-agent-shipper - Shipper = "shipper" -) - -// Outputs defines the outputs that a component can support -var Outputs = []string{Elasticsearch, Kafka, Logstash, Redis, Shipper} diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 22c1898fcdc..2575a35d5f1 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -31,8 +31,8 @@ const ( runDirMod = 0770 - envAgentComponentID = "AGENT_COMPONENT_ID" - envAgentComponentInputType = "AGENT_COMPONENT_INPUT_TYPE" + envAgentComponentID = "AGENT_COMPONENT_ID" + envAgentComponentType = "AGENT_COMPONENT_TYPE" stateUnknownMessage = "Unknown" ) @@ -68,19 +68,21 @@ type CommandRuntime struct { // NewCommandRuntime creates a new command runtime for the provided component. 
func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { - if comp.Spec.Spec.Command == nil { - return nil, errors.New("must have command defined in specification") - } - return &CommandRuntime{ + c := &CommandRuntime{ current: comp, + monitor: monitor, ch: make(chan ComponentState), actionCh: make(chan actionMode), procCh: make(chan procState), compCh: make(chan component.Component), actionState: actionStop, state: newComponentState(&comp), - monitor: monitor, - }, nil + } + cmdSpec := c.getCommandSpec() + if cmdSpec == nil { + return nil, errors.New("must have command defined in specification") + } + return c, nil } // Run starts the runtime for the component. @@ -89,8 +91,9 @@ func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (Com // called before any of the other methods in the interface and once the context is done none of those methods should // ever be called again. func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { - checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin - restartPeriod := c.current.Spec.Spec.Command.Timeouts.Restart + cmdSpec := c.getCommandSpec() + checkinPeriod := cmdSpec.Timeouts.Checkin + restartPeriod := cmdSpec.Timeouts.Restart c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) defer t.Stop() @@ -269,19 +272,19 @@ func (c *CommandRuntime) start(comm Communicator) error { // already running return nil } - cmdSpec := c.current.Spec.Spec.Command + cmdSpec := c.getCommandSpec() env := make([]string, 0, len(cmdSpec.Env)+2) for _, e := range cmdSpec.Env { env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) } env = append(env, fmt.Sprintf("%s=%s", envAgentComponentID, c.current.ID)) - env = append(env, fmt.Sprintf("%s=%s", envAgentComponentInputType, c.current.Spec.InputType)) + env = append(env, fmt.Sprintf("%s=%s", envAgentComponentType, c.getSpecType())) uid, gid := os.Geteuid(), os.Getegid() workDir, err := c.workDir(uid, gid) if err != nil { return err } - path, err := filepath.Abs(c.current.Spec.BinaryPath) + path, err := filepath.Abs(c.getSpecBinaryPath()) if err != nil { return fmt.Errorf("failed to determine absolute path: %w", err) } @@ -293,7 +296,7 @@ func (c *CommandRuntime) start(comm Communicator) error { if err := c.monitor.Prepare(); err != nil { return err } - args := c.monitor.EnrichArgs(c.current.ID, c.current.Spec.BinaryName, cmdSpec.Args) + args := c.monitor.EnrichArgs(c.current.ID, c.getSpecBinaryName(), cmdSpec.Args) // differentiate data paths dataPath := filepath.Join(paths.Home(), "run", c.current.ID) @@ -331,7 +334,7 @@ func (c *CommandRuntime) stop(ctx context.Context) error { // cleanup reserved resources related to monitoring defer c.monitor.Cleanup(c.current.ID) //nolint:errcheck // this is ok - cmdSpec := c.current.Spec.Spec.Command + cmdSpec := c.getCommandSpec() go func(info *process.Info, timeout time.Duration) { t := time.NewTimer(timeout) defer t.Stop() @@ -409,6 +412,46 @@ func (c *CommandRuntime) workDir(uid int, gid int) (string, error) { return path, nil } +func (c *CommandRuntime) getSpecType() string { + if c.current.InputSpec != nil { + return c.current.InputSpec.InputType + } + if c.current.ShipperSpec != nil { + return c.current.ShipperSpec.ShipperType + } + return "" +} + +func (c *CommandRuntime) getSpecBinaryName() string { + if c.current.InputSpec != nil { + return c.current.InputSpec.BinaryName + } + if c.current.ShipperSpec != nil { + return 
c.current.ShipperSpec.BinaryName + } + return "" +} + +func (c *CommandRuntime) getSpecBinaryPath() string { + if c.current.InputSpec != nil { + return c.current.InputSpec.BinaryPath + } + if c.current.ShipperSpec != nil { + return c.current.ShipperSpec.BinaryPath + } + return "" +} + +func (c *CommandRuntime) getCommandSpec() *component.CommandSpec { + if c.current.InputSpec != nil { + return c.current.InputSpec.Spec.Command + } + if c.current.ShipperSpec != nil { + return c.current.ShipperSpec.Spec.Command + } + return nil +} + func attachOutErr(cmd *exec.Cmd) error { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index d713a9a10c4..0c4befc5e2f 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -56,6 +56,12 @@ type ComponentComponentState struct { State ComponentState `yaml:"state"` } +// ComponentUnitDiagnosticRequest used to request diagnostics from specific unit. +type ComponentUnitDiagnosticRequest struct { + Component component.Component + Unit component.Unit +} + // ComponentUnitDiagnostic provides a structure to map a component/unit to diagnostic results. type ComponentUnitDiagnostic struct { Component component.Component @@ -82,8 +88,9 @@ type Manager struct { waitMx sync.RWMutex waitReady map[string]waitForReady - mx sync.RWMutex - current map[string]*componentRuntimeState + mx sync.RWMutex + current map[string]*componentRuntimeState + shipperConns map[string]*shipperConn subMx sync.RWMutex subscriptions map[string][]*Subscription @@ -109,6 +116,7 @@ func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentI tracer: tracer, waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), + shipperConns: make(map[string]*shipperConn), subscriptions: make(map[string][]*Subscription), errCh: make(chan error), monitor: monitor, @@ -286,7 +294,7 @@ func (m *Manager) State() []ComponentComponentState { } // PerformAction executes an action on a unit. -func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { +func (m *Manager) PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { id, err := uuid.NewV4() if err != nil { return nil, err @@ -298,7 +306,7 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s return nil, err } } - runtime := m.getRuntimeFromUnit(unit) + runtime := m.getRuntimeFromUnit(comp, unit) if runtime == nil { return nil, ErrNoUnit } @@ -345,21 +353,21 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then // it performs diagnostics for all current units. 
-func (m *Manager) PerformDiagnostics(ctx context.Context, units ...component.Unit) []ComponentUnitDiagnostic { +func (m *Manager) PerformDiagnostics(ctx context.Context, req ...ComponentUnitDiagnosticRequest) []ComponentUnitDiagnostic { // build results from units var results []ComponentUnitDiagnostic - if len(units) > 0 { - for _, u := range units { - r := m.getRuntimeFromUnit(u) + if len(req) > 0 { + for _, q := range req { + r := m.getRuntimeFromUnit(q.Component, q.Unit) if r == nil { results = append(results, ComponentUnitDiagnostic{ - Unit: u, + Unit: q.Unit, Err: ErrNoUnit, }) } else { results = append(results, ComponentUnitDiagnostic{ Component: r.currComp, - Unit: u, + Unit: q.Unit, }) } } @@ -395,7 +403,7 @@ func (m *Manager) PerformDiagnostics(ctx context.Context, units ...component.Uni // already in error don't perform diagnostics continue } - diag, err := m.performDiagAction(ctx, r.Unit) + diag, err := m.performDiagAction(ctx, r.Component, r.Unit) if err != nil { r.Err = err } else { @@ -608,6 +616,13 @@ func (m *Manager) update(components []component.Component, teardown bool) error m.mx.Lock() defer m.mx.Unlock() + // prepare the components to add consistent shipper connection information between + // the connected components in the model + err := m.connectShippers(components) + if err != nil { + return err + } + touched := make(map[string]bool) for _, comp := range components { touched[comp.ID] = true @@ -742,13 +757,15 @@ func (m *Manager) getRuntimeFromToken(token string) *componentRuntimeState { return nil } -func (m *Manager) getRuntimeFromUnit(unit component.Unit) *componentRuntimeState { +func (m *Manager) getRuntimeFromUnit(comp component.Component, unit component.Unit) *componentRuntimeState { m.mx.RLock() defer m.mx.RUnlock() - for _, comp := range m.current { - for _, u := range comp.currComp.Units { - if u.Type == unit.Type && u.ID == unit.ID { - return comp + for _, c := range m.current { + if c.currComp.ID == comp.ID { + for _, u := range c.currComp.Units { + if u.Type == unit.Type && u.ID == unit.ID { + return c + } } } } @@ -769,7 +786,7 @@ func (m *Manager) getListenAddr() string { return m.listenAddr } -func (m *Manager) performDiagAction(ctx context.Context, unit component.Unit) ([]*proto.ActionDiagnosticUnitResult, error) { +func (m *Manager) performDiagAction(ctx context.Context, comp component.Component, unit component.Unit) ([]*proto.ActionDiagnosticUnitResult, error) { ctx, cancel := context.WithTimeout(ctx, diagnosticTimeout) defer cancel() @@ -778,7 +795,7 @@ func (m *Manager) performDiagAction(ctx context.Context, unit component.Unit) ([ return nil, err } - runtime := m.getRuntimeFromUnit(unit) + runtime := m.getRuntimeFromUnit(comp, unit) if runtime == nil { return nil, ErrNoUnit } diff --git a/pkg/component/runtime/manager_shipper.go b/pkg/component/runtime/manager_shipper.go new file mode 100644 index 00000000000..bb6cb7678e8 --- /dev/null +++ b/pkg/component/runtime/manager_shipper.go @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
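+
+// This file wires components to their shipper: connectShippers gives every running shipper
+// component a local GRPC address and a certificate authority, generates a certificate pair per
+// connected input unit, and injects the address and TLS material into the unit configurations
+// on both sides of the connection.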
+
+package runtime
+
+import (
+	"fmt"
+
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+	"github.com/elastic/elastic-agent/internal/pkg/core/authority"
+	"github.com/elastic/elastic-agent/pkg/component"
+)
+
+func (m *Manager) connectShippers(components []component.Component) error {
+	// ensure that all shipper components have created connection information (must happen before we connect the units)
+	shippersTouched := make(map[string]bool)
+	for i, comp := range components {
+		if comp.ShipperSpec != nil {
+			// running shipper (ensure connection information is created)
+			shippersTouched[comp.ID] = true
+			conn, ok := m.shipperConns[comp.ID]
+			if !ok {
+				ca, err := authority.NewCA()
+				if err != nil {
+					return fmt.Errorf("failed to create connection CA for shipper %q: %w", comp.ID, err)
+				}
+				conn = &shipperConn{
+					addr:  getShipperAddr(comp.ID),
+					ca:    ca,
+					pairs: make(map[string]*authority.Pair),
+				}
+				m.shipperConns[comp.ID] = conn
+			}
+
+			// each input unit needs its own certificate pair for connecting to the shipper
+			pairsTouched := make(map[string]bool)
+			for j, unit := range comp.Units {
+				if unit.Type == client.UnitTypeInput {
+					pairsTouched[unit.ID] = true
+					pair, err := pairGetOrCreate(conn, unit.ID)
+					if err != nil {
+						return fmt.Errorf("failed to get/create certificate pair for shipper %q/%q: %w", comp.ID, unit.ID, err)
+					}
+					cfg, cfgErr := injectShipperConn(unit.Config, conn.addr, conn.ca, pair)
+					unit.Config = cfg
+					unit.Err = cfgErr
+					comp.Units[j] = unit
+				}
+			}
+
+			// clean up any pairs that are no longer used
+			for pairID := range conn.pairs {
+				touch, _ := pairsTouched[pairID]
+				if !touch {
+					delete(conn.pairs, pairID)
+				}
+			}
+			components[i] = comp
+		}
+	}
+
+	// clean up any shippers that are no longer used
+	for shipperID := range m.shipperConns {
+		touch, _ := shippersTouched[shipperID]
+		if !touch {
+			delete(m.shipperConns, shipperID)
+		}
+	}
+
+	// connect the output units with the same connection information
+	for i, comp := range components {
+		if comp.Shipper != nil {
+			conn, ok := m.shipperConns[comp.Shipper.ComponentID]
+			if !ok {
+				return fmt.Errorf("component %q references a non-existing shipper %q", comp.ID, comp.Shipper.ComponentID)
+			}
+			pair, ok := conn.pairs[comp.ID]
+			if !ok {
+				return fmt.Errorf("component %q references shipper %q that doesn't know about the component", comp.ID, comp.Shipper.ComponentID)
+			}
+			for j, unit := range comp.Units {
+				if unit.Type == client.UnitTypeOutput {
+					cfg, cfgErr := injectShipperConn(unit.Config, conn.addr, conn.ca, pair)
+					unit.Config = cfg
+					unit.Err = cfgErr
+					comp.Units[j] = unit
+				}
+			}
+			components[i] = comp
+		}
+	}
+
+	return nil
+}
+
+func pairGetOrCreate(conn *shipperConn, pairID string) (*authority.Pair, error) {
+	var err error
+	pair, ok := conn.pairs[pairID]
+	if ok {
+		return pair, nil
+	}
+	pair, err = conn.ca.GeneratePairWithName(pairID)
+	if err != nil {
+		return nil, err
+	}
+	conn.pairs[pairID] = pair
+	return pair, nil
+}
+
+func injectShipperConn(cfg *proto.UnitExpectedConfig, addr string, ca *authority.CertificateAuthority, pair *authority.Pair) (*proto.UnitExpectedConfig, error) {
+	if cfg == nil {
+		// unit configuration had an error generating (do nothing)
+		return cfg, nil
+	}
+	source := cfg.Source.AsMap()
+	source["server"] = addr
+	source["ssl"] = map[string]interface{}{
+		"certificate_authorities": []interface{}{
+			string(ca.Crt()),
+		},
+		"certificate": string(pair.Crt),
+		"key":         string(pair.Key),
+	}
+	return component.ExpectedConfig(source)
+}
diff --git
a/pkg/component/runtime/manager_shipper_unix.go b/pkg/component/runtime/manager_shipper_unix.go
new file mode 100644
index 00000000000..f3805a3ebb4
--- /dev/null
+++ b/pkg/component/runtime/manager_shipper_unix.go
@@ -0,0 +1,33 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build !windows
+// +build !windows
+
+package runtime
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"path/filepath"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+)
+
+func getShipperAddr(componentID string) string {
+	// when installed the address is fixed to a location
+	if info.RunningInstalled() {
+		return fmt.Sprintf(paths.ShipperSocketPipePattern, componentID)
+	}
+
+	// unix socket path must be less than 104 characters
+	path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), fmt.Sprintf("elastic-agent-%s-pipe", componentID)))
+	if len(path) < 104 {
+		return path
+	}
+	// place in the global /tmp to ensure that it's small enough to fit; the current path is way too
+	// long to be used, but it still needs to be unique per Agent (in case multiple are running)
+	return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path)))
+}
diff --git a/pkg/component/runtime/manager_shipper_windows.go b/pkg/component/runtime/manager_shipper_windows.go
new file mode 100644
index 00000000000..bb45b4650dc
--- /dev/null
+++ b/pkg/component/runtime/manager_shipper_windows.go
@@ -0,0 +1,29 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
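+
+// On Windows the shipper address is a named pipe; when the Agent is not installed the pipe name
+// embeds a hash of the data path so that multiple Agents on the same host get distinct pipes.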
+ +//go:build windows +// +build windows + +package runtime + +import ( + "crypto/sha256" + "fmt" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +func getShipperAddr(componentID string) string { + // when installed the address is fixed to a location + if info.RunningInstalled() { + return fmt.Sprintf(paths.ShipperSocketPipePattern, componentID) + } + + // not install, adjust the path based on data path + data := paths.Data() + // entire string cannot be longer than 256 characters, this forces the + // length to always be 87 characters (but unique per data path) + return fmt.Sprintf(`\\.\pipe\elastic-agent-%x-%s-pipe`, sha256.Sum256([]byte(data)), componentID) +} diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index b71a24c35e0..a26c9f037a4 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" "go.elastic.co/apm/apmtest" @@ -43,6 +45,16 @@ var ( }, }, } + fakeShipperSpec = component.ShipperSpec{ + Name: "fake-shipper", + Command: &component.CommandSpec{ + Timeouts: component.CommandTimeoutSpec{ + Checkin: 30 * time.Second, + Restart: 10 * time.Millisecond, // quick restart during tests + Stop: 30 * time.Second, + }, + }, + } ) func TestManager_SimpleComponentErr(t *testing.T) { @@ -169,10 +181,10 @@ func TestManager_FakeInput_StartStop(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -294,10 +306,10 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -465,10 +477,10 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -620,10 +632,10 @@ func TestManager_FakeInput_Configure(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -746,10 +758,10 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -904,10 +916,10 @@ func TestManager_FakeInput_ActionState(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: 
component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -949,7 +961,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { // subscription channel go func() { actionCtx, actionCancel := context.WithTimeout(context.Background(), 15*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "set_state", map[string]interface{}{ "state": int(client.UnitStateDegraded), "message": "Action Set Degraded", }) @@ -1034,10 +1046,10 @@ func TestManager_FakeInput_Restarts(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1085,7 +1097,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { if !killed { killed = true actionCtx, actionCancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - _, err := m.PerformAction(actionCtx, comp.Units[0], "kill", nil) + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "kill", nil) actionCancel() if !errors.Is(err, context.DeadlineExceeded) { // should have got deadline exceeded for this call @@ -1173,10 +1185,10 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1294,10 +1306,10 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1336,7 +1348,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) } else if unit.State == client.UnitStateHealthy { actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "invalid_missing_action", nil) actionCancel() if err == nil { subErrCh <- fmt.Errorf("should have returned an error") @@ -1418,7 +1430,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") runtimeSpec := component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", @@ -1427,8 +1439,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { } components := []component.Component{ { - ID: "fake-0", - Spec: runtimeSpec, + ID: "fake-0", + InputSpec: &runtimeSpec, Units: []component.Unit{ { ID: "fake-input-0-0", @@ -1460,8 +1472,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { }, }, { - ID: "fake-1", - Spec: runtimeSpec, + ID: "fake-1", + InputSpec: &runtimeSpec, Units: []component.Unit{ { ID: "fake-input-1-0", @@ -1493,8 +1505,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { }, }, { - ID: "fake-2", - Spec: runtimeSpec, + ID: "fake-2", + InputSpec: 
&runtimeSpec, Units: []component.Unit{ { ID: "fake-input-2-0", @@ -1630,10 +1642,10 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1687,7 +1699,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { } actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "invalid_missing_action", nil) actionCancel() if err == nil { subErrCh <- fmt.Errorf("should have returned an error") @@ -1745,6 +1757,281 @@ LOOP: require.NoError(t, err) } +func TestManager_FakeShipper(t *testing.T) { + /* + This test runs one instance of the fake/component and an instance of the fake/shipper. They get connected + together, and it ensures that a test event is sent between each instance. Below is a breakdown on how this + test performs this work and ensures that an event is sent between the two instances. + + 1. Wait for the shipper input (GRPC server) is healthy. + 2. Wait for the component output (GRPC client) is healthy. + 3. Create a unique ID to use for the event ID. + 4. Send `record_event` action to the shipper input (GRPC server); won't return until it actually gets the event. + 5. Send `send_event` action to the component fake input (GRPC client); returns once sent. + 6. Wait for `record_event` action to return from the shipper input (GRPC server). + */ + + testPaths(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) + require.NoError(t, err) + errCh := make(chan error) + go func() { + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + componentPath := testBinary(t, "component") + shipperPath := testBinary(t, "shipper") + comps := []component.Component{ + { + ID: "fake-default", + InputSpec: &component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: componentPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelTrace, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }), + }, + { + ID: "fake-default", + Type: client.UnitTypeOutput, + LogLevel: client.UnitLogLevelTrace, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake-shipper", + }), + }, + }, + Shipper: &component.ShipperReference{ + ComponentID: "fake-shipper-default", + UnitID: "fake-default", + }, + }, + { + ID: "fake-shipper-default", + ShipperSpec: &component.ShipperRuntimeSpec{ + ShipperType: "fake-shipper", + BinaryName: "", + BinaryPath: shipperPath, + Spec: fakeShipperSpec, + }, + Units: []component.Unit{ + { + ID: "fake-default", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelTrace, + Config: component.MustExpectedConfig(map[string]interface{}{ + 
"id": "fake-default", + "type": "fake-shipper", + "units": []interface{}{ + map[string]interface{}{ + "id": "fake-input", + "config": map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }, + }, + }, + }), + }, + { + ID: "fake-default", + Type: client.UnitTypeOutput, + LogLevel: client.UnitLogLevelTrace, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake-action-output", + }), + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + shipperOn := false + compConnected := false + + sendEvent := func() (bool, error) { + if !shipperOn || !compConnected { + // wait until connected + return false, nil + } + + // send an event between component and the fake shipper + eventID, err := uuid.NewV4() + if err != nil { + return true, err + } + + // wait for the event on the shipper side + gotEvt := make(chan error) + go func() { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 10*time.Second) + _, err := m.PerformAction(actionCtx, comps[1], comps[1].Units[1], "record_event", map[string]interface{}{ + "id": eventID.String(), + }) + actionCancel() + gotEvt <- err + }() + + // send the fake event + actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) + _, err = m.PerformAction(actionCtx, comps[0], comps[0].Units[0], "send_event", map[string]interface{}{ + "id": eventID.String(), + }) + actionCancel() + if err != nil { + return true, err + } + + err = <-gotEvt + if err == nil { + t.Logf("successfully sent event from fake input to fake shipper, event ID: %s", eventID.String()) + } + return true, err + } + + shipperSub := m.Subscribe(subCtx, "fake-shipper-default") + compSub := m.Subscribe(subCtx, "fake-default") + for { + select { + case <-subCtx.Done(): + return + case state := <-shipperSub.Ch(): + t.Logf("shipper state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("shipper failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-default"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + shipperOn = true + ok, err := sendEvent() + if ok { + if err != nil { + subErrCh <- err + } else { + // successful; turn it all off + err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } + } + } else if unit.State == client.UnitStateStopped { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + case state := <-compSub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeOutput, UnitID: "fake-default"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + compConnected = true + ok, err := sendEvent() + if ok { + if err != nil { + subErrCh <- err + } else { + // successful; turn it all off + 
err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } + } + } else if unit.State == client.UnitStateStopped { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting || unit.State == client.UnitStateConfiguring { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update(comps) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + func newErrorLogger(t *testing.T) *logger.Logger { t.Helper() @@ -1796,7 +2083,10 @@ func testPaths(t *testing.T) { versioned := paths.IsVersionHome() topPath := paths.Top() - tmpDir := t.TempDir() + tmpDir, err := os.MkdirTemp("", "at-*") + if err != nil { + t.Fatalf("failed to create temp directory: %s", err) + } paths.SetVersionHome(false) paths.SetTop(tmpDir) @@ -1807,11 +2097,11 @@ func testPaths(t *testing.T) { }) } -func testBinary(t *testing.T) string { +func testBinary(t *testing.T, name string) string { t.Helper() var err error - binaryPath := filepath.Join("..", "fake", "fake") + binaryPath := filepath.Join("..", "fake", name, name) binaryPath, err = filepath.Abs(binaryPath) if err != nil { t.Fatalf("failed abs %s: %s", binaryPath, err) diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index e06702b2141..0ed1b46c26c 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -57,12 +57,23 @@ type ComponentRuntime interface { func NewComponentRuntime(comp component.Component, logger *logger.Logger, monitor MonitoringManager) (ComponentRuntime, error) { if comp.Err != nil { return NewFailedRuntime(comp) - } else if comp.Spec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) - } else if comp.Spec.Spec.Service != nil { - return NewServiceRuntime(comp, logger) } - return nil, errors.New("unknown component runtime") + if comp.InputSpec != nil { + if comp.InputSpec.Spec.Command != nil { + return NewCommandRuntime(comp, monitor) + } + if comp.InputSpec.Spec.Service != nil { + return NewServiceRuntime(comp, logger) + } + return nil, errors.New("unknown component runtime") + } + if comp.ShipperSpec != nil { + if comp.ShipperSpec.Spec.Command != nil { + return NewCommandRuntime(comp, monitor) + } + return nil, errors.New("components for shippers can only support command runtime") + } + return nil, errors.New("component missing specification") } type componentRuntimeState struct { diff --git a/pkg/component/runtime/service.go b/pkg/component/runtime/service.go index 9c55b9fa4d7..41cf1b517cc 100644 --- a/pkg/component/runtime/service.go +++ b/pkg/component/runtime/service.go @@ -23,8 +23,10 @@ const ( ) var ( + // ErrOperationSpecUndefined error for missing specification. 
ErrOperationSpecUndefined = errors.New("operation spec undefined") - ErrInvalidServiceSpec = errors.New("invalid service spec") + // ErrInvalidServiceSpec error invalid service specification. + ErrInvalidServiceSpec = errors.New("invalid service spec") ) type executeServiceCommandFunc func(ctx context.Context, log *logger.Logger, binaryPath string, spec *component.ServiceOperationsCommandSpec) error @@ -46,7 +48,13 @@ type ServiceRuntime struct { // NewServiceRuntime creates a new command runtime for the provided component. func NewServiceRuntime(comp component.Component, logger *logger.Logger) (ComponentRuntime, error) { - if comp.Spec.Spec.Service == nil { + if comp.ShipperSpec != nil { + return nil, errors.New("service runtime not supported for a shipper specification") + } + if comp.InputSpec == nil { + return nil, errors.New("service runtime requires an input specification to be defined") + } + if comp.InputSpec.Spec.Service == nil { return nil, errors.New("must have service defined in specification") } @@ -111,7 +119,7 @@ func (s *ServiceRuntime) Run(ctx context.Context, comm Communicator) (err error) // Start connection info if cis == nil { - cis, err = newConnInfoServer(s.log, comm, s.comp.Spec.Spec.Service.CPort) + cis, err = newConnInfoServer(s.log, comm, s.comp.InputSpec.Spec.Service.CPort) if err != nil { err = fmt.Errorf("failed to start connection info service %s: %w", s.name(), err) break @@ -321,7 +329,7 @@ func (s *ServiceRuntime) checkStatus(checkinPeriod time.Duration, lastCheckin *t } func (s *ServiceRuntime) checkinPeriod() time.Duration { - checkinPeriod := s.comp.Spec.Spec.Service.Timeouts.Checkin + checkinPeriod := s.comp.InputSpec.Spec.Service.Timeouts.Checkin if checkinPeriod == 0 { checkinPeriod = defaultCheckServiceStatusInterval } @@ -395,27 +403,27 @@ func (s *ServiceRuntime) compState(state client.UnitState, missedCheckins int) { } func (s *ServiceRuntime) name() string { - return s.comp.Spec.Spec.Name + return s.comp.InputSpec.Spec.Name } // check executes the service check command func (s *ServiceRuntime) check(ctx context.Context) error { - if s.comp.Spec.Spec.Service.Operations.Check == nil { - s.log.Errorf("missing check spec for %s service", s.comp.Spec.BinaryName) + if s.comp.InputSpec.Spec.Service.Operations.Check == nil { + s.log.Errorf("missing check spec for %s service", s.comp.InputSpec.BinaryName) return ErrOperationSpecUndefined } - s.log.Debugf("check if the %s is installed", s.comp.Spec.BinaryName) - return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Check) + s.log.Debugf("check if the %s is installed", s.comp.InputSpec.BinaryName) + return s.executeServiceCommandImpl(ctx, s.log, s.comp.InputSpec.BinaryPath, s.comp.InputSpec.Spec.Service.Operations.Check) } // install executes the service install command func (s *ServiceRuntime) install(ctx context.Context) error { - if s.comp.Spec.Spec.Service.Operations.Install == nil { - s.log.Errorf("missing install spec for %s service", s.comp.Spec.BinaryName) + if s.comp.InputSpec.Spec.Service.Operations.Install == nil { + s.log.Errorf("missing install spec for %s service", s.comp.InputSpec.BinaryName) return ErrOperationSpecUndefined } - s.log.Debugf("install %s service", s.comp.Spec.BinaryName) - return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Install) + s.log.Debugf("install %s service", s.comp.InputSpec.BinaryName) + return s.executeServiceCommandImpl(ctx, s.log, 
s.comp.InputSpec.BinaryPath, s.comp.InputSpec.Spec.Service.Operations.Install) } // uninstall executes the service uninstall command @@ -429,10 +437,10 @@ func UninstallService(ctx context.Context, log *logger.Logger, comp component.Co } func uninstallService(ctx context.Context, log *logger.Logger, comp component.Component, executeServiceCommandImpl executeServiceCommandFunc) error { - if comp.Spec.Spec.Service.Operations.Uninstall == nil { - log.Errorf("missing uninstall spec for %s service", comp.Spec.BinaryName) + if comp.InputSpec.Spec.Service.Operations.Uninstall == nil { + log.Errorf("missing uninstall spec for %s service", comp.InputSpec.BinaryName) return ErrOperationSpecUndefined } - log.Debugf("uninstall %s service", comp.Spec.BinaryName) - return executeServiceCommandImpl(ctx, log, comp.Spec.BinaryPath, comp.Spec.Spec.Service.Operations.Uninstall) + log.Debugf("uninstall %s service", comp.InputSpec.BinaryName) + return executeServiceCommandImpl(ctx, log, comp.InputSpec.BinaryPath, comp.InputSpec.Spec.Service.Operations.Uninstall) } diff --git a/pkg/component/runtime/shipper.go b/pkg/component/runtime/shipper.go new file mode 100644 index 00000000000..dd257ae3408 --- /dev/null +++ b/pkg/component/runtime/shipper.go @@ -0,0 +1,13 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import "github.com/elastic/elastic-agent/internal/pkg/core/authority" + +type shipperConn struct { + addr string + ca *authority.CertificateAuthority + pairs map[string]*authority.Pair +} diff --git a/pkg/component/shipper_spec.go b/pkg/component/shipper_spec.go new file mode 100644 index 00000000000..310bd17ec35 --- /dev/null +++ b/pkg/component/shipper_spec.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import "fmt" + +// ShipperSpec is the specification for a shipper type. +type ShipperSpec struct { + Name string `config:"name" yaml:"name" validate:"required"` + Description string `config:"description" yaml:"description" validate:"required"` + Platforms []string `config:"platforms" yaml:"platforms" validate:"required,min=1"` + Outputs []string `config:"outputs" yaml:"outputs" validate:"required,min=1"` + Runtime RuntimeSpec `config:"runtime" yaml:"runtime"` + + Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"` +} + +// Validate ensures correctness of output specification. +func (s *ShipperSpec) Validate() error { + if s.Command == nil { + return fmt.Errorf("shipper '%s' must define command (no other type is supported for shippers)", s.Name) + } + for i, a := range s.Platforms { + for j, b := range s.Platforms { + if i != j && a == b { + return fmt.Errorf("shipper '%s' defines the platform '%s' more than once", s.Name, a) + } + } + } + return nil +} diff --git a/pkg/component/spec.go b/pkg/component/spec.go index be20b92208c..e7ec47a5811 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -12,9 +12,10 @@ import ( // Spec a components specification. 
type Spec struct { - Name string `yaml:"name,omitempty"` - Version int `config:"version" yaml:"version" validate:"required"` - Inputs []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"` + Name string `yaml:"name,omitempty"` + Version int `config:"version" yaml:"version" validate:"required"` + Inputs []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"` + Shippers []ShipperSpec `config:"shippers,omitempty" yaml:"shippers,omitempty"` } // Validate ensures correctness of component specification. @@ -40,6 +41,24 @@ func (s *Spec) Validate() error { inputsToPlatforms[input.Name] = a } } + shippersToPlatforms := make(map[string][]string) + for i, shipper := range s.Shippers { + a, ok := shippersToPlatforms[shipper.Name] + if !ok { + shippersToPlatforms[shipper.Name] = make([]string, len(shipper.Platforms)) + copy(shippersToPlatforms[shipper.Name], shipper.Platforms) + continue + } + for _, platform := range shipper.Platforms { + for _, existing := range a { + if existing == platform { + return fmt.Errorf("shipper '%s' at shippers.%d defines the same platform as a previous definition", shipper.Name, i) + } + } + a = append(a, platform) + shippersToPlatforms[shipper.Name] = a + } + } return nil } diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index 07f3cb7666e..a5aaf39f910 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -1,170 +1,193 @@ -version: 2 -inputs: - - name: aws-cloudwatch - description: "AWS Cloudwatch" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${FILEBEAT_GOGC:100}" - - "-E" - - "filebeat.config.modules.enabled=false" - - name: aws-s3 - description: "AWS S3" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure-eventhub - description: "Azure Eventhub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry - description: "PCF Cloudfoundry" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: container - description: "Container logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: docker - aliases: - - log/docker - description: "Docker logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: gcp-pubsub - description: "GCP Pub-Sub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http_endpoint - description: "HTTP Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: httpjson - description: "HTTP JSON Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: journald - description: "Journald" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kafka - description: "Kafka" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: log - aliases: - - logfile - - event/file - description: "Logfile" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mqtt - description: "MQTT" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: netflow - description: "Netflow" - platforms: *platforms - outputs: *outputs - command: - 
args: *args - - name: o365audit - description: "Office 365 Audit" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis - aliases: - - log/redis_slowlog - description: "Redis" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syslog - aliases: - - log/syslog - description: "Syslog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: tcp - aliases: - - event/tcp - description: "TCP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: udp - aliases: - - event/udp - description: "UDP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: unix - description: "Unix Socket" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: winlog - description: "Winlog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: filestream - description: "Filestream" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: aws-cloudwatch + description: "AWS Cloudwatch" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + shippers: &shippers + - shipper + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${FILEBEAT_GOGC:100}" + - "-E" + - "filebeat.config.modules.enabled=false" + - name: aws-s3 + description: "AWS S3" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: azure-eventhub + description: "Azure Eventhub" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: cloudfoundry + description: "PCF Cloudfoundry" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: container + description: "Container logs" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: docker + aliases: + - log/docker + description: "Docker logs" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: gcp-pubsub + description: "GCP Pub-Sub" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: http_endpoint + description: "HTTP Endpoint" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: httpjson + description: "HTTP JSON Endpoint" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: journald + description: "Journald" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kafka + description: "Kafka" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: log + aliases: + - logfile + - event/file + description: "Logfile" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mqtt + description: "MQTT" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: netflow + description: "Netflow" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: o365audit + description: "Office 365 Audit" + platforms: 
*platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: redis + aliases: + - log/redis_slowlog + description: "Redis" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: syslog + aliases: + - log/syslog + description: "Syslog" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: tcp + aliases: + - event/tcp + description: "TCP" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: udp + aliases: + - event/udp + description: "UDP" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: unix + description: "Unix Socket" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: winlog + description: "Winlog" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: filestream + description: "Filestream" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index b160a4f29e7..b7c88ad4864 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -1,163 +1,187 @@ -version: 2 -inputs: - - name: beat/metrics - description: "Beat metrics" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${METRICBEAT_GOGC:100}" - - "-E" - - "metricbeat.config.modules.enabled=false" - - name: docker/metrics - description: "Docker metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: elasticsearch/metrics - description: "Elasticsearch metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kibana/metrics - description: "Kibana metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kubernetes/metrics - description: "Kubernetes metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: linux/metrics - description: "Linux metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: logstash/metrics - description: "Logstash metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mongodb/metrics - description: "Mongodb metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mysql/metrics - description: "MySQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: postgresql/metrics - description: "PostgreSQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis/metrics - description: "Redis metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: system/metrics - description: "System metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: uwsgi/metrics - description: "UWSGI metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: windows/metrics - description: "Windows metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: 
aws/metrics - description: "AWS metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: awsfargate/metrics - description: "AWS Fargate metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure/metrics - description: "Azure metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry/metrics - description: "PCF Cloudfoundry metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: containerd/metrics - description: "Containerd metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mssql/metrics - description: "Microsoft SQL Server metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: oracle/metrics - description: "Oracle Database metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syncgateway/metrics - description: "Couchbase Sync Gateway metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http/metrics - description: "HTTP metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: beat/metrics + description: "Beat metrics" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + shippers: &shippers + - shipper + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${METRICBEAT_GOGC:100}" + - "-E" + - "metricbeat.config.modules.enabled=false" + - name: docker/metrics + description: "Docker metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: elasticsearch/metrics + description: "Elasticsearch metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kibana/metrics + description: "Kibana metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kubernetes/metrics + description: "Kubernetes metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: linux/metrics + description: "Linux metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: logstash/metrics + description: "Logstash metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mongodb/metrics + description: "Mongodb metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mysql/metrics + description: "MySQL metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: postgresql/metrics + description: "PostgreSQL metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: redis/metrics + description: "Redis metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: system/metrics + description: "System metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: uwsgi/metrics + description: "UWSGI metrics" + platforms: *platforms + outputs: 
*outputs + shippers: *shippers + command: + args: *args + - name: windows/metrics + description: "Windows metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: aws/metrics + description: "AWS metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: awsfargate/metrics + description: "AWS Fargate metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: azure/metrics + description: "Azure metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: cloudfoundry/metrics + description: "PCF Cloudfoundry metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: containerd/metrics + description: "Containerd metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mssql/metrics + description: "Microsoft SQL Server metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: oracle/metrics + description: "Oracle Database metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: syncgateway/metrics + description: "Couchbase Sync Gateway metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: http/metrics + description: "HTTP metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args diff --git a/specs/shipper.spec.yml b/specs/shipper.spec.yml new file mode 100644 index 00000000000..b9cd74fe439 --- /dev/null +++ b/specs/shipper.spec.yml @@ -0,0 +1,18 @@ +version: 2 +shippers: + - name: shipper + description: "Elastic Agent Shipper" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: {}
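For reviewers who want to try the new validation locally, here is a minimal, hypothetical sketch (not part of this patch) exercising the two rules that ShipperSpec.Validate in pkg/component/shipper_spec.go enforces: a shipper must declare a command, and it may not list the same platform twice. It assumes the package is importable as github.com/elastic/elastic-agent/pkg/component and that an empty CommandSpec literal is enough to satisfy the command check at this level.

package main

import (
	"fmt"

	"github.com/elastic/elastic-agent/pkg/component"
)

func main() {
	// A shipper without a command section is rejected: command is the only
	// runtime type supported for shippers.
	missingCommand := component.ShipperSpec{
		Name:        "shipper",
		Description: "Elastic Agent Shipper",
		Platforms:   []string{"linux/amd64"},
		Outputs:     []string{"elasticsearch"},
	}
	fmt.Println(missingCommand.Validate()) // shipper 'shipper' must define command ...

	// Listing the same platform twice in a single definition is also rejected.
	duplicatePlatform := component.ShipperSpec{
		Name:        "shipper",
		Description: "Elastic Agent Shipper",
		Platforms:   []string{"linux/amd64", "linux/amd64"},
		Outputs:     []string{"elasticsearch"},
		Command:     &component.CommandSpec{},
	}
	fmt.Println(duplicatePlatform.Validate()) // shipper 'shipper' defines the platform 'linux/amd64' more than once
}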
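Similarly, a hypothetical sketch of the cross-definition check added to Spec.Validate in pkg/component/spec.go: two shipper entries sharing a name may not claim the same platform. It assumes a version 2 spec containing only shippers (no inputs) passes the rest of Validate, which is how specs/shipper.spec.yml above is laid out.

package main

import (
	"fmt"

	"github.com/elastic/elastic-agent/pkg/component"
)

func main() {
	// Two definitions of the 'shipper' shipper both claim linux/amd64.
	spec := component.Spec{
		Version: 2,
		Shippers: []component.ShipperSpec{
			{Name: "shipper", Description: "first", Platforms: []string{"linux/amd64"}, Outputs: []string{"elasticsearch"}, Command: &component.CommandSpec{}},
			{Name: "shipper", Description: "second", Platforms: []string{"linux/amd64"}, Outputs: []string{"elasticsearch"}, Command: &component.CommandSpec{}},
		},
	}
	// Expected: shipper 'shipper' at shippers.1 defines the same platform as a previous definition
	fmt.Println(spec.Validate())
}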