Support multi-log-forwarder
Supports enabling namespaced ClusterLogForwarder resources.
Enabling the feature requires redeploying the Cluster Logging Operator.
DebakelOrakel authored and Stephan Feurer committed Aug 13, 2024
1 parent 55866f5 commit f4dc9d0
Showing 12 changed files with 348 additions and 24 deletions.
3 changes: 3 additions & 0 deletions class/defaults.yml
@@ -73,6 +73,9 @@ parameters:
  clusterLogging: {}
  clusterLogForwarder: {}

  namespaceLogForwarder:
    enabled: false

  operatorResources:
    clusterLogging:
      requests:
112 changes: 92 additions & 20 deletions component/config_forwarding.libsonnet
@@ -1,6 +1,8 @@
local com = import 'lib/commodore.libjsonnet';
local kap = import 'lib/kapitan.libjsonnet';
local kube = import 'lib/kube.libjsonnet';
local lib = import 'lib/openshift4-logging.libsonnet';
local utils = import 'utils.libsonnet';

local inv = kap.inventory();
local params = inv.parameters.openshift4_logging;
@@ -198,37 +200,107 @@ local clusterLogForwarderSpec = std.foldl(
  },
) + com.makeMergeable(params.clusterLogForwarder);

// Unfold objects into array for ClusterLogForwarder resource.
local unfoldSpecs(specs) = {
  // Unfold objects into array.
  [if std.length(specs.inputs) > 0 then 'inputs']: [
    { name: name } + specs.inputs[name]
    for name in std.objectFields(specs.inputs)
  ],
  [if std.length(specs.outputs) > 0 then 'outputs']: [
    { name: name } + specs.outputs[name]
    for name in std.objectFields(specs.outputs)
  ],
  [if std.length(specs.pipelines) > 0 then 'pipelines']: [
    { name: name } + specs.pipelines[name]
    for name in std.objectFields(specs.pipelines)
  ],
} + {
  // Import remaining specs as is.
  [key]: specs[key]
  for key in std.objectFields(specs)
  if !std.member([ 'inputs', 'outputs', 'pipelines' ], key)
};

// ClusterLogForwarder:
// Create definitive ClusterLogForwarder resource from specs.
local clusterLogForwarder = lib.ClusterLogForwarder(params.namespace, 'instance') {
  spec: {
    // Unfold objects into array.
    [if std.length(clusterLogForwarderSpec.inputs) > 0 then 'inputs']: [
      { name: name } + clusterLogForwarderSpec.inputs[name]
      for name in std.objectFields(clusterLogForwarderSpec.inputs)
    ],
    [if std.length(clusterLogForwarderSpec.outputs) > 0 then 'outputs']: [
      { name: name } + clusterLogForwarderSpec.outputs[name]
      for name in std.objectFields(clusterLogForwarderSpec.outputs)
    ],
    [if std.length(clusterLogForwarderSpec.pipelines) > 0 then 'pipelines']: [
      { name: name } + clusterLogForwarderSpec.pipelines[name]
      for name in std.objectFields(clusterLogForwarderSpec.pipelines)
    ],
  } + {
    // Import remaining specs as is.
    [key]: clusterLogForwarderSpec[key]
    for key in std.objectFields(clusterLogForwarderSpec)
    if !std.member([ 'inputs', 'outputs', 'pipelines' ], key)
  },
  spec: unfoldSpecs(clusterLogForwarderSpec),
};

// namespaceLogForwarderIgnoreKeys
// List of keys to ignore in namespaceLogForwarder
local namespaceLogForwarderIgnoreKeys = [
  'enabled',
  'instance',
  'openshift-logging/instance',
];
// namespaceLogForwarder:
// Create namespaced LogForwarder resource from specs.
local namespaceLogForwarder = [
  local specs = { inputs: {}, outputs: {}, pipelines: {} } + com.makeMergeable(params.namespaceLogForwarder[forwarder]);
  local name = utils.namespacedName(forwarder).name;
  local namespace = utils.namespacedName(forwarder).namespace;
  local serviceAccount = std.get(specs, 'serviceAccountName', utils.namespacedName(forwarder).name);

  lib.ClusterLogForwarder(namespace, name) {
    spec: { serviceAccountName: serviceAccount } + com.makeMergeable(unfoldSpecs(specs)),
  }
  for forwarder in std.objectFields(params.namespaceLogForwarder)
  if !std.member(namespaceLogForwarderIgnoreKeys, forwarder)
];

// namespaceServiceAccount:
// Create ServiceAccount for namespaced LogForwarder specs.
local namespaceServiceAccount = [
  local specs = params.namespaceLogForwarder[forwarder];
  local namespace = utils.namespacedName(forwarder).namespace;
  local serviceAccount = std.get(specs, 'serviceAccountName', utils.namespacedName(forwarder).name);

  kube.ServiceAccount(serviceAccount) {
    metadata+: {
      namespace: namespace,
    },
  }
  for forwarder in std.objectFields(params.namespaceLogForwarder)
  if !std.member(namespaceLogForwarderIgnoreKeys, forwarder)
];

// namespaceRoleBinding:
// Create RoleBinding for namespaced LogForwarder.
local namespaceRoleBinding = [
  local specs = params.namespaceLogForwarder[forwarder];
  local namespace = utils.namespacedName(forwarder).namespace;
  local serviceAccount = std.get(specs, 'serviceAccountName', utils.namespacedName(forwarder).name);

  kube.RoleBinding(serviceAccount) {
    metadata+: {
      namespace: namespace,
    },
    roleRef: {
      apiGroup: 'rbac.authorization.k8s.io',
      kind: 'ClusterRole',
      name: 'collect-application-logs',
    },
    subjects: [ {
      kind: 'ServiceAccount',
      name: serviceAccount,
      namespace: namespace,
    } ],
  }
  for forwarder in std.objectFields(params.namespaceLogForwarder)
  if !std.member(namespaceLogForwarderIgnoreKeys, forwarder)
];

local enableLogForwarder = std.length(params.clusterLogForwarder) > 0 || std.get(legacyConfig, 'enabled', false);

// Define outputs below
if enableLogForwarder then
  {
    '31_cluster_logforwarding': clusterLogForwarder,
    [if std.length(params.namespaceLogForwarder) > 1 then '32_namespace_logforwarding']: namespaceLogForwarder,
    [if std.length(params.namespaceLogForwarder) > 1 then '32_namespace_serviceaccount']: namespaceServiceAccount,
    [if std.length(params.namespaceLogForwarder) > 1 then '32_namespace_rolebinding']: namespaceRoleBinding,
  }
else
std.trace(
2 changes: 1 addition & 1 deletion component/main.jsonnet
@@ -30,7 +30,7 @@ local operatorGroup = operatorlib.OperatorGroup('cluster-logging') {
    namespace: params.namespace,
  },
  spec: {
    targetNamespaces: [
    [if !params.namespaceLogForwarder.enabled then 'targetNamespaces']: [
      params.namespace,
    ],
  },
7 changes: 7 additions & 0 deletions component/utils.libsonnet
@@ -20,7 +20,14 @@ local isVersion59 =
  else if std.parseInt(major) == 5 && std.parseInt(minor) >= 9 then true
  else false;

// namespacedName:
// Split a 'namespace/name' reference into its parts, defaulting the
// namespace to params.namespace when no namespace is given.
local namespacedName(name) = {
  local namespaced = std.splitLimit(name, '/', 1),
  namespace: if std.length(namespaced) > 1 then namespaced[0] else params.namespace,
  name: if std.length(namespaced) > 1 then namespaced[1] else namespaced[0],
};

{
  isVersion58: isVersion58,
  isVersion59: isVersion59,
  namespacedName: namespacedName,
}
70 changes: 70 additions & 0 deletions docs/modules/ROOT/pages/how-tos/enable-multi-forwarder.adoc
@@ -0,0 +1,70 @@
= Enable Multi LogForwarder

The Red Hat OpenShift Logging Operator only watches the `openshift-logging` namespace.
If you want the operator to watch all namespaces on your cluster, you must redeploy it.
The following procedure redeploys the operator without deleting your logging components.


== Disable ArgoCD sync

Disable ArgoCD sync of component-openshift4-logging:

[source,bash]
----
kubectl --as=cluster-admin -n syn patch apps root --type=json \
-p '[{"op":"replace", "path":"/spec/syncPolicy", "value": {}}]'
kubectl --as=cluster-admin -n syn patch apps openshift4-logging --type=json \
-p '[{"op":"replace", "path":"/spec/syncPolicy", "value": {}}]'
----

== Remove the Cluster Logging Operator

. Remove the Subscription:
+
[source,bash]
----
kubectl --as=cluster-admin -n openshift-logging delete sub cluster-logging
----

. Remove the OperatorGroup:
+
[source,bash]
----
kubectl --as=cluster-admin -n openshift-logging delete og cluster-logging
----

. Remove the ClusterServiceVersion:
+
[source,bash]
----
kubectl --as=cluster-admin -n openshift-logging delete csv -l operators.coreos.com/cluster-logging.openshift-logging=
----

== Enable namespaced LogForwarding

. Enable the following parameter in the tenant repo:
+
[source,yaml]
----
parameters:
  openshift4_logging:
    namespaceLogForwarder:
      enabled: true
----

. Compile and push the catalog. The sketch below assumes the catalog is compiled locally with Commodore; the cluster ID is a placeholder.
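+
[source,bash]
----
# Sketch, assuming the catalog is compiled locally with Commodore;
# replace c-cluster-id-1234 with the target cluster's ID.
commodore catalog compile c-cluster-id-1234 --push
----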


== Enable ArgoCD sync

NOTE: Make sure ArgoCD is refreshed before enabling the sync again.
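
One way to trigger a refresh is to set the ArgoCD refresh annotation on the application, as sketched below (assuming the same app name as above):

[source,bash]
----
# Sketch: ask ArgoCD to refresh the openshift4-logging application.
kubectl --as=cluster-admin -n syn annotate apps openshift4-logging \
  argocd.argoproj.io/refresh=normal --overwrite
----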

Enable ArgoCD sync of component-openshift4-logging:

[source,bash]
----
kubectl --as=cluster-admin -n syn patch apps root --type=json \
-p '[{
"op":"replace",
"path":"/spec/syncPolicy",
"value": {"automated": {"prune": true, "selfHeal": true}}
}]'
----
42 changes: 42 additions & 0 deletions docs/modules/ROOT/pages/references/parameters.adoc
@@ -368,6 +368,25 @@ See the https://docs.openshift.com/container-platform/latest/observability/loggi
IMPORTANT: `clusterLogForwarding` is deprecated, please use `clusterLogForwarder`


== `namespaceLogForwarder`

[horizontal]
type:: dictionary
default::
+
[source,yaml]
----
namespaceLogForwarder:
  enabled: false
----

A dictionary holding the configuration for namespaced log forwarders.
Apart from the `enabled` flag, each key is a `namespace/name` reference holding that forwarder's `.spec`.

See the examples below for the configuration format.

NOTE: Enabling namespaced log forwarding requires redeploying the logging operator. See xref:how-tos/enable-multi-forwarder.adoc[How-To] for instructions.


== Examples

[source,yaml]
@@ -381,6 +400,29 @@ clusterLogging:
nodeCount: 5
----

=== Use namespaced ClusterLogForwarder

This example creates a `ClusterLogForwarder`, a `ServiceAccount` and a `RoleBinding` in the namespace `my-namespace`.

[source,yaml]
----
namespaceLogForwarder:
  enabled: true
  my-namespace/my-forwarder:
    outputs:
      splunk-forwarder:
        secret:
          name: splunk-forwarder
        type: fluentdForward
        url: tls://splunk-forwarder:24224
    pipelines:
      application-logs:
        inputRefs:
          - application
        outputRefs:
          - splunk-forwarder
----
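
The `ServiceAccount` and `RoleBinding` default to the forwarder's name (`my-forwarder` above). Based on the component code in this commit, a different account can be configured through `serviceAccountName`; the sketch below uses a hypothetical account name `my-log-collector`.

[source,yaml]
----
namespaceLogForwarder:
  enabled: true
  my-namespace/my-forwarder:
    # Hypothetical name; the ServiceAccount and RoleBinding are created
    # with this name instead of the forwarder name 'my-forwarder'.
    serviceAccountName: my-log-collector
    # outputs and pipelines as in the example above
----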

=== Forward all application logs to a third party

[source,yaml]
1 change: 1 addition & 0 deletions docs/modules/ROOT/partials/nav.adoc
@@ -9,6 +9,7 @@
* xref:how-tos/upgrade-v2.x-v3.x.adoc[Upgrade from v2.x to v3.x]
* xref:how-tos/upgrade-v3.x-v4.x.adoc[Upgrade from v3.x to v4.x]
* xref:how-tos/switch-to-lokistack.adoc[Switch to Lokistack]
* xref:how-tos/enable-multi-forwarder.adoc[Enable Multi LogForwarder]
.Alert runbooks
* xref:runbooks/SYN_ElasticsearchExpectNodeToReachDiskWatermark.adoc[SYN_ElasticsearchExpectNodeToReachDiskWatermark]
@@ -6,6 +6,4 @@ metadata:
    name: cluster-logging
  name: cluster-logging
  namespace: openshift-logging
spec:
  targetNamespaces:
  - openshift-logging
spec: {}
@@ -0,0 +1,48 @@
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  annotations: {}
  labels:
    name: bar
  name: bar
  namespace: foo
spec:
  inputs:
  - application:
      namespaces:
      - app-one
      - app-two
    name: my-apps
  outputs:
  - name: custom-forwarder
    type: syslog
  pipelines:
  - inputRefs:
    - my-apps
    name: my-apps
    outputRefs:
    - custom-forwarder
  serviceAccountName: ueli
---
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  annotations: {}
  labels:
    name: hands
  name: hands
  namespace: jazz
spec:
  outputs:
  - name: splunk-forwarder
    secret:
      name: splunk-forwarder
    type: fluentdForward
    url: tls://splunk-forwarder:24224
  pipelines:
  - inputRefs:
    - application
    name: application-logs
    outputRefs:
    - splunk-forwarder
  serviceAccountName: hands
@@ -0,0 +1,33 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: {}
  labels:
    name: ueli
  name: ueli
  namespace: foo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collect-application-logs
subjects:
- kind: ServiceAccount
  name: ueli
  namespace: foo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: {}
  labels:
    name: hands
  name: hands
  namespace: jazz
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collect-application-logs
subjects:
- kind: ServiceAccount
  name: hands
  namespace: jazz