From a1bde7d3dc6571c25b65952c4780fc5ee65ec98c Mon Sep 17 00:00:00 2001 From: dmachard <5562930+dmachard@users.noreply.github.com> Date: Wed, 8 May 2024 21:07:16 +0200 Subject: [PATCH 1/5] new workers package --- .github/workflows/testing-go.yml | 5 +- pkglinker/multiplexer.go | 55 +++++++++---------- pkglinker/pipelines.go | 55 +++++++++---------- {loggers => workers}/clickhouse.go | 2 +- {loggers => workers}/clickhouse_test.go | 2 +- {loggers => workers}/devnull.go | 2 +- {collectors => workers}/dnsmessage.go | 2 +- {collectors => workers}/dnsmessage_test.go | 2 +- {collectors => workers}/dnstap.go | 2 +- {collectors => workers}/dnstap_relay.go | 2 +- {collectors => workers}/dnstap_relay_test.go | 2 +- {collectors => workers}/dnstap_test.go | 2 +- {loggers => workers}/dnstapclient.go | 2 +- {loggers => workers}/dnstapclient_test.go | 2 +- {loggers => workers}/elasticsearch.go | 2 +- {loggers => workers}/elasticsearch_test.go | 2 +- {loggers => workers}/falco.go | 2 +- {loggers => workers}/falco_test.go | 2 +- {collectors => workers}/file_ingestor.go | 2 +- {collectors => workers}/file_ingestor_test.go | 2 +- {collectors => workers}/file_tail.go | 2 +- {collectors => workers}/file_tail_test.go | 2 +- {loggers => workers}/fluentd.go | 2 +- {loggers => workers}/fluentd_test.go | 2 +- {loggers => workers}/influxdb.go | 2 +- {loggers => workers}/influxdb_test.go | 2 +- {loggers => workers}/kafkaproducer.go | 2 +- {loggers => workers}/kafkaproducer_test.go | 2 +- {loggers => workers}/logfile.go | 6 +- {loggers => workers}/logfile_test.go | 2 +- {loggers => workers}/lokiclient.go | 2 +- {loggers => workers}/lokiclient_test.go | 2 +- {collectors => workers}/powerdns.go | 2 +- {collectors => workers}/powerdns_test.go | 2 +- {loggers => workers}/prometheus.go | 2 +- {loggers => workers}/prometheus_test.go | 2 +- {loggers => workers}/redispub.go | 2 +- {loggers => workers}/redispub_test.go | 2 +- {loggers => workers}/restapi.go | 2 +- {loggers => workers}/restapi_test.go | 2 
+- {loggers => workers}/scalyr.go | 2 +- {collectors => workers}/sniffer_afpacket.go | 2 +- .../sniffer_afpacket_linux.go | 2 +- .../sniffer_afpacket_test.go | 2 +- {collectors => workers}/sniffer_xdp.go | 2 +- .../sniffer_xdp_windows.go | 2 +- {loggers => workers}/statsd.go | 2 +- {loggers => workers}/statsd_test.go | 2 +- {loggers => workers}/stdout.go | 2 +- {loggers => workers}/stdout_test.go | 2 +- {loggers => workers}/syslog.go | 2 +- {loggers => workers}/syslog_test.go | 2 +- {loggers => workers}/tcpclient.go | 2 +- {loggers => workers}/tcpclient_test.go | 2 +- {collectors => workers}/tzsp.go | 2 +- {collectors => workers}/tzsp_linux.go | 2 +- 56 files changed, 111 insertions(+), 114 deletions(-) rename {loggers => workers}/clickhouse.go (99%) rename {loggers => workers}/clickhouse_test.go (98%) rename {loggers => workers}/devnull.go (98%) rename {collectors => workers}/dnsmessage.go (99%) rename {collectors => workers}/dnsmessage_test.go (99%) rename {collectors => workers}/dnstap.go (99%) rename {collectors => workers}/dnstap_relay.go (99%) rename {collectors => workers}/dnstap_relay_test.go (99%) rename {collectors => workers}/dnstap_test.go (99%) rename {loggers => workers}/dnstapclient.go (99%) rename {loggers => workers}/dnstapclient_test.go (99%) rename {loggers => workers}/elasticsearch.go (99%) rename {loggers => workers}/elasticsearch_test.go (99%) rename {loggers => workers}/falco.go (99%) rename {loggers => workers}/falco_test.go (98%) rename {collectors => workers}/file_ingestor.go (99%) rename {collectors => workers}/file_ingestor_test.go (97%) rename {collectors => workers}/file_tail.go (99%) rename {collectors => workers}/file_tail_test.go (98%) rename {loggers => workers}/fluentd.go (99%) rename {loggers => workers}/fluentd_test.go (99%) rename {loggers => workers}/influxdb.go (99%) rename {loggers => workers}/influxdb_test.go (98%) rename {loggers => workers}/kafkaproducer.go (99%) rename {loggers => workers}/kafkaproducer_test.go (99%) 
rename {loggers => workers}/logfile.go (99%) rename {loggers => workers}/logfile_test.go (99%) rename {loggers => workers}/lokiclient.go (99%) rename {loggers => workers}/lokiclient_test.go (99%) rename {collectors => workers}/powerdns.go (99%) rename {collectors => workers}/powerdns_test.go (96%) rename {loggers => workers}/prometheus.go (99%) rename {loggers => workers}/prometheus_test.go (99%) rename {loggers => workers}/redispub.go (99%) rename {loggers => workers}/redispub_test.go (99%) rename {loggers => workers}/restapi.go (99%) rename {loggers => workers}/restapi_test.go (99%) rename {loggers => workers}/scalyr.go (99%) rename {collectors => workers}/sniffer_afpacket.go (97%) rename {collectors => workers}/sniffer_afpacket_linux.go (99%) rename {collectors => workers}/sniffer_afpacket_test.go (97%) rename {collectors => workers}/sniffer_xdp.go (99%) rename {collectors => workers}/sniffer_xdp_windows.go (97%) rename {loggers => workers}/statsd.go (99%) rename {loggers => workers}/statsd_test.go (98%) rename {loggers => workers}/stdout.go (99%) rename {loggers => workers}/stdout_test.go (99%) rename {loggers => workers}/syslog.go (99%) rename {loggers => workers}/syslog_test.go (99%) rename {loggers => workers}/tcpclient.go (99%) rename {loggers => workers}/tcpclient_test.go (99%) rename {collectors => workers}/tzsp.go (97%) rename {collectors => workers}/tzsp_linux.go (99%) diff --git a/.github/workflows/testing-go.yml b/.github/workflows/testing-go.yml index 92901f58..be292ba5 100644 --- a/.github/workflows/testing-go.yml +++ b/.github/workflows/testing-go.yml @@ -29,8 +29,7 @@ jobs: - 'pkglinker' - 'pkgutils' - 'dnsutils' - - 'collectors' - - 'loggers' + - 'workers' - 'transformers' - 'netutils' - 'processors' @@ -148,7 +147,7 @@ jobs: - id: count_tests run: | - data=$(sudo go test -timeout 360s -v ./collectors ./processors ./dnsutils ./netutils ./loggers ./transformers ./pkgconfig ./pkglinker ./pkgutils ././ 2>&1 | grep -c RUN) + data=$(sudo go test 
-timeout 360s -v ./workers ./processors ./dnsutils ./netutils ./transformers ./pkgconfig ./pkglinker ./pkgutils ././ 2>&1 | grep -c RUN) echo "Count of Tests: $data" echo "data=$data" >> $GITHUB_OUTPUT diff --git a/pkglinker/multiplexer.go b/pkglinker/multiplexer.go index c05e58e9..4a6f26e3 100644 --- a/pkglinker/multiplexer.go +++ b/pkglinker/multiplexer.go @@ -4,10 +4,9 @@ import ( "fmt" "strings" - "github.com/dmachard/go-dnscollector/collectors" - "github.com/dmachard/go-dnscollector/loggers" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-dnscollector/workers" "github.com/dmachard/go-logger" "gopkg.in/yaml.v2" ) @@ -101,58 +100,58 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st // registor the logger if enabled if subcfg.Loggers.DevNull.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewDevNull(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewDevNull(subcfg, logger, output.Name) } if subcfg.Loggers.RestAPI.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewRestAPI(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewRestAPI(subcfg, logger, output.Name) } if subcfg.Loggers.Prometheus.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewPrometheus(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewPrometheus(subcfg, logger, output.Name) } if subcfg.Loggers.Stdout.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewStdOut(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewStdOut(subcfg, logger, output.Name) } if subcfg.Loggers.LogFile.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewLogFile(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewLogFile(subcfg, logger, output.Name) } if 
subcfg.Loggers.DNSTap.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewDnstapSender(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewDnstapSender(subcfg, logger, output.Name) } if subcfg.Loggers.TCPClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewTCPClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewTCPClient(subcfg, logger, output.Name) } if subcfg.Loggers.Syslog.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewSyslog(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewSyslog(subcfg, logger, output.Name) } if subcfg.Loggers.Fluentd.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewFluentdClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewFluentdClient(subcfg, logger, output.Name) } if subcfg.Loggers.InfluxDB.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewInfluxDBClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewInfluxDBClient(subcfg, logger, output.Name) } if subcfg.Loggers.LokiClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewLokiClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewLokiClient(subcfg, logger, output.Name) } if subcfg.Loggers.Statsd.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewStatsdClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewStatsdClient(subcfg, logger, output.Name) } if subcfg.Loggers.ElasticSearchClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewElasticSearchClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewElasticSearchClient(subcfg, logger, output.Name) } if subcfg.Loggers.ScalyrClient.Enable && IsLoggerRouted(config, output.Name) { - 
mapLoggers[output.Name] = loggers.NewScalyrClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewScalyrClient(subcfg, logger, output.Name) } if subcfg.Loggers.RedisPub.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewRedisPub(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewRedisPub(subcfg, logger, output.Name) } if subcfg.Loggers.KafkaProducer.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewKafkaProducer(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewKafkaProducer(subcfg, logger, output.Name) } if subcfg.Loggers.FalcoClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewFalcoClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewFalcoClient(subcfg, logger, output.Name) } if subcfg.Loggers.ClickhouseClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewClickhouseClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewClickhouseClient(subcfg, logger, output.Name) } } @@ -164,28 +163,28 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st // register the collector if enabled if subcfg.Collectors.Dnstap.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewDnstap(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewDnstap(nil, subcfg, logger, input.Name) } if subcfg.Collectors.DnstapProxifier.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewDnstapProxifier(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewDnstapProxifier(nil, subcfg, logger, input.Name) } if subcfg.Collectors.AfpacketLiveCapture.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewAfpacketSniffer(nil, subcfg, logger, input.Name) + 
mapCollectors[input.Name] = workers.NewAfpacketSniffer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.XdpLiveCapture.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewXDPSniffer(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewXDPSniffer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.Tail.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewTail(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewTail(nil, subcfg, logger, input.Name) } if subcfg.Collectors.PowerDNS.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewProtobufPowerDNS(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewProtobufPowerDNS(nil, subcfg, logger, input.Name) } if subcfg.Collectors.FileIngestor.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewFileIngestor(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewFileIngestor(nil, subcfg, logger, input.Name) } if subcfg.Collectors.Tzsp.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewTZSP(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewTZSP(nil, subcfg, logger, input.Name) } } diff --git a/pkglinker/pipelines.go b/pkglinker/pipelines.go index e4d8f551..e5378588 100644 --- a/pkglinker/pipelines.go +++ b/pkglinker/pipelines.go @@ -3,10 +3,9 @@ package pkglinker import ( "fmt" - "github.com/dmachard/go-dnscollector/collectors" - "github.com/dmachard/go-dnscollector/loggers" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-dnscollector/workers" "github.com/dmachard/go-logger" "github.com/pkg/errors" "gopkg.in/yaml.v2" @@ -125,84 +124,84 @@ func CreateRouting(stanza pkgconfig.ConfigPipelines, mapCollectors map[string]pk func 
CreateStanza(stanzaName string, config *pkgconfig.Config, mapCollectors map[string]pkgutils.Worker, mapLoggers map[string]pkgutils.Worker, logger *logger.Logger) { // register the logger if enabled if config.Loggers.RestAPI.Enable { - mapLoggers[stanzaName] = loggers.NewRestAPI(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewRestAPI(config, logger, stanzaName) } if config.Loggers.Prometheus.Enable { - mapLoggers[stanzaName] = loggers.NewPrometheus(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewPrometheus(config, logger, stanzaName) } if config.Loggers.Stdout.Enable { - mapLoggers[stanzaName] = loggers.NewStdOut(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewStdOut(config, logger, stanzaName) } if config.Loggers.LogFile.Enable { - mapLoggers[stanzaName] = loggers.NewLogFile(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewLogFile(config, logger, stanzaName) } if config.Loggers.DNSTap.Enable { - mapLoggers[stanzaName] = loggers.NewDnstapSender(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewDnstapSender(config, logger, stanzaName) } if config.Loggers.TCPClient.Enable { - mapLoggers[stanzaName] = loggers.NewTCPClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewTCPClient(config, logger, stanzaName) } if config.Loggers.Syslog.Enable { - mapLoggers[stanzaName] = loggers.NewSyslog(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewSyslog(config, logger, stanzaName) } if config.Loggers.Fluentd.Enable { - mapLoggers[stanzaName] = loggers.NewFluentdClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewFluentdClient(config, logger, stanzaName) } if config.Loggers.InfluxDB.Enable { - mapLoggers[stanzaName] = loggers.NewInfluxDBClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewInfluxDBClient(config, logger, stanzaName) } if config.Loggers.LokiClient.Enable { - mapLoggers[stanzaName] = 
loggers.NewLokiClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewLokiClient(config, logger, stanzaName) } if config.Loggers.Statsd.Enable { - mapLoggers[stanzaName] = loggers.NewStatsdClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewStatsdClient(config, logger, stanzaName) } if config.Loggers.ElasticSearchClient.Enable { - mapLoggers[stanzaName] = loggers.NewElasticSearchClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewElasticSearchClient(config, logger, stanzaName) } if config.Loggers.ScalyrClient.Enable { - mapLoggers[stanzaName] = loggers.NewScalyrClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewScalyrClient(config, logger, stanzaName) } if config.Loggers.RedisPub.Enable { - mapLoggers[stanzaName] = loggers.NewRedisPub(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewRedisPub(config, logger, stanzaName) } if config.Loggers.KafkaProducer.Enable { - mapLoggers[stanzaName] = loggers.NewKafkaProducer(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewKafkaProducer(config, logger, stanzaName) } if config.Loggers.FalcoClient.Enable { - mapLoggers[stanzaName] = loggers.NewFalcoClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewFalcoClient(config, logger, stanzaName) } if config.Loggers.ClickhouseClient.Enable { - mapLoggers[stanzaName] = loggers.NewClickhouseClient(config, logger, stanzaName) + mapLoggers[stanzaName] = workers.NewClickhouseClient(config, logger, stanzaName) } // register the collector if enabled if config.Collectors.DNSMessage.Enable { - mapCollectors[stanzaName] = collectors.NewDNSMessage(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewDNSMessage(nil, config, logger, stanzaName) } if config.Collectors.Dnstap.Enable { - mapCollectors[stanzaName] = collectors.NewDnstap(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewDnstap(nil, config, logger, stanzaName) 
} if config.Collectors.DnstapProxifier.Enable { - mapCollectors[stanzaName] = collectors.NewDnstapProxifier(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewDnstapProxifier(nil, config, logger, stanzaName) } if config.Collectors.AfpacketLiveCapture.Enable { - mapCollectors[stanzaName] = collectors.NewAfpacketSniffer(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewAfpacketSniffer(nil, config, logger, stanzaName) } if config.Collectors.XdpLiveCapture.Enable { - mapCollectors[stanzaName] = collectors.NewXDPSniffer(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewXDPSniffer(nil, config, logger, stanzaName) } if config.Collectors.Tail.Enable { - mapCollectors[stanzaName] = collectors.NewTail(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewTail(nil, config, logger, stanzaName) } if config.Collectors.PowerDNS.Enable { - mapCollectors[stanzaName] = collectors.NewProtobufPowerDNS(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewProtobufPowerDNS(nil, config, logger, stanzaName) } if config.Collectors.FileIngestor.Enable { - mapCollectors[stanzaName] = collectors.NewFileIngestor(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewFileIngestor(nil, config, logger, stanzaName) } if config.Collectors.Tzsp.Enable { - mapCollectors[stanzaName] = collectors.NewTZSP(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewTZSP(nil, config, logger, stanzaName) } } diff --git a/loggers/clickhouse.go b/workers/clickhouse.go similarity index 99% rename from loggers/clickhouse.go rename to workers/clickhouse.go index 97b728f4..ec6b5a19 100644 --- a/loggers/clickhouse.go +++ b/workers/clickhouse.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "net/http" diff --git a/loggers/clickhouse_test.go b/workers/clickhouse_test.go similarity index 98% rename from loggers/clickhouse_test.go rename to 
workers/clickhouse_test.go index f62a2b2d..5ba0ba71 100644 --- a/loggers/clickhouse_test.go +++ b/workers/clickhouse_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/devnull.go b/workers/devnull.go similarity index 98% rename from loggers/devnull.go rename to workers/devnull.go index 80f61c8d..b6ef953b 100644 --- a/loggers/devnull.go +++ b/workers/devnull.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "github.com/dmachard/go-dnscollector/pkgconfig" diff --git a/collectors/dnsmessage.go b/workers/dnsmessage.go similarity index 99% rename from collectors/dnsmessage.go rename to workers/dnsmessage.go index 754cefcc..92d42018 100644 --- a/collectors/dnsmessage.go +++ b/workers/dnsmessage.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/collectors/dnsmessage_test.go b/workers/dnsmessage_test.go similarity index 99% rename from collectors/dnsmessage_test.go rename to workers/dnsmessage_test.go index b6462b36..1ed1624e 100644 --- a/collectors/dnsmessage_test.go +++ b/workers/dnsmessage_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "fmt" diff --git a/collectors/dnstap.go b/workers/dnstap.go similarity index 99% rename from collectors/dnstap.go rename to workers/dnstap.go index 2c87620f..a0a2aa31 100644 --- a/collectors/dnstap.go +++ b/workers/dnstap.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/collectors/dnstap_relay.go b/workers/dnstap_relay.go similarity index 99% rename from collectors/dnstap_relay.go rename to workers/dnstap_relay.go index 05baf909..2f35674b 100644 --- a/collectors/dnstap_relay.go +++ b/workers/dnstap_relay.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/collectors/dnstap_relay_test.go b/workers/dnstap_relay_test.go similarity index 99% rename from collectors/dnstap_relay_test.go rename to workers/dnstap_relay_test.go index 1baa7035..79100be2 100644 --- 
a/collectors/dnstap_relay_test.go +++ b/workers/dnstap_relay_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/collectors/dnstap_test.go b/workers/dnstap_test.go similarity index 99% rename from collectors/dnstap_test.go rename to workers/dnstap_test.go index e44dc649..47391e83 100644 --- a/collectors/dnstap_test.go +++ b/workers/dnstap_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/loggers/dnstapclient.go b/workers/dnstapclient.go similarity index 99% rename from loggers/dnstapclient.go rename to workers/dnstapclient.go index 1e316039..07d1ed95 100644 --- a/loggers/dnstapclient.go +++ b/workers/dnstapclient.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/dnstapclient_test.go b/workers/dnstapclient_test.go similarity index 99% rename from loggers/dnstapclient_test.go rename to workers/dnstapclient_test.go index 32e94622..adcf495b 100644 --- a/loggers/dnstapclient_test.go +++ b/workers/dnstapclient_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/elasticsearch.go b/workers/elasticsearch.go similarity index 99% rename from loggers/elasticsearch.go rename to workers/elasticsearch.go index 9c569cda..fc418609 100644 --- a/loggers/elasticsearch.go +++ b/workers/elasticsearch.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/elasticsearch_test.go b/workers/elasticsearch_test.go similarity index 99% rename from loggers/elasticsearch_test.go rename to workers/elasticsearch_test.go index 4a317c0f..d52a6e02 100644 --- a/loggers/elasticsearch_test.go +++ b/workers/elasticsearch_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/falco.go b/workers/falco.go similarity index 99% rename from loggers/falco.go rename to workers/falco.go index f596c396..e3202a63 100644 --- a/loggers/falco.go +++ b/workers/falco.go @@ -1,4 +1,4 
@@ -package loggers +package workers import ( "bytes" diff --git a/loggers/falco_test.go b/workers/falco_test.go similarity index 98% rename from loggers/falco_test.go rename to workers/falco_test.go index cb868fc4..e6e72c83 100644 --- a/loggers/falco_test.go +++ b/workers/falco_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/collectors/file_ingestor.go b/workers/file_ingestor.go similarity index 99% rename from collectors/file_ingestor.go rename to workers/file_ingestor.go index b1195415..c2f541ad 100644 --- a/collectors/file_ingestor.go +++ b/workers/file_ingestor.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "errors" diff --git a/collectors/file_ingestor_test.go b/workers/file_ingestor_test.go similarity index 97% rename from collectors/file_ingestor_test.go rename to workers/file_ingestor_test.go index aa7ef10c..744ba9c5 100644 --- a/collectors/file_ingestor_test.go +++ b/workers/file_ingestor_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "testing" diff --git a/collectors/file_tail.go b/workers/file_tail.go similarity index 99% rename from collectors/file_tail.go rename to workers/file_tail.go index 28fe6416..55ee22b8 100644 --- a/collectors/file_tail.go +++ b/workers/file_tail.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "fmt" diff --git a/collectors/file_tail_test.go b/workers/file_tail_test.go similarity index 98% rename from collectors/file_tail_test.go rename to workers/file_tail_test.go index b184677b..ac5fefb3 100644 --- a/collectors/file_tail_test.go +++ b/workers/file_tail_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/loggers/fluentd.go b/workers/fluentd.go similarity index 99% rename from loggers/fluentd.go rename to workers/fluentd.go index 0ddb68aa..e74000be 100644 --- a/loggers/fluentd.go +++ b/workers/fluentd.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "crypto/tls" diff --git 
a/loggers/fluentd_test.go b/workers/fluentd_test.go similarity index 99% rename from loggers/fluentd_test.go rename to workers/fluentd_test.go index 0f42260e..0a5f7d90 100644 --- a/loggers/fluentd_test.go +++ b/workers/fluentd_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/influxdb.go b/workers/influxdb.go similarity index 99% rename from loggers/influxdb.go rename to workers/influxdb.go index b1c59d25..3be72810 100644 --- a/loggers/influxdb.go +++ b/workers/influxdb.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "time" diff --git a/loggers/influxdb_test.go b/workers/influxdb_test.go similarity index 98% rename from loggers/influxdb_test.go rename to workers/influxdb_test.go index b483becb..94917679 100644 --- a/loggers/influxdb_test.go +++ b/workers/influxdb_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/kafkaproducer.go b/workers/kafkaproducer.go similarity index 99% rename from loggers/kafkaproducer.go rename to workers/kafkaproducer.go index daca75fd..4d05d8e4 100644 --- a/loggers/kafkaproducer.go +++ b/workers/kafkaproducer.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/kafkaproducer_test.go b/workers/kafkaproducer_test.go similarity index 99% rename from loggers/kafkaproducer_test.go rename to workers/kafkaproducer_test.go index 29e39ddd..61edf1de 100644 --- a/loggers/kafkaproducer_test.go +++ b/workers/kafkaproducer_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "log" diff --git a/loggers/logfile.go b/workers/logfile.go similarity index 99% rename from loggers/logfile.go rename to workers/logfile.go index 663fdbe2..986d9ef1 100644 --- a/loggers/logfile.go +++ b/workers/logfile.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -32,7 +32,7 @@ const ( compressSuffix = ".gz" ) -func IsValidMode(mode string) bool { +func IsValid(mode string) bool { switch mode { case 
pkgconfig.ModeText, @@ -67,7 +67,7 @@ func NewLogFile(config *pkgconfig.Config, logger *logger.Logger, name string) *L } func (w *LogFile) ReadConfig() { - if !IsValidMode(w.GetConfig().Loggers.LogFile.Mode) { + if !IsValid(w.GetConfig().Loggers.LogFile.Mode) { w.LogFatal("["+w.GetName()+"] logger=file - invalid mode: ", w.GetConfig().Loggers.LogFile.Mode) } w.fileDir = filepath.Dir(w.GetConfig().Loggers.LogFile.FilePath) diff --git a/loggers/logfile_test.go b/workers/logfile_test.go similarity index 99% rename from loggers/logfile_test.go rename to workers/logfile_test.go index bdd97e24..9bc80f45 100644 --- a/loggers/logfile_test.go +++ b/workers/logfile_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "fmt" diff --git a/loggers/lokiclient.go b/workers/lokiclient.go similarity index 99% rename from loggers/lokiclient.go rename to workers/lokiclient.go index 5f84ea5c..ccdde497 100644 --- a/loggers/lokiclient.go +++ b/workers/lokiclient.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/lokiclient_test.go b/workers/lokiclient_test.go similarity index 99% rename from loggers/lokiclient_test.go rename to workers/lokiclient_test.go index dd65d78a..2cb93502 100644 --- a/loggers/lokiclient_test.go +++ b/workers/lokiclient_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/collectors/powerdns.go b/workers/powerdns.go similarity index 99% rename from collectors/powerdns.go rename to workers/powerdns.go index 4ad00877..f823d4dc 100644 --- a/collectors/powerdns.go +++ b/workers/powerdns.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" diff --git a/collectors/powerdns_test.go b/workers/powerdns_test.go similarity index 96% rename from collectors/powerdns_test.go rename to workers/powerdns_test.go index 4478acec..3e2e9175 100644 --- a/collectors/powerdns_test.go +++ b/workers/powerdns_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "net" 
diff --git a/loggers/prometheus.go b/workers/prometheus.go similarity index 99% rename from loggers/prometheus.go rename to workers/prometheus.go index 876b63f9..b2ceb62e 100644 --- a/loggers/prometheus.go +++ b/workers/prometheus.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "crypto/tls" diff --git a/loggers/prometheus_test.go b/workers/prometheus_test.go similarity index 99% rename from loggers/prometheus_test.go rename to workers/prometheus_test.go index 49b9750c..c3408f62 100644 --- a/loggers/prometheus_test.go +++ b/workers/prometheus_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "net/http" diff --git a/loggers/redispub.go b/workers/redispub.go similarity index 99% rename from loggers/redispub.go rename to workers/redispub.go index c94043e7..70fcbd53 100644 --- a/loggers/redispub.go +++ b/workers/redispub.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/redispub_test.go b/workers/redispub_test.go similarity index 99% rename from loggers/redispub_test.go rename to workers/redispub_test.go index 5b03ead4..3178a65a 100644 --- a/loggers/redispub_test.go +++ b/workers/redispub_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/restapi.go b/workers/restapi.go similarity index 99% rename from loggers/restapi.go rename to workers/restapi.go index aad0f102..1e7f761b 100644 --- a/loggers/restapi.go +++ b/workers/restapi.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "crypto/tls" diff --git a/loggers/restapi_test.go b/workers/restapi_test.go similarity index 99% rename from loggers/restapi_test.go rename to workers/restapi_test.go index 1a472ce4..7b78da72 100644 --- a/loggers/restapi_test.go +++ b/workers/restapi_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "net/http" diff --git a/loggers/scalyr.go b/workers/scalyr.go similarity index 99% rename from loggers/scalyr.go rename to workers/scalyr.go index 
cc7007f0..d81b15ff 100644 --- a/loggers/scalyr.go +++ b/workers/scalyr.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/collectors/sniffer_afpacket.go b/workers/sniffer_afpacket.go similarity index 97% rename from collectors/sniffer_afpacket.go rename to workers/sniffer_afpacket.go index 0c849de8..ee76571a 100644 --- a/collectors/sniffer_afpacket.go +++ b/workers/sniffer_afpacket.go @@ -1,7 +1,7 @@ //go:build windows || darwin || freebsd // +build windows darwin freebsd -package collectors +package workers import ( "github.com/dmachard/go-dnscollector/pkgconfig" diff --git a/collectors/sniffer_afpacket_linux.go b/workers/sniffer_afpacket_linux.go similarity index 99% rename from collectors/sniffer_afpacket_linux.go rename to workers/sniffer_afpacket_linux.go index 960c4bc0..e37a62c8 100644 --- a/collectors/sniffer_afpacket_linux.go +++ b/workers/sniffer_afpacket_linux.go @@ -1,7 +1,7 @@ //go:build linux // +build linux -package collectors +package workers import ( "context" diff --git a/collectors/sniffer_afpacket_test.go b/workers/sniffer_afpacket_test.go similarity index 97% rename from collectors/sniffer_afpacket_test.go rename to workers/sniffer_afpacket_test.go index e994af38..89f6198f 100644 --- a/collectors/sniffer_afpacket_test.go +++ b/workers/sniffer_afpacket_test.go @@ -1,7 +1,7 @@ //go:build linux // +build linux -package collectors +package workers import ( "log" diff --git a/collectors/sniffer_xdp.go b/workers/sniffer_xdp.go similarity index 99% rename from collectors/sniffer_xdp.go rename to workers/sniffer_xdp.go index 50b372aa..924f3e3e 100644 --- a/collectors/sniffer_xdp.go +++ b/workers/sniffer_xdp.go @@ -1,7 +1,7 @@ //go:build linux || darwin || freebsd // +build linux darwin freebsd -package collectors +package workers import ( "bytes" diff --git a/collectors/sniffer_xdp_windows.go b/workers/sniffer_xdp_windows.go similarity index 97% rename from collectors/sniffer_xdp_windows.go rename to 
workers/sniffer_xdp_windows.go index 690b15ee..54f7f377 100644 --- a/collectors/sniffer_xdp_windows.go +++ b/workers/sniffer_xdp_windows.go @@ -1,7 +1,7 @@ //go:build windows // +build windows -package collectors +package workers import ( "github.com/dmachard/go-dnscollector/pkgconfig" diff --git a/loggers/statsd.go b/workers/statsd.go similarity index 99% rename from loggers/statsd.go rename to workers/statsd.go index 2bd4eb51..88e7a233 100644 --- a/loggers/statsd.go +++ b/workers/statsd.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/statsd_test.go b/workers/statsd_test.go similarity index 98% rename from loggers/statsd_test.go rename to workers/statsd_test.go index 8d7c4b6d..867cf1be 100644 --- a/loggers/statsd_test.go +++ b/workers/statsd_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "net" diff --git a/loggers/stdout.go b/workers/stdout.go similarity index 99% rename from loggers/stdout.go rename to workers/stdout.go index 89d371ea..d3136ebc 100644 --- a/loggers/stdout.go +++ b/workers/stdout.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/stdout_test.go b/workers/stdout_test.go similarity index 99% rename from loggers/stdout_test.go rename to workers/stdout_test.go index 0fbb5790..a9bdbebf 100644 --- a/loggers/stdout_test.go +++ b/workers/stdout_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/syslog.go b/workers/syslog.go similarity index 99% rename from loggers/syslog.go rename to workers/syslog.go index bf33f0b3..d79b3b56 100644 --- a/loggers/syslog.go +++ b/workers/syslog.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" diff --git a/loggers/syslog_test.go b/workers/syslog_test.go similarity index 99% rename from loggers/syslog_test.go rename to workers/syslog_test.go index 65e61aa2..66d33a8d 100644 --- a/loggers/syslog_test.go +++ b/workers/syslog_test.go @@ -1,4 +1,4 @@ -package 
loggers +package workers import ( "bufio" diff --git a/loggers/tcpclient.go b/workers/tcpclient.go similarity index 99% rename from loggers/tcpclient.go rename to workers/tcpclient.go index 7290e6b9..68c194a4 100644 --- a/loggers/tcpclient.go +++ b/workers/tcpclient.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/loggers/tcpclient_test.go b/workers/tcpclient_test.go similarity index 99% rename from loggers/tcpclient_test.go rename to workers/tcpclient_test.go index f985f156..1b658668 100644 --- a/loggers/tcpclient_test.go +++ b/workers/tcpclient_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" diff --git a/collectors/tzsp.go b/workers/tzsp.go similarity index 97% rename from collectors/tzsp.go rename to workers/tzsp.go index b96a88ce..29e4d957 100644 --- a/collectors/tzsp.go +++ b/workers/tzsp.go @@ -1,7 +1,7 @@ //go:build windows || freebsd || darwin // +build windows freebsd darwin -package collectors +package workers import ( "github.com/dmachard/go-dnscollector/pkgconfig" diff --git a/collectors/tzsp_linux.go b/workers/tzsp_linux.go similarity index 99% rename from collectors/tzsp_linux.go rename to workers/tzsp_linux.go index 978ae77d..f9496d53 100644 --- a/collectors/tzsp_linux.go +++ b/workers/tzsp_linux.go @@ -4,7 +4,7 @@ // Written by Noel Kuntze // Updating by Denis Machard -package collectors +package workers import ( "context" From 93e448dbca1bcaa4bc749bbbecbe00a0fccee2d0 Mon Sep 17 00:00:00 2001 From: dmachard <5562930+dmachard@users.noreply.github.com> Date: Thu, 9 May 2024 11:26:08 +0200 Subject: [PATCH 2/5] fix linter and update the docs --- Makefile | 3 +- dnsutils/message.go | 3 +- docs/development.md | 283 +++--------------- pkgconfig/constants.go | 1 + pkgconfig/loggers.go | 2 +- pkglinker/multiplexer.go | 2 +- pkglinker/pipelines.go | 2 +- workers/clickhouse_test.go | 2 +- workers/{dnstap.go => dnstapserver.go} | 18 +- .../{dnstap_test.go => dnstapserver_test.go} | 4 +- 
workers/fluentd_test.go | 2 +- workers/sniffer_afpacket_test.go | 4 +- workers/stdout_test.go | 6 +- 13 files changed, 72 insertions(+), 260 deletions(-) rename workers/{dnstap.go => dnstapserver.go} (92%) rename workers/{dnstap_test.go => dnstapserver_test.go} (97%) diff --git a/Makefile b/Makefile index 1b7ab5e9..74621cb6 100644 --- a/Makefile +++ b/Makefile @@ -74,8 +74,7 @@ tests: check-go @go test ./netutils/ -race -cover -v @go test -timeout 90s ./dnsutils/ -race -cover -v @go test -timeout 90s ./transformers/ -race -cover -v - @go test -timeout 90s ./collectors/ -race -cover -v - @go test -timeout 90s ./loggers/ -race -cover -v + @go test -timeout 180s ./workers/ -race -cover -v @go test -timeout 90s ./processors/ -race -cover -v # Cleans the project using go clean. diff --git a/dnsutils/message.go b/dnsutils/message.go index baea3c5b..efdc060f 100644 --- a/dnsutils/message.go +++ b/dnsutils/message.go @@ -17,6 +17,7 @@ import ( "time" "github.com/dmachard/go-dnscollector/netutils" + "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnstap-protobuf" "github.com/google/gopacket" "github.com/google/gopacket/layers" @@ -1830,7 +1831,7 @@ func GetFakeDNSMessage() DNSMessage { dm.DNSTap.Identity = "collector" dm.DNSTap.Operation = "CLIENT_QUERY" dm.DNS.Type = DNSQuery - dm.DNS.Qname = "dns.collector" + dm.DNS.Qname = pkgconfig.ProgQname dm.NetworkInfo.QueryIP = "1.2.3.4" dm.NetworkInfo.QueryPort = "1234" dm.NetworkInfo.ResponseIP = "4.3.2.1" diff --git a/docs/development.md b/docs/development.md index 0b4389e1..42fbc1b5 100644 --- a/docs/development.md +++ b/docs/development.md @@ -6,8 +6,7 @@ First, make sure your golang version is `1.20` or higher How to userguides: -- [Add a new collector](#add-collector) -- [Add a new logger](#add-logger) +- [Add a new worker](#add-worker) - [Add a new transform](#add-transformer) ## Build and run from source @@ -149,9 +148,9 @@ func NewTransforms( Finally update the docs `doc/transformers.md` and 
`README.md` -### Add logger +### Add a worker (collector or logger) -1. Add Configuration `dnsutils/config.go` and `config.yml` +1. Add Configuration in `pkgconfig/logger.go` or `pkgconfig/collectors.go` ```golang Loggers struct { @@ -159,7 +158,6 @@ Loggers struct { Enable bool `yaml:"enable"` } } - ``` ```golang @@ -168,82 +166,70 @@ func (c *Config) SetDefault() { } ``` -2. Create the following file `loggers/mylogger.go` and `loggers/mylogger_test.go` +2. Create the following file `workers/mylogger.go` and `workers/mylogger_test.go` ```golang -package loggers +package workers import ( - "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-logger" ) -type MyLogger struct { - done chan bool - channel chan dnsutils.DnsMessage - config *pkgconfig.Config - logger *logger.Logger - exit chan bool - name string -} - -func NewMyLogger(config *pkgconfig.Config, logger *logger.Logger, name string) *MyLogger { - o := &MyLogger{ - done: make(chan bool), - exit: make(chan bool), - channel: make(chan dnsutils.DnsMessage, 512), - logger: logger, - config: config, - name: "mylogger", - } - return o -} - -func (c *MyLogger) GetName() string { return c.name } - -func (c *MyLogger) SetLoggers(loggers []pkgutils.Worker) {} - -func (o *MyLogger) ReadConfig() {} - -func (o *MyLogger) LogInfo(msg string, v ...interface{}) { - o.logger.Info("["+o.name+"] mylogger - "+msg, v...) +type MyWorker struct { + *pkgutils.GenericWorker } -func (o *MyLogger) LogError(msg string, v ...interface{}) { - o.logger.Error("["+o.name+"] mylogger - "+msg, v...) 
+func NewMyWorker(config *pkgconfig.Config, console *logger.Logger, name string) *MyWorker { + s := &MyWorker{GenericWorker: pkgutils.NewGenericWorker(config, console, name, "worker", DefaultBufferSize)} + s.ReadConfig() + return s } -func (o *MyLogger) Stop() { - o.LogInfo("stopping...") +func (w *MyWorker) StartCollect() { + w.LogInfo("worker is starting collection") + defer w.CollectDone() - // exit to close properly - o.exit <- true + // goroutine to process transformed dns messages + go w.StartLogging() - // read done channel and block until run is terminated - <-o.done - close(o.done) -} + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() -func (o *MyLogger) GetInputChannel() chan dnsutils.DnsMessage { - return o.channel + case _, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("run: input channel closed!") + return + } + } + } } -func (o *MyLogger) Run() { - o.LogInfo("running in background...") - // prepare transforms - listChannel := []chan dnsutils.DnsMessage{} - listChannel = append(listChannel, o.channel) - subprocessors := transformers.NewTransforms(&o.config.OutgoingTransformers, o.logger, o.name, listChannel) +func (w *MyWorker) StartLogging() { + w.LogInfo("worker is starting logging") + defer w.LoggingDone() - o.LogInfo("run terminated") + for { + select { + case <-w.OnLoggerStopped(): + return - // cleanup transformers - subprocessors.Reset() + case _, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("process: output channel closed!") + return + } - o.done <- true + } + } } ``` -3. Update the main file `dnscollector.go` +3. Update the main file `pkglinker` in `pipelines.go` ```golang if subcfg.Loggers.MyLogger.Enable && IsLoggerRouted(config, output.Name) { @@ -251,179 +237,4 @@ if subcfg.Loggers.MyLogger.Enable && IsLoggerRouted(config, output.Name) { } ``` -4. 
Finally update the docs `doc/loggers.md` and `README.md` - -### Add collector - -Add Configuration `dnsutils/config.go` and `config.yml` - -```golang -Collectors struct { - MyCollector struct { - Enable bool `yaml:"enable"` - } `yaml:"tail"` -} -``` - -```golang -func (c *Config) SetDefault() { - c.Collectors.MyCollector.Enable = false -} -``` - -Create the following file `collectors/mycollector.go` and `collectors/mycollector_test.go` - -```golang -package collectors - -import ( - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" -) - -type MyNewCollector struct { - doneRun chan bool - doneMonitor chan bool - stopRun chan bool - stopMonitor chan bool - loggers []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - droppedCount int - dropped chan int -} - -func NewNewCollector(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Dnstap { - logger.Info("[%s] collector=mynewcollector - enabled", name) - s := &MyNewCollector{ - doneRun: make(chan bool), - doneMonitor: make(chan bool), - stopRun: make(chan bool), - stopMonitor: make(chan bool), - dropped: make(chan int), - config: config, - configChan: make(chan *pkgconfig.Config), - loggers: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *MyNewCollector) GetName() string { return c.name } - -func (c *MyNewCollector) AddDefaultRoute(wrk pkgutils.Worker) { - c.loggers = append(c.loggers, wrk) -} - -func (c *MyNewCollector) SetLoggers(loggers []pkgutils.Worker) { - c.loggers = loggers -} - -func (c *MyNewCollector) Loggers() ([]chan dnsutils.DNSMessage, []string) { - channels := []chan dnsutils.DNSMessage{} - names := []string{} - for _, p := range c.loggers { - channels = append(channels,p.GetInputChannel()) - names = append(names, p.GetName()) - } - return channels, names -} - -func (c 
*MyNewCollector) ReadConfig() {} - -func (c *MyNewCollector) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *MyNewCollector) LogInfo(msg string, v ...interface{}) { - c.logger.Info("["+c.name+"] collector=mynewcollector - "+msg, v...) -} - -func (c *MyNewCollector) LogError(msg string, v ...interface{}) { - c.logger.Error("["+c.name+" collector=mynewcollector - "+msg, v...) -} - -func (c *MyNewCollector) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *MyNewCollector) Stop() { - // stop monitor goroutine - c.LogInfo("stopping monitor...") - c.stopMonitor <- true - <-c.doneMonitor - - // read done channel and block until run is terminated - c.LogInfo("stopping run...") - c.stopRun <- true - <-c.doneRun -} - -func (c *MyNewCollector) MonitorCollector() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-c.dropped: - c.droppedCount++ - case <-c.stopMonitor: - close(c.dropped) - bufferFull.Stop() - c.doneMonitor <- true - break MONITOR_LOOP - case <-bufferFull.C: - if c.droppedCount > 0 { - c.LogError("recv buffer is full, %d packet(s) dropped", c.droppedCount) - c.droppedCount = 0 - } - bufferFull.Reset(watchInterval) - } - } - c.LogInfo("monitor terminated") -} - -func (c *DNSMessage) Run() { - c.LogInfo("starting collector...") - - // start goroutine to count dropped messsages - go c.MonitorCollector() - -RUN_LOOP: - for { - select { - case <-c.stopRun: - c.doneRun <- true - break RUN_LOOP - - case cfg := <-c.configChan: - - // save the new config - c.config = cfg - c.ReadConfig() - } - - } - c.LogInfo("run terminated") -} - - -``` - -Update the main file `dnscollector.go` - -```golang -if subcfg.Collectors.MyCollector.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewMyCollector(nil, subcfg, logger, input.Name) -} -``` - -Finally update the docs 
`doc/collectors.md` and `README.md` +4. Finally update the docs `doc/loggers.md` or `doc/collectors.md` and `README.md` \ No newline at end of file diff --git a/pkgconfig/constants.go b/pkgconfig/constants.go index 7e8a90f7..29f6d9d5 100644 --- a/pkgconfig/constants.go +++ b/pkgconfig/constants.go @@ -7,6 +7,7 @@ import ( const ( StrUnknown = "UNKNOWN" + ProgQname = "dns.collector" ProgName = "dnscollector" LocalhostIP = "127.0.0.1" AnyIP = "0.0.0.0" diff --git a/pkgconfig/loggers.go b/pkgconfig/loggers.go index f0572d5a..dc25d4e0 100644 --- a/pkgconfig/loggers.go +++ b/pkgconfig/loggers.go @@ -457,7 +457,7 @@ func (c *ConfigLoggers) SetDefault() { c.Fluentd.CAFile = "" c.Fluentd.CertFile = "" c.Fluentd.KeyFile = "" - c.Fluentd.Tag = "dns.collector" + c.Fluentd.Tag = ProgQname c.Fluentd.BufferSize = 100 c.Fluentd.ChannelBufferSize = 4096 diff --git a/pkglinker/multiplexer.go b/pkglinker/multiplexer.go index 4a6f26e3..45c27cea 100644 --- a/pkglinker/multiplexer.go +++ b/pkglinker/multiplexer.go @@ -163,7 +163,7 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st // register the collector if enabled if subcfg.Collectors.Dnstap.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = workers.NewDnstap(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewDnstapServer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.DnstapProxifier.Enable && IsCollectorRouted(config, input.Name) { mapCollectors[input.Name] = workers.NewDnstapProxifier(nil, subcfg, logger, input.Name) diff --git a/pkglinker/pipelines.go b/pkglinker/pipelines.go index e5378588..ed89073d 100644 --- a/pkglinker/pipelines.go +++ b/pkglinker/pipelines.go @@ -180,7 +180,7 @@ func CreateStanza(stanzaName string, config *pkgconfig.Config, mapCollectors map mapCollectors[stanzaName] = workers.NewDNSMessage(nil, config, logger, stanzaName) } if config.Collectors.Dnstap.Enable { - mapCollectors[stanzaName] = workers.NewDnstap(nil, 
config, logger, stanzaName) + mapCollectors[stanzaName] = workers.NewDnstapServer(nil, config, logger, stanzaName) } if config.Collectors.DnstapProxifier.Enable { mapCollectors[stanzaName] = workers.NewDnstapProxifier(nil, config, logger, stanzaName) diff --git a/workers/clickhouse_test.go b/workers/clickhouse_test.go index 5ba0ba71..66def44b 100644 --- a/workers/clickhouse_test.go +++ b/workers/clickhouse_test.go @@ -20,7 +20,7 @@ func Test_ClickhouseClient(t *testing.T) { }{ { mode: pkgconfig.ModeJSON, - pattern: "dns.collector", + pattern: pkgconfig.ProgQname, }, } cfg := pkgconfig.GetFakeConfig() diff --git a/workers/dnstap.go b/workers/dnstapserver.go similarity index 92% rename from workers/dnstap.go rename to workers/dnstapserver.go index a0a2aa31..85b15a31 100644 --- a/workers/dnstap.go +++ b/workers/dnstapserver.go @@ -19,25 +19,25 @@ import ( "github.com/segmentio/kafka-go/compress" ) -type Dnstap struct { +type DnstapServer struct { *pkgutils.GenericWorker connCounter uint64 } -func NewDnstap(next []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Dnstap { - s := &Dnstap{GenericWorker: pkgutils.NewGenericWorker(config, logger, name, "dnstap", pkgutils.DefaultBufferSize)} - s.SetDefaultRoutes(next) - s.CheckConfig() - return s +func NewDnstapServer(next []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapServer { + w := &DnstapServer{GenericWorker: pkgutils.NewGenericWorker(config, logger, name, "dnstap", pkgutils.DefaultBufferSize)} + w.SetDefaultRoutes(next) + w.CheckConfig() + return w } -func (w *Dnstap) CheckConfig() { +func (w *DnstapServer) CheckConfig() { if !pkgconfig.IsValidTLS(w.GetConfig().Collectors.Dnstap.TLSMinVersion) { w.LogFatal(pkgutils.PrefixLogCollector + "[" + w.GetName() + "] dnstap - invalid tls min version") } } -func (w *Dnstap) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { +func (w *DnstapServer) HandleConn(conn net.Conn, 
connID uint64, forceClose chan bool, wg *sync.WaitGroup) { // close connection on function exit defer func() { w.LogInfo("conn #%d - connection handler terminated", connID) @@ -175,7 +175,7 @@ func (w *Dnstap) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, } } -func (w *Dnstap) StartCollect() { +func (w *DnstapServer) StartCollect() { w.LogInfo("worker is starting collection") defer w.CollectDone() diff --git a/workers/dnstap_test.go b/workers/dnstapserver_test.go similarity index 97% rename from workers/dnstap_test.go rename to workers/dnstapserver_test.go index 47391e83..df6a0fb8 100644 --- a/workers/dnstap_test.go +++ b/workers/dnstapserver_test.go @@ -76,7 +76,7 @@ func Test_DnstapCollector(t *testing.T) { config.Collectors.Dnstap.Compression = tc.compression // start the collector - c := NewDnstap([]pkgutils.Worker{g}, config, logger.New(false), "test") + c := NewDnstapServer([]pkgutils.Worker{g}, config, logger.New(false), "test") go c.StartCollect() // wait before to connect @@ -155,7 +155,7 @@ func Test_DnstapCollector_CloseFrameStream(t *testing.T) { // start the collector in unix mode g := pkgutils.NewFakeLogger() - c := NewDnstap([]pkgutils.Worker{g}, config, lg, "test") + c := NewDnstapServer([]pkgutils.Worker{g}, config, lg, "test") go c.StartCollect() // simulate dns server connection to collector diff --git a/workers/fluentd_test.go b/workers/fluentd_test.go index 0a5f7d90..c34f7331 100644 --- a/workers/fluentd_test.go +++ b/workers/fluentd_test.go @@ -96,7 +96,7 @@ func Test_FluentdClient(t *testing.T) { t.Errorf("Decode tag: %v", err) break } - if tag != "dns.collector" { + if tag != pkgconfig.ProgQname { t.Errorf("invalid tag: %s", tag) break } diff --git a/workers/sniffer_afpacket_test.go b/workers/sniffer_afpacket_test.go index 89f6198f..a3fbd923 100644 --- a/workers/sniffer_afpacket_test.go +++ b/workers/sniffer_afpacket_test.go @@ -23,12 +23,12 @@ func TestAfpacketSnifferRun(t *testing.T) { go c.StartCollect() // send dns query 
- net.LookupIP("dns.collector") + net.LookupIP(pkgconfig.ProgQname) // waiting message in channel for { msg := <-g.GetInputChannel() - if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery && msg.DNS.Qname == "dns.collector" { + if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery && msg.DNS.Qname == pkgconfig.ProgQname { break } } diff --git a/workers/stdout_test.go b/workers/stdout_test.go index a9bdbebf..987c083e 100644 --- a/workers/stdout_test.go +++ b/workers/stdout_test.go @@ -30,21 +30,21 @@ func Test_StdoutTextMode(t *testing.T) { name: "default_delimiter", delimiter: cfg.Global.TextFormatDelimiter, boundary: cfg.Global.TextFormatBoundary, - qname: "dns.collector", + qname: pkgconfig.ProgQname, expected: "- collector CLIENT_QUERY NOERROR 1.2.3.4 1234 - - 0b dns.collector A -\n", }, { name: "custom_delimiter", delimiter: ";", boundary: cfg.Global.TextFormatBoundary, - qname: "dns.collector", + qname: pkgconfig.ProgQname, expected: "-;collector;CLIENT_QUERY;NOERROR;1.2.3.4;1234;-;-;0b;dns.collector;A;-\n", }, { name: "default_boundary", delimiter: cfg.Global.TextFormatDelimiter, boundary: cfg.Global.TextFormatBoundary, - qname: "dns. collector", + qname: pkgconfig.ProgQname, expected: "- collector CLIENT_QUERY NOERROR 1.2.3.4 1234 - - 0b \"dns. 
collector\" A -\n", }, { From f9aa6704acab266c346e8d1121a3a2116ee31676 Mon Sep 17 00:00:00 2001 From: dmachard <5562930+dmachard@users.noreply.github.com> Date: Thu, 9 May 2024 11:35:47 +0200 Subject: [PATCH 3/5] fix regression in test --- pkglinker/multiplexer.go | 2 +- pkglinker/pipelines.go | 2 +- processors/powerdns.go | 397 ----------------------------------- processors/powerdns_test.go | 284 ------------------------- workers/powerdns.go | 400 +++++++++++++++++++++++++++++++++++- workers/powerdns_test.go | 284 ++++++++++++++++++++++++- workers/stdout_test.go | 2 +- 7 files changed, 678 insertions(+), 693 deletions(-) delete mode 100644 processors/powerdns.go delete mode 100644 processors/powerdns_test.go diff --git a/pkglinker/multiplexer.go b/pkglinker/multiplexer.go index 45c27cea..3e78b97c 100644 --- a/pkglinker/multiplexer.go +++ b/pkglinker/multiplexer.go @@ -178,7 +178,7 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st mapCollectors[input.Name] = workers.NewTail(nil, subcfg, logger, input.Name) } if subcfg.Collectors.PowerDNS.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = workers.NewProtobufPowerDNS(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewPdnsServer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.FileIngestor.Enable && IsCollectorRouted(config, input.Name) { mapCollectors[input.Name] = workers.NewFileIngestor(nil, subcfg, logger, input.Name) diff --git a/pkglinker/pipelines.go b/pkglinker/pipelines.go index ed89073d..d62a9293 100644 --- a/pkglinker/pipelines.go +++ b/pkglinker/pipelines.go @@ -195,7 +195,7 @@ func CreateStanza(stanzaName string, config *pkgconfig.Config, mapCollectors map mapCollectors[stanzaName] = workers.NewTail(nil, config, logger, stanzaName) } if config.Collectors.PowerDNS.Enable { - mapCollectors[stanzaName] = workers.NewProtobufPowerDNS(nil, config, logger, stanzaName) + mapCollectors[stanzaName] = 
workers.NewPdnsServer(nil, config, logger, stanzaName) } if config.Collectors.FileIngestor.Enable { mapCollectors[stanzaName] = workers.NewFileIngestor(nil, config, logger, stanzaName) diff --git a/processors/powerdns.go b/processors/powerdns.go deleted file mode 100644 index 20e0c526..00000000 --- a/processors/powerdns.go +++ /dev/null @@ -1,397 +0,0 @@ -package processors - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" - "github.com/miekg/dns" - "google.golang.org/protobuf/proto" -) - -var ( - ProtobufPowerDNSToDNSTap = map[string]string{ - "DNSQueryType": "CLIENT_QUERY", - "DNSResponseType": "CLIENT_RESPONSE", - "DNSOutgoingQueryType": "RESOLVER_QUERY", - "DNSIncomingResponseType": "RESOLVER_RESPONSE", - } -) - -type PdnsProcessor struct { - ConnID int - PeerName string - doneRun, stopRun chan bool - doneMonitor, stopMonitor chan bool - recvFrom chan []byte - logger *logger.Logger - config *pkgconfig.Config - ConfigChan chan *pkgconfig.Config - name string - chanSize int - RoutingHandler pkgutils.RoutingHandler - dropped chan string - droppedCount map[string]int -} - -func NewPdnsProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) PdnsProcessor { - logger.Info(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - initialization...", name, connID) - d := PdnsProcessor{ - ConnID: connID, - PeerName: peerName, - doneMonitor: make(chan bool), - doneRun: make(chan bool), - stopMonitor: make(chan bool), - stopRun: make(chan bool), - recvFrom: make(chan []byte, size), - chanSize: size, - logger: logger, - config: config, - ConfigChan: make(chan 
*pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - dropped: make(chan string), - droppedCount: map[string]int{}, - } - return d -} - -func (p *PdnsProcessor) LogInfo(msg string, v ...interface{}) { - var log string - if p.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) - } - p.logger.Info(log+msg, v...) -} - -func (p *PdnsProcessor) LogError(msg string, v ...interface{}) { - var log string - if p.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) - } - p.logger.Error(log+msg, v...) -} - -func (p *PdnsProcessor) GetChannel() chan []byte { - return p.recvFrom -} - -func (p *PdnsProcessor) Stop() { - p.LogInfo("stopping processor...") - p.RoutingHandler.Stop() - - p.LogInfo("stopping to process...") - p.stopRun <- true - <-p.doneRun - - p.LogInfo("stopping to monitor loggers...") - p.stopMonitor <- true - <-p.doneMonitor -} - -func (p *PdnsProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { - pbdm := &powerdns_protobuf.PBDNSMessage{} - - // prepare next channels - defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) - droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) - - // prepare enabled transformers - transforms := transformers.NewTransforms(&p.config.IngoingTransformers, p.logger, p.name, defaultRoutes, p.ConnID) - - // start goroutine to count dropped messsages - go p.MonitorLoggers() - - // read incoming dns message - p.LogInfo("waiting dns message to process...") -RUN_LOOP: - for { - select { - case cfg := <-p.ConfigChan: - p.config = cfg - transforms.ReloadConfig(&cfg.IngoingTransformers) - - case <-p.stopRun: - transforms.Reset() - p.doneRun <- true - 
break RUN_LOOP - - case data, opened := <-p.recvFrom: - if !opened { - p.LogInfo("channel closed, exit") - return - } - - err := proto.Unmarshal(data, pbdm) - if err != nil { - p.LogError("pbdm decoding, %s", err) - continue - } - - // init dns message - dm := dnsutils.DNSMessage{} - dm.Init() - - // init dns message with additionnals parts - transforms.InitDNSMessageFormat(&dm) - - // init powerdns with default values - dm.PowerDNS = &dnsutils.PowerDNS{ - Tags: []string{}, - OriginalRequestSubnet: "", - AppliedPolicy: "", - Metadata: map[string]string{}, - } - - dm.DNSTap.Identity = string(pbdm.GetServerIdentity()) - dm.DNSTap.Operation = ProtobufPowerDNSToDNSTap[pbdm.GetType().String()] - - if ipVersion, valid := netutils.IPVersion[pbdm.GetSocketFamily().String()]; valid { - dm.NetworkInfo.Family = ipVersion - } else { - dm.NetworkInfo.Family = pkgconfig.StrUnknown - } - dm.NetworkInfo.Protocol = pbdm.GetSocketProtocol().String() - - if pbdm.From != nil { - dm.NetworkInfo.QueryIP = net.IP(pbdm.From).String() - } - dm.NetworkInfo.QueryPort = strconv.FormatUint(uint64(pbdm.GetFromPort()), 10) - dm.NetworkInfo.ResponseIP = net.IP(pbdm.To).String() - dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(pbdm.GetToPort()), 10) - - dm.DNS.ID = int(pbdm.GetId()) - dm.DNS.Length = int(pbdm.GetInBytes()) - dm.DNSTap.TimeSec = int(pbdm.GetTimeSec()) - dm.DNSTap.TimeNsec = int(pbdm.GetTimeUsec()) * 1e3 - - if int(pbdm.Type.Number())%2 == 1 { - dm.DNS.Type = dnsutils.DNSQuery - } else { - dm.DNS.Type = dnsutils.DNSReply - - tsQuery := float64(pbdm.Response.GetQueryTimeSec()) + float64(pbdm.Response.GetQueryTimeUsec())/1e6 - tsReply := float64(pbdm.GetTimeSec()) + float64(pbdm.GetTimeUsec())/1e6 - - // convert latency to human - dm.DNSTap.Latency = tsReply - tsQuery - dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) - dm.DNS.Rcode = dnsutils.RcodeToString(int(pbdm.Response.GetRcode())) - } - - // compute timestamp - ts := time.Unix(int64(dm.DNSTap.TimeSec), 
int64(dm.DNSTap.TimeNsec)) - dm.DNSTap.Timestamp = ts.UnixNano() - dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) - - dm.DNS.Qname = pbdm.Question.GetQName() - // remove ending dot ? - dm.DNS.Qname = strings.TrimSuffix(dm.DNS.Qname, ".") - - // get query type - dm.DNS.Qtype = dnsutils.RdatatypeToString(int(pbdm.Question.GetQType())) - - // get specific powerdns params - pdns := dnsutils.PowerDNS{} - - // get PowerDNS OriginalRequestSubnet - ip := pbdm.GetOriginalRequestorSubnet() - if len(ip) == 4 { - addr := make(net.IP, net.IPv4len) - copy(addr, ip) - pdns.OriginalRequestSubnet = addr.String() - } - if len(ip) == 16 { - addr := make(net.IP, net.IPv6len) - copy(addr, ip) - pdns.OriginalRequestSubnet = addr.String() - } - - // get PowerDNS tags - tags := pbdm.GetResponse().GetTags() - if tags == nil { - tags = []string{} - } - pdns.Tags = tags - - // get PowerDNS policy applied - pdns.AppliedPolicy = pbdm.GetResponse().GetAppliedPolicy() - pdns.AppliedPolicyHit = pbdm.GetResponse().GetAppliedPolicyHit() - pdns.AppliedPolicyKind = pbdm.GetResponse().GetAppliedPolicyKind().String() - pdns.AppliedPolicyTrigger = pbdm.GetResponse().GetAppliedPolicyTrigger() - pdns.AppliedPolicyType = pbdm.GetResponse().GetAppliedPolicyType().String() - - // get PowerDNS metadata - metas := make(map[string]string) - for _, e := range pbdm.GetMeta() { - metas[e.GetKey()] = strings.Join(e.Value.StringVal, " ") - } - pdns.Metadata = metas - - // get http protocol version - if pbdm.GetSocketProtocol().String() == "DOH" { - pdns.HTTPVersion = pbdm.GetHttpVersion().String() - } - - // finally set pdns to dns message - dm.PowerDNS = &pdns - - // decode answers - answers := []dnsutils.DNSAnswer{} - RRs := pbdm.GetResponse().GetRrs() - for j := range RRs { - rdata := string(RRs[j].GetRdata()) - if RRs[j].GetType() == 1 { - addr := make(net.IP, net.IPv4len) - copy(addr, rdata[:net.IPv4len]) - rdata = addr.String() - } - if RRs[j].GetType() == 28 { - addr := make(net.IP, 
net.IPv6len) - copy(addr, rdata[:net.IPv6len]) - rdata = addr.String() - } - - rr := dnsutils.DNSAnswer{ - Name: RRs[j].GetName(), - Rdatatype: dnsutils.RdatatypeToString(int(RRs[j].GetType())), - Class: dnsutils.ClassToString(int(RRs[j].GetClass())), - TTL: int(RRs[j].GetTtl()), - Rdata: rdata, - } - answers = append(answers, rr) - } - dm.DNS.DNSRRs.Answers = answers - - if p.config.Collectors.PowerDNS.AddDNSPayload { - - qname := dns.Fqdn(pbdm.Question.GetQName()) - newDNS := new(dns.Msg) - newDNS.Id = uint16(pbdm.GetId()) - newDNS.Response = false - - question := dns.Question{ - Name: qname, - Qtype: uint16(pbdm.Question.GetQType()), - Qclass: uint16(pbdm.Question.GetQClass()), - } - newDNS.Question = append(newDNS.Question, question) - - if int(pbdm.Type.Number())%2 != 1 { - newDNS.Response = true - newDNS.Rcode = int(pbdm.Response.GetRcode()) - - newDNS.Answer = []dns.RR{} - rrs := pbdm.GetResponse().GetRrs() - for j := range rrs { - rrname := dns.Fqdn(rrs[j].GetName()) - switch rrs[j].GetType() { - // A - case 1: - rdata := &dns.A{ - Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, - A: net.IP(rrs[j].GetRdata()), - } - newDNS.Answer = append(newDNS.Answer, rdata) - // AAAA - case 28: - rdata := &dns.AAAA{ - Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, - AAAA: net.IP(rrs[j].GetRdata()), - } - newDNS.Answer = append(newDNS.Answer, rdata) - // CNAME - case 5: - rdata := &dns.CNAME{ - Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, - Target: dns.Fqdn(string(rrs[j].GetRdata())), - } - newDNS.Answer = append(newDNS.Answer, rdata) - } - - } - - } - - pktWire, err := newDNS.Pack() - if err == nil { - dm.DNS.Payload = pktWire - if dm.DNS.Length == 0 { - dm.DNS.Length = len(pktWire) - } - } else { - dm.DNS.MalformedPacket = true - } - } - - // apply all enabled transformers - if transforms.ProcessMessage(&dm) == 
transformers.ReturnDrop { - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: // Successful send to logger channel - default: - p.dropped <- droppedNames[i] - } - } - continue - } - - // dispatch dns messages to connected loggers - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- dm: // Successful send to logger channel - default: - p.dropped <- defaultNames[i] - } - } - } - } - p.LogInfo("processing terminated") -} - -func (p *PdnsProcessor) MonitorLoggers() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -FOLLOW_LOOP: - for { - select { - case <-p.stopMonitor: - close(p.dropped) - bufferFull.Stop() - p.doneMonitor <- true - break FOLLOW_LOOP - - case loggerName := <-p.dropped: - if _, ok := p.droppedCount[loggerName]; !ok { - p.droppedCount[loggerName] = 1 - } else { - p.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - - for v, k := range p.droppedCount { - if k > 0 { - p.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) - p.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - - } - } - p.LogInfo("monitor terminated") -} diff --git a/processors/powerdns_test.go b/processors/powerdns_test.go deleted file mode 100644 index 4bc57091..00000000 --- a/processors/powerdns_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package processors - -import ( - "fmt" - "regexp" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" - powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" - "github.com/miekg/dns" - "google.golang.org/protobuf/proto" -) - -func Test_PowerDNSProcessor(t *testing.T) { - // init the dnstap consumer - consumer := NewPdnsProcessor(0, "peername", pkgconfig.GetFakeConfig(), logger.New(false), "test", 512) - - // init the powerdns processor - dnsQname := pkgconfig.ValidDomain - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: 
&dnsQname} - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte(ExpectedIdentity) - dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - - data, _ := proto.Marshal(dm) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - msg := <-fl.GetInputChannel() - if msg.DNSTap.Identity != ExpectedIdentity { - t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) - } -} - -func Test_PowerDNSProcessor_AddDNSPayload_Valid(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.PowerDNS.AddDNSPayload = true - - // init the powerdns processor - consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) - - // prepare powerdns message - dnsQname := pkgconfig.ValidDomain - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte(ExpectedIdentity) - dm.Id = proto.Uint32(2000) - dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - - data, _ := proto.Marshal(dm) - - // start the consumer and add packet - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - consumer.GetChannel() <- data - - // read dns message - msg := <-fl.GetInputChannel() - - // checks - if msg.DNS.Length == 0 { - t.Errorf("invalid length got %d", msg.DNS.Length) - } - if len(msg.DNS.Payload) == 0 { - t.Errorf("invalid payload length %d", 
len(msg.DNS.Payload)) - } - - // valid dns payload ? - var decodedPayload dns.Msg - err := decodedPayload.Unpack(msg.DNS.Payload) - if err != nil { - t.Errorf("unpack error %s", err) - } - if decodedPayload.Question[0].Name != pkgconfig.ValidDomain { - t.Errorf("invalid qname in payload: %s", decodedPayload.Question[0].Name) - } -} - -func Test_PowerDNSProcessor_AddDNSPayload_InvalidLabelLength(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.PowerDNS.AddDNSPayload = true - - // init the dnstap consumer - consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) - - // prepare dnstap - dnsQname := pkgconfig.BadDomainLabel - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte("powerdnspb") - dm.Id = proto.Uint32(2000) - dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - - data, _ := proto.Marshal(dm) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - msg := <-fl.GetInputChannel() - if !msg.DNS.MalformedPacket { - t.Errorf("DNS message should malformed") - } -} - -func Test_PowerDNSProcessor_AddDNSPayload_QnameTooLongDomain(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.PowerDNS.AddDNSPayload = true - - // init the dnstap consumer - consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) - - // prepare dnstap - dnsQname := pkgconfig.BadVeryLongDomain - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte("powerdnspb") - 
dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - - data, _ := proto.Marshal(dm) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - msg := <-fl.GetInputChannel() - if !msg.DNS.MalformedPacket { - t.Errorf("DNS message should malformed because of qname too long") - } -} - -func Test_PowerDNSProcessor_AddDNSPayload_AnswersTooLongDomain(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.PowerDNS.AddDNSPayload = true - - // init the dnstap consumer - consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) - - // prepare dnstap - dnsQname := pkgconfig.ValidDomain - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} - - rrQname := pkgconfig.BadVeryLongDomain - rrDNS := powerdns_protobuf.PBDNSMessage_DNSResponse_DNSRR{ - Name: &rrQname, - Class: proto.Uint32(1), - Type: proto.Uint32(1), - Rdata: []byte{0x01, 0x00, 0x00, 0x01}, - } - dnsReply := powerdns_protobuf.PBDNSMessage_DNSResponse{} - dnsReply.Rrs = append(dnsReply.Rrs, &rrDNS) - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte("powerdnspb") - dm.Type = powerdns_protobuf.PBDNSMessage_DNSResponseType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - dm.Response = &dnsReply - - data, _ := proto.Marshal(dm) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap 
consumer - msg := <-fl.GetInputChannel() - - // tests verifications - if !msg.DNS.MalformedPacket { - t.Errorf("DNS message is not malformed") - } -} - -// test for issue https://github.com/dmachard/go-dnscollector/issues/568 -func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - consumer := NewPdnsProcessor(0, "peername", cfg, lg, "test", 512) - - // init the powerdns processor - dnsQname := pkgconfig.ValidDomain - dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} - - dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte(ExpectedIdentity) - dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() - dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() - dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() - dm.Question = &dnsQuestion - - data, _ := proto.Marshal(dm) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLoggerWithBufferSize(1) - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packets to consumer - for i := 0; i < 512; i++ { - consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg511) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - msg := <-fl.GetInputChannel() - if msg.DNSTap.Identity != ExpectedIdentity { - t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) - } - - // send second shot of packets to consumer - for i := 0; i < 1024; i++ { - consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - for entry := range logsChan { - 
fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg1023) - if pattern.MatchString(entry.Message) { - break - } - } - - // read just one dns message from dnstap consumer - msg2 := <-fl.GetInputChannel() - if msg2.DNSTap.Identity != ExpectedIdentity { - t.Errorf("invalid identity in second dns message: %s", msg2.DNSTap.Identity) - } -} diff --git a/workers/powerdns.go b/workers/powerdns.go index f823d4dc..c1905576 100644 --- a/workers/powerdns.go +++ b/workers/powerdns.go @@ -3,39 +3,45 @@ package workers import ( "bufio" "errors" + "fmt" "io" "net" + "strconv" + "strings" "sync" "sync/atomic" "time" + "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" + "github.com/dmachard/go-dnscollector/transformers" "github.com/dmachard/go-logger" powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" + "github.com/miekg/dns" + "google.golang.org/protobuf/proto" ) -type ProtobufPowerDNS struct { +type PdnsServer struct { *pkgutils.GenericWorker connCounter uint64 } -func NewProtobufPowerDNS(next []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *ProtobufPowerDNS { - w := &ProtobufPowerDNS{GenericWorker: pkgutils.NewGenericWorker(config, logger, name, "powerdns", pkgutils.DefaultBufferSize)} +func NewPdnsServer(next []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *PdnsServer { + w := &PdnsServer{GenericWorker: pkgutils.NewGenericWorker(config, logger, name, "powerdns", pkgutils.DefaultBufferSize)} w.SetDefaultRoutes(next) w.CheckConfig() return w } -func (w *ProtobufPowerDNS) CheckConfig() { +func (w *PdnsServer) CheckConfig() { if !pkgconfig.IsValidTLS(w.GetConfig().Collectors.PowerDNS.TLSMinVersion) { w.LogFatal(pkgutils.PrefixLogCollector + "[" + w.GetName() + "] invalid tls min version") } } -func (w 
*ProtobufPowerDNS) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { +func (w *PdnsServer) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { // close connection on function exit defer func() { w.LogInfo("conn #%d - connection handler terminated", connID) @@ -49,7 +55,7 @@ func (w *ProtobufPowerDNS) HandleConn(conn net.Conn, connID uint64, forceClose c w.LogInfo("new connection #%d from %s (%s)", connID, peer, peerName) // start protobuf subprocessor - pdnsProcessor := processors.NewPdnsProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.PowerDNS.ChannelBufferSize) + pdnsProcessor := NewPdnsProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.PowerDNS.ChannelBufferSize) go pdnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) r := bufio.NewReader(conn) @@ -114,7 +120,7 @@ func (w *ProtobufPowerDNS) HandleConn(conn net.Conn, connID uint64, forceClose c } } -func (w *ProtobufPowerDNS) StartCollect() { +func (w *PdnsServer) StartCollect() { w.LogInfo("worker is starting collection") defer w.CollectDone() @@ -174,3 +180,381 @@ func (w *ProtobufPowerDNS) StartCollect() { } } } + +var ( + ProtobufPowerDNSToDNSTap = map[string]string{ + "DNSQueryType": "CLIENT_QUERY", + "DNSResponseType": "CLIENT_RESPONSE", + "DNSOutgoingQueryType": "RESOLVER_QUERY", + "DNSIncomingResponseType": "RESOLVER_RESPONSE", + } +) + +type PdnsProcessor struct { + ConnID int + PeerName string + doneRun, stopRun chan bool + doneMonitor, stopMonitor chan bool + recvFrom chan []byte + logger *logger.Logger + config *pkgconfig.Config + ConfigChan chan *pkgconfig.Config + name string + chanSize int + RoutingHandler pkgutils.RoutingHandler + dropped chan string + droppedCount map[string]int +} + +func NewPdnsProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) PdnsProcessor { 
+ logger.Info(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - initialization...", name, connID) + d := PdnsProcessor{ + ConnID: connID, + PeerName: peerName, + doneMonitor: make(chan bool), + doneRun: make(chan bool), + stopMonitor: make(chan bool), + stopRun: make(chan bool), + recvFrom: make(chan []byte, size), + chanSize: size, + logger: logger, + config: config, + ConfigChan: make(chan *pkgconfig.Config), + name: name, + RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), + dropped: make(chan string), + droppedCount: map[string]int{}, + } + return d +} + +func (p *PdnsProcessor) LogInfo(msg string, v ...interface{}) { + var log string + if p.ConnID == 0 { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) + } else { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) + } + p.logger.Info(log+msg, v...) +} + +func (p *PdnsProcessor) LogError(msg string, v ...interface{}) { + var log string + if p.ConnID == 0 { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) + } else { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) + } + p.logger.Error(log+msg, v...) 
+} + +func (p *PdnsProcessor) GetChannel() chan []byte { + return p.recvFrom +} + +func (p *PdnsProcessor) Stop() { + p.LogInfo("stopping processor...") + p.RoutingHandler.Stop() + + p.LogInfo("stopping to process...") + p.stopRun <- true + <-p.doneRun + + p.LogInfo("stopping to monitor loggers...") + p.stopMonitor <- true + <-p.doneMonitor +} + +func (p *PdnsProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { + pbdm := &powerdns_protobuf.PBDNSMessage{} + + // prepare next channels + defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) + droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) + + // prepare enabled transformers + transforms := transformers.NewTransforms(&p.config.IngoingTransformers, p.logger, p.name, defaultRoutes, p.ConnID) + + // start goroutine to count dropped messsages + go p.MonitorLoggers() + + // read incoming dns message + p.LogInfo("waiting dns message to process...") +RUN_LOOP: + for { + select { + case cfg := <-p.ConfigChan: + p.config = cfg + transforms.ReloadConfig(&cfg.IngoingTransformers) + + case <-p.stopRun: + transforms.Reset() + p.doneRun <- true + break RUN_LOOP + + case data, opened := <-p.recvFrom: + if !opened { + p.LogInfo("channel closed, exit") + return + } + + err := proto.Unmarshal(data, pbdm) + if err != nil { + p.LogError("pbdm decoding, %s", err) + continue + } + + // init dns message + dm := dnsutils.DNSMessage{} + dm.Init() + + // init dns message with additionnals parts + transforms.InitDNSMessageFormat(&dm) + + // init powerdns with default values + dm.PowerDNS = &dnsutils.PowerDNS{ + Tags: []string{}, + OriginalRequestSubnet: "", + AppliedPolicy: "", + Metadata: map[string]string{}, + } + + dm.DNSTap.Identity = string(pbdm.GetServerIdentity()) + dm.DNSTap.Operation = ProtobufPowerDNSToDNSTap[pbdm.GetType().String()] + + if ipVersion, valid := netutils.IPVersion[pbdm.GetSocketFamily().String()]; valid { + dm.NetworkInfo.Family = ipVersion + } else { + 
dm.NetworkInfo.Family = pkgconfig.StrUnknown + } + dm.NetworkInfo.Protocol = pbdm.GetSocketProtocol().String() + + if pbdm.From != nil { + dm.NetworkInfo.QueryIP = net.IP(pbdm.From).String() + } + dm.NetworkInfo.QueryPort = strconv.FormatUint(uint64(pbdm.GetFromPort()), 10) + dm.NetworkInfo.ResponseIP = net.IP(pbdm.To).String() + dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(pbdm.GetToPort()), 10) + + dm.DNS.ID = int(pbdm.GetId()) + dm.DNS.Length = int(pbdm.GetInBytes()) + dm.DNSTap.TimeSec = int(pbdm.GetTimeSec()) + dm.DNSTap.TimeNsec = int(pbdm.GetTimeUsec()) * 1e3 + + if int(pbdm.Type.Number())%2 == 1 { + dm.DNS.Type = dnsutils.DNSQuery + } else { + dm.DNS.Type = dnsutils.DNSReply + + tsQuery := float64(pbdm.Response.GetQueryTimeSec()) + float64(pbdm.Response.GetQueryTimeUsec())/1e6 + tsReply := float64(pbdm.GetTimeSec()) + float64(pbdm.GetTimeUsec())/1e6 + + // convert latency to human + dm.DNSTap.Latency = tsReply - tsQuery + dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) + dm.DNS.Rcode = dnsutils.RcodeToString(int(pbdm.Response.GetRcode())) + } + + // compute timestamp + ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) + dm.DNSTap.Timestamp = ts.UnixNano() + dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) + + dm.DNS.Qname = pbdm.Question.GetQName() + // remove ending dot ? 
+ dm.DNS.Qname = strings.TrimSuffix(dm.DNS.Qname, ".") + + // get query type + dm.DNS.Qtype = dnsutils.RdatatypeToString(int(pbdm.Question.GetQType())) + + // get specific powerdns params + pdns := dnsutils.PowerDNS{} + + // get PowerDNS OriginalRequestSubnet + ip := pbdm.GetOriginalRequestorSubnet() + if len(ip) == 4 { + addr := make(net.IP, net.IPv4len) + copy(addr, ip) + pdns.OriginalRequestSubnet = addr.String() + } + if len(ip) == 16 { + addr := make(net.IP, net.IPv6len) + copy(addr, ip) + pdns.OriginalRequestSubnet = addr.String() + } + + // get PowerDNS tags + tags := pbdm.GetResponse().GetTags() + if tags == nil { + tags = []string{} + } + pdns.Tags = tags + + // get PowerDNS policy applied + pdns.AppliedPolicy = pbdm.GetResponse().GetAppliedPolicy() + pdns.AppliedPolicyHit = pbdm.GetResponse().GetAppliedPolicyHit() + pdns.AppliedPolicyKind = pbdm.GetResponse().GetAppliedPolicyKind().String() + pdns.AppliedPolicyTrigger = pbdm.GetResponse().GetAppliedPolicyTrigger() + pdns.AppliedPolicyType = pbdm.GetResponse().GetAppliedPolicyType().String() + + // get PowerDNS metadata + metas := make(map[string]string) + for _, e := range pbdm.GetMeta() { + metas[e.GetKey()] = strings.Join(e.Value.StringVal, " ") + } + pdns.Metadata = metas + + // get http protocol version + if pbdm.GetSocketProtocol().String() == "DOH" { + pdns.HTTPVersion = pbdm.GetHttpVersion().String() + } + + // finally set pdns to dns message + dm.PowerDNS = &pdns + + // decode answers + answers := []dnsutils.DNSAnswer{} + RRs := pbdm.GetResponse().GetRrs() + for j := range RRs { + rdata := string(RRs[j].GetRdata()) + if RRs[j].GetType() == 1 { + addr := make(net.IP, net.IPv4len) + copy(addr, rdata[:net.IPv4len]) + rdata = addr.String() + } + if RRs[j].GetType() == 28 { + addr := make(net.IP, net.IPv6len) + copy(addr, rdata[:net.IPv6len]) + rdata = addr.String() + } + + rr := dnsutils.DNSAnswer{ + Name: RRs[j].GetName(), + Rdatatype: dnsutils.RdatatypeToString(int(RRs[j].GetType())), + Class: 
dnsutils.ClassToString(int(RRs[j].GetClass())), + TTL: int(RRs[j].GetTtl()), + Rdata: rdata, + } + answers = append(answers, rr) + } + dm.DNS.DNSRRs.Answers = answers + + if p.config.Collectors.PowerDNS.AddDNSPayload { + + qname := dns.Fqdn(pbdm.Question.GetQName()) + newDNS := new(dns.Msg) + newDNS.Id = uint16(pbdm.GetId()) + newDNS.Response = false + + question := dns.Question{ + Name: qname, + Qtype: uint16(pbdm.Question.GetQType()), + Qclass: uint16(pbdm.Question.GetQClass()), + } + newDNS.Question = append(newDNS.Question, question) + + if int(pbdm.Type.Number())%2 != 1 { + newDNS.Response = true + newDNS.Rcode = int(pbdm.Response.GetRcode()) + + newDNS.Answer = []dns.RR{} + rrs := pbdm.GetResponse().GetRrs() + for j := range rrs { + rrname := dns.Fqdn(rrs[j].GetName()) + switch rrs[j].GetType() { + // A + case 1: + rdata := &dns.A{ + Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, + A: net.IP(rrs[j].GetRdata()), + } + newDNS.Answer = append(newDNS.Answer, rdata) + // AAAA + case 28: + rdata := &dns.AAAA{ + Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, + AAAA: net.IP(rrs[j].GetRdata()), + } + newDNS.Answer = append(newDNS.Answer, rdata) + // CNAME + case 5: + rdata := &dns.CNAME{ + Hdr: dns.RR_Header{Name: rrname, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: rrs[j].GetTtl()}, + Target: dns.Fqdn(string(rrs[j].GetRdata())), + } + newDNS.Answer = append(newDNS.Answer, rdata) + } + + } + + } + + pktWire, err := newDNS.Pack() + if err == nil { + dm.DNS.Payload = pktWire + if dm.DNS.Length == 0 { + dm.DNS.Length = len(pktWire) + } + } else { + dm.DNS.MalformedPacket = true + } + } + + // apply all enabled transformers + if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { + for i := range droppedRoutes { + select { + case droppedRoutes[i] <- dm: // Successful send to logger channel + default: + p.dropped <- droppedNames[i] + } + } + continue + } + + 
// dispatch dns messages to connected loggers + for i := range defaultRoutes { + select { + case defaultRoutes[i] <- dm: // Successful send to logger channel + default: + p.dropped <- defaultNames[i] + } + } + } + } + p.LogInfo("processing terminated") +} + +func (p *PdnsProcessor) MonitorLoggers() { + watchInterval := 10 * time.Second + bufferFull := time.NewTimer(watchInterval) +FOLLOW_LOOP: + for { + select { + case <-p.stopMonitor: + close(p.dropped) + bufferFull.Stop() + p.doneMonitor <- true + break FOLLOW_LOOP + + case loggerName := <-p.dropped: + if _, ok := p.droppedCount[loggerName]; !ok { + p.droppedCount[loggerName] = 1 + } else { + p.droppedCount[loggerName]++ + } + + case <-bufferFull.C: + + for v, k := range p.droppedCount { + if k > 0 { + p.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) + p.droppedCount[v] = 0 + } + } + bufferFull.Reset(watchInterval) + + } + } + p.LogInfo("monitor terminated") +} diff --git a/workers/powerdns_test.go b/workers/powerdns_test.go index 3e2e9175..125531a2 100644 --- a/workers/powerdns_test.go +++ b/workers/powerdns_test.go @@ -1,7 +1,9 @@ package workers import ( + "fmt" "net" + "regexp" "testing" "time" @@ -9,12 +11,23 @@ import ( "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-logger" + powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" + "github.com/miekg/dns" + "google.golang.org/protobuf/proto" +) + +const ( + ExpectedQname = "dnscollector.dev" + ExpectedQname2 = "dns.collector" + ExpectedBufferMsg511 = ".*buffer is full, 511.*" + ExpectedBufferMsg1023 = ".*buffer is full, 1023.*" + ExpectedIdentity = "powerdnspb" ) func TestPowerDNS_Run(t *testing.T) { g := pkgutils.NewFakeLogger() - c := NewProtobufPowerDNS([]pkgutils.Worker{g}, pkgconfig.GetFakeConfig(), logger.New(false), "test") + c := NewPdnsServer([]pkgutils.Worker{g}, pkgconfig.GetFakeConfig(), logger.New(false), "test") go c.StartCollect() // wait before to 
connect @@ -25,3 +38,272 @@ func TestPowerDNS_Run(t *testing.T) { } defer conn.Close() } + +func Test_PowerDNSProcessor(t *testing.T) { + // init the dnstap consumer + consumer := NewPdnsProcessor(0, "peername", pkgconfig.GetFakeConfig(), logger.New(false), "test", 512) + + // init the powerdns processor + dnsQname := pkgconfig.ValidDomain + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte(ExpectedIdentity) + dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() + dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + + data, _ := proto.Marshal(dm) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + msg := <-fl.GetInputChannel() + if msg.DNSTap.Identity != ExpectedIdentity { + t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) + } +} + +func Test_PowerDNSProcessor_AddDNSPayload_Valid(t *testing.T) { + cfg := pkgconfig.GetFakeConfig() + cfg.Collectors.PowerDNS.AddDNSPayload = true + + // init the powerdns processor + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + + // prepare powerdns message + dnsQname := pkgconfig.ValidDomain + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte(ExpectedIdentity) + dm.Id = proto.Uint32(2000) + dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() + dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + + data, _ := proto.Marshal(dm) + + // start the consumer and 
add packet + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + consumer.GetChannel() <- data + + // read dns message + msg := <-fl.GetInputChannel() + + // checks + if msg.DNS.Length == 0 { + t.Errorf("invalid length got %d", msg.DNS.Length) + } + if len(msg.DNS.Payload) == 0 { + t.Errorf("invalid payload length %d", len(msg.DNS.Payload)) + } + + // valid dns payload ? + var decodedPayload dns.Msg + err := decodedPayload.Unpack(msg.DNS.Payload) + if err != nil { + t.Errorf("unpack error %s", err) + } + if decodedPayload.Question[0].Name != pkgconfig.ValidDomain { + t.Errorf("invalid qname in payload: %s", decodedPayload.Question[0].Name) + } +} + +func Test_PowerDNSProcessor_AddDNSPayload_InvalidLabelLength(t *testing.T) { + cfg := pkgconfig.GetFakeConfig() + cfg.Collectors.PowerDNS.AddDNSPayload = true + + // init the dnstap consumer + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + + // prepare dnstap + dnsQname := pkgconfig.BadDomainLabel + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte("powerdnspb") + dm.Id = proto.Uint32(2000) + dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() + dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + + data, _ := proto.Marshal(dm) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + msg := <-fl.GetInputChannel() + if !msg.DNS.MalformedPacket { + t.Errorf("DNS message should malformed") + } +} + +func Test_PowerDNSProcessor_AddDNSPayload_QnameTooLongDomain(t *testing.T) { + cfg := 
pkgconfig.GetFakeConfig() + cfg.Collectors.PowerDNS.AddDNSPayload = true + + // init the dnstap consumer + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + + // prepare dnstap + dnsQname := pkgconfig.BadVeryLongDomain + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte("powerdnspb") + dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() + dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + + data, _ := proto.Marshal(dm) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + msg := <-fl.GetInputChannel() + if !msg.DNS.MalformedPacket { + t.Errorf("DNS message should malformed because of qname too long") + } +} + +func Test_PowerDNSProcessor_AddDNSPayload_AnswersTooLongDomain(t *testing.T) { + cfg := pkgconfig.GetFakeConfig() + cfg.Collectors.PowerDNS.AddDNSPayload = true + + // init the dnstap consumer + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + + // prepare dnstap + dnsQname := pkgconfig.ValidDomain + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + rrQname := pkgconfig.BadVeryLongDomain + rrDNS := powerdns_protobuf.PBDNSMessage_DNSResponse_DNSRR{ + Name: &rrQname, + Class: proto.Uint32(1), + Type: proto.Uint32(1), + Rdata: []byte{0x01, 0x00, 0x00, 0x01}, + } + dnsReply := powerdns_protobuf.PBDNSMessage_DNSResponse{} + dnsReply.Rrs = append(dnsReply.Rrs, &rrDNS) + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte("powerdnspb") + dm.Type = powerdns_protobuf.PBDNSMessage_DNSResponseType.Enum() + dm.SocketProtocol = 
powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + dm.Response = &dnsReply + + data, _ := proto.Marshal(dm) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + msg := <-fl.GetInputChannel() + + // tests verifications + if !msg.DNS.MalformedPacket { + t.Errorf("DNS message is not malformed") + } +} + +// test for issue https://github.com/dmachard/go-dnscollector/issues/568 +func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { + // redirect stdout output to bytes buffer + logsChan := make(chan logger.LogEntry, 10) + lg := logger.New(true) + lg.SetOutputChannel((logsChan)) + + // init the dnstap consumer + cfg := pkgconfig.GetFakeConfig() + consumer := NewPdnsProcessor(0, "peername", cfg, lg, "test", 512) + + // init the powerdns processor + dnsQname := pkgconfig.ValidDomain + dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} + + dm := &powerdns_protobuf.PBDNSMessage{} + dm.ServerIdentity = []byte(ExpectedIdentity) + dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() + dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() + dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() + dm.Question = &dnsQuestion + + data, _ := proto.Marshal(dm) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLoggerWithBufferSize(1) + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packets to consumer + for i := 0; i < 512; i++ { + consumer.GetChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(ExpectedBufferMsg511) + if pattern.MatchString(entry.Message) { + break + } 
+ } + + // read dns message from dnstap consumer + msg := <-fl.GetInputChannel() + if msg.DNSTap.Identity != ExpectedIdentity { + t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) + } + + // send second shot of packets to consumer + for i := 0; i < 1024; i++ { + consumer.GetChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(ExpectedBufferMsg1023) + if pattern.MatchString(entry.Message) { + break + } + } + + // read just one dns message from dnstap consumer + msg2 := <-fl.GetInputChannel() + if msg2.DNSTap.Identity != ExpectedIdentity { + t.Errorf("invalid identity in second dns message: %s", msg2.DNSTap.Identity) + } +} diff --git a/workers/stdout_test.go b/workers/stdout_test.go index 987c083e..8433175b 100644 --- a/workers/stdout_test.go +++ b/workers/stdout_test.go @@ -44,7 +44,7 @@ func Test_StdoutTextMode(t *testing.T) { name: "default_boundary", delimiter: cfg.Global.TextFormatDelimiter, boundary: cfg.Global.TextFormatBoundary, - qname: pkgconfig.ProgQname, + qname: "dns. collector", expected: "- collector CLIENT_QUERY NOERROR 1.2.3.4 1234 - - 0b \"dns. 
collector\" A -\n", }, { From 0e741c1481fea2f8dc9fdcc8615194f22599c18e Mon Sep 17 00:00:00 2001 From: dmachard <5562930+dmachard@users.noreply.github.com> Date: Thu, 9 May 2024 11:46:15 +0200 Subject: [PATCH 4/5] update timeout in CI --- .github/workflows/testing-go.yml | 5 +- processors/dnstap.go | 406 ------------------------------- processors/dnstap_test.go | 362 --------------------------- workers/dnstap_relay_test.go | 3 +- workers/dnstapserver.go | 398 +++++++++++++++++++++++++++++- workers/dnstapserver_test.go | 351 +++++++++++++++++++++++++- workers/file_ingestor.go | 4 +- 7 files changed, 749 insertions(+), 780 deletions(-) delete mode 100644 processors/dnstap.go delete mode 100644 processors/dnstap_test.go diff --git a/.github/workflows/testing-go.yml b/.github/workflows/testing-go.yml index be292ba5..ff1b787d 100644 --- a/.github/workflows/testing-go.yml +++ b/.github/workflows/testing-go.yml @@ -33,9 +33,6 @@ jobs: - 'transformers' - 'netutils' - 'processors' - # exclude: - # - os-version: macos-latest - # go-version: '1.20' runs-on: ${{ matrix.os-version }} @@ -63,7 +60,7 @@ jobs: sudo go version - name: Test ${{ matrix.package }} - run: sudo go test -timeout 120s ./${{ matrix.package }}/ -race -cover -v + run: sudo go test -timeout 240s ./${{ matrix.package }}/ -race -cover -v int: runs-on: ubuntu-22.04 diff --git a/processors/dnstap.go b/processors/dnstap.go deleted file mode 100644 index 18517cd0..00000000 --- a/processors/dnstap.go +++ /dev/null @@ -1,406 +0,0 @@ -package processors - -import ( - "fmt" - "net" - "strconv" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-dnstap-protobuf" - "github.com/dmachard/go-logger" - "google.golang.org/protobuf/proto" -) - -func GetFakeDNSTap(dnsquery []byte) *dnstap.Dnstap { 
- dtQuery := &dnstap.Dnstap{} - - dt := dnstap.Dnstap_MESSAGE - dtQuery.Identity = []byte("dnstap-generator") - dtQuery.Version = []byte("-") - dtQuery.Type = &dt - - mt := dnstap.Message_CLIENT_QUERY - sf := dnstap.SocketFamily_INET - sp := dnstap.SocketProtocol_UDP - - now := time.Now() - tsec := uint64(now.Unix()) - tnsec := uint32(uint64(now.UnixNano()) - uint64(now.Unix())*1e9) - - rport := uint32(53) - qport := uint32(5300) - - msg := &dnstap.Message{Type: &mt} - msg.SocketFamily = &sf - msg.SocketProtocol = &sp - msg.QueryAddress = net.ParseIP("127.0.0.1") - msg.QueryPort = &qport - msg.ResponseAddress = net.ParseIP("127.0.0.2") - msg.ResponsePort = &rport - - msg.QueryMessage = dnsquery - msg.QueryTimeSec = &tsec - msg.QueryTimeNsec = &tnsec - - dtQuery.Message = msg - return dtQuery -} - -type DNSTapProcessor struct { - ConnID int - PeerName string - doneRun, stopRun chan bool - doneMonitor, stopMonitor chan bool - recvFrom chan []byte - logger *logger.Logger - config *pkgconfig.Config - ConfigChan chan *pkgconfig.Config - name string - chanSize int - RoutingHandler pkgutils.RoutingHandler - dropped chan string - droppedCount map[string]int -} - -func NewDNSTapProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSTapProcessor { - logger.Info(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - initialization...", name, connID) - d := DNSTapProcessor{ - ConnID: connID, - PeerName: peerName, - doneMonitor: make(chan bool), - doneRun: make(chan bool), - stopMonitor: make(chan bool), - stopRun: make(chan bool), - recvFrom: make(chan []byte, size), - chanSize: size, - logger: logger, - config: config, - ConfigChan: make(chan *pkgconfig.Config), - name: name, - dropped: make(chan string), - droppedCount: map[string]int{}, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - return d -} - -func (d *DNSTapProcessor) LogInfo(msg string, v ...interface{}) { - var log string - if 
d.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) - } - d.logger.Info(log+msg, v...) -} - -func (d *DNSTapProcessor) LogError(msg string, v ...interface{}) { - var log string - if d.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) - } - d.logger.Error(log+msg, v...) -} - -func (d *DNSTapProcessor) GetChannel() chan []byte { - return d.recvFrom -} - -func (d *DNSTapProcessor) Stop() { - d.LogInfo("stopping processor...") - d.RoutingHandler.Stop() - - d.LogInfo("stopping to process...") - d.stopRun <- true - <-d.doneRun - - d.LogInfo("stopping monitor...") - d.stopMonitor <- true - <-d.doneMonitor -} - -func (d *DNSTapProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { - dt := &dnstap.Dnstap{} - edt := &dnsutils.ExtendedDnstap{} - - // prepare next channels - defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) - droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) - - // prepare enabled transformers - transforms := transformers.NewTransforms(&d.config.IngoingTransformers, d.logger, d.name, defaultRoutes, d.ConnID) - - // start goroutine to count dropped messsages - go d.MonitorLoggers() - - // read incoming dns message - d.LogInfo("waiting dns message to process...") -RUN_LOOP: - for { - select { - case cfg := <-d.ConfigChan: - d.config = cfg - transforms.ReloadConfig(&cfg.IngoingTransformers) - - case <-d.stopRun: - transforms.Reset() - d.doneRun <- true - break RUN_LOOP - - case data, opened := <-d.recvFrom: - if !opened { - d.LogInfo("channel closed, exit") - return - } - - err := proto.Unmarshal(data, dt) - if err != nil { - continue - } - - // init dns message - dm := dnsutils.DNSMessage{} - dm.Init() - - 
dm.DNSTap.PeerName = d.PeerName - - // init dns message with additionnals parts - transforms.InitDNSMessageFormat(&dm) - - identity := dt.GetIdentity() - if len(identity) > 0 { - dm.DNSTap.Identity = string(identity) - } - version := dt.GetVersion() - if len(version) > 0 { - dm.DNSTap.Version = string(version) - } - dm.DNSTap.Operation = dt.GetMessage().GetType().String() - - // extended extra field ? - if d.config.Collectors.Dnstap.ExtendedSupport { - err := proto.Unmarshal(dt.GetExtra(), edt) - if err != nil { - continue - } - - // get original extra value - originalExtra := string(edt.GetOriginalDnstapExtra()) - if len(originalExtra) > 0 { - dm.DNSTap.Extra = originalExtra - } - - // get atags - atags := edt.GetAtags() - if atags != nil { - dm.ATags = &dnsutils.TransformATags{ - Tags: atags.GetTags(), - } - } - - // get public suffix - norm := edt.GetNormalize() - if norm != nil { - dm.PublicSuffix = &dnsutils.TransformPublicSuffix{} - if len(norm.GetTld()) > 0 { - dm.PublicSuffix.QnamePublicSuffix = norm.GetTld() - } - if len(norm.GetEtldPlusOne()) > 0 { - dm.PublicSuffix.QnameEffectiveTLDPlusOne = norm.GetEtldPlusOne() - } - } - - // filtering - sampleRate := edt.GetFiltering() - if sampleRate != nil { - dm.Filtering = &dnsutils.TransformFiltering{} - dm.Filtering.SampleRate = int(sampleRate.SampleRate) - } - } else { - extra := string(dt.GetExtra()) - if len(extra) > 0 { - dm.DNSTap.Extra = extra - } - } - - if ipVersion, valid := netutils.IPVersion[dt.GetMessage().GetSocketFamily().String()]; valid { - dm.NetworkInfo.Family = ipVersion - } else { - dm.NetworkInfo.Family = pkgconfig.StrUnknown - } - - dm.NetworkInfo.Protocol = dt.GetMessage().GetSocketProtocol().String() - - // decode query address and port - queryip := dt.GetMessage().GetQueryAddress() - if len(queryip) > 0 { - dm.NetworkInfo.QueryIP = net.IP(queryip).String() - } - queryport := dt.GetMessage().GetQueryPort() - if queryport > 0 { - dm.NetworkInfo.QueryPort = 
strconv.FormatUint(uint64(queryport), 10) - } - - // decode response address and port - responseip := dt.GetMessage().GetResponseAddress() - if len(responseip) > 0 { - dm.NetworkInfo.ResponseIP = net.IP(responseip).String() - } - responseport := dt.GetMessage().GetResponsePort() - if responseport > 0 { - dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(responseport), 10) - } - - // get dns payload and timestamp according to the type (query or response) - op := dnstap.Message_Type_value[dm.DNSTap.Operation] - if op%2 == 1 { - dnsPayload := dt.GetMessage().GetQueryMessage() - dm.DNS.Payload = dnsPayload - dm.DNS.Length = len(dnsPayload) - dm.DNS.Type = dnsutils.DNSQuery - dm.DNSTap.TimeSec = int(dt.GetMessage().GetQueryTimeSec()) - dm.DNSTap.TimeNsec = int(dt.GetMessage().GetQueryTimeNsec()) - } else { - dnsPayload := dt.GetMessage().GetResponseMessage() - dm.DNS.Payload = dnsPayload - dm.DNS.Length = len(dnsPayload) - dm.DNS.Type = dnsutils.DNSReply - dm.DNSTap.TimeSec = int(dt.GetMessage().GetResponseTimeSec()) - dm.DNSTap.TimeNsec = int(dt.GetMessage().GetResponseTimeNsec()) - } - - // policy - policyType := dt.GetMessage().GetPolicy().GetType() - if len(policyType) > 0 { - dm.DNSTap.PolicyType = policyType - } - - policyRule := string(dt.GetMessage().GetPolicy().GetRule()) - if len(policyRule) > 0 { - dm.DNSTap.PolicyRule = policyRule - } - - policyAction := dt.GetMessage().GetPolicy().GetAction().String() - if len(policyAction) > 0 { - dm.DNSTap.PolicyAction = policyAction - } - - policyMatch := dt.GetMessage().GetPolicy().GetMatch().String() - if len(policyMatch) > 0 { - dm.DNSTap.PolicyMatch = policyMatch - } - - policyValue := string(dt.GetMessage().GetPolicy().GetValue()) - if len(policyValue) > 0 { - dm.DNSTap.PolicyValue = policyValue - } - - // decode query zone if provided - queryZone := dt.GetMessage().GetQueryZone() - if len(queryZone) > 0 { - qz, _, err := dnsutils.ParseLabels(0, queryZone) - if err != nil { - d.LogError("invalid query zone: %v 
- %v", err, queryZone) - } - dm.DNSTap.QueryZone = qz - } - - // compute timestamp - ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) - dm.DNSTap.Timestamp = ts.UnixNano() - dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) - - // decode payload if provided - if !d.config.Collectors.Dnstap.DisableDNSParser && len(dm.DNS.Payload) > 0 { - // decode the dns payload to get id, rcode and the number of question - // number of answer, ignore invalid packet - dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) - if err != nil { - dm.DNS.MalformedPacket = true - d.LogInfo("dns header parser stopped: %s", err) - if d.config.Global.Trace.LogMalformed { - d.LogError("%v", dm) - d.LogError("dump invalid dns headr: %v", dm.DNS.Payload) - } - } - - if err = dnsutils.DecodePayload(&dm, &dnsHeader, d.config); err != nil { - dm.DNS.MalformedPacket = true - d.LogInfo("dns payload parser stopped: %s", err) - if d.config.Global.Trace.LogMalformed { - d.LogError("%v", dm) - d.LogError("dump invalid dns payload: %v", dm.DNS.Payload) - } - } - } - - // apply all enabled transformers - if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: // Successful send to logger channel - default: - d.dropped <- droppedNames[i] - } - } - continue - } - - // convert latency to human - dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) - - // dispatch dns message to connected routes - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- dm: // Successful send to logger channel - default: - d.dropped <- defaultNames[i] - } - } - - } - } - - d.LogInfo("processing terminated") -} - -func (d *DNSTapProcessor) MonitorLoggers() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-d.stopMonitor: - close(d.dropped) - bufferFull.Stop() - d.doneMonitor <- true - break MONITOR_LOOP - - case 
loggerName := <-d.dropped: - if _, ok := d.droppedCount[loggerName]; !ok { - d.droppedCount[loggerName] = 1 - } else { - d.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - for v, k := range d.droppedCount { - if k > 0 { - d.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) - d.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - - } - } - d.LogInfo("monitor terminated") -} diff --git a/processors/dnstap_test.go b/processors/dnstap_test.go deleted file mode 100644 index c886b01a..00000000 --- a/processors/dnstap_test.go +++ /dev/null @@ -1,362 +0,0 @@ -package processors - -import ( - "bytes" - "fmt" - "regexp" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnstap-protobuf" - "github.com/dmachard/go-logger" - "github.com/miekg/dns" - "google.golang.org/protobuf/proto" -) - -func Test_DnstapProcessor(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) - } -} - -func 
Test_DnstapProcessor_MalformedDnsHeader(t *testing.T) { - // init the dnstap consumer - logger := logger.New(false) - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion[:4] - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func Test_DnstapProcessor_MalformedDnsQuestion(t *testing.T) { - // init the dnstap consumer - logger := logger.New(false) - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dns query - dnsquestion := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, - 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0} - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func 
Test_DnstapProcessor_MalformedDnsAnswer(t *testing.T) { - // init the dnstap consumer - logger := logger.New(false) - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dns query - dnsanswer := []byte{46, 172, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, - 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, - 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 127, 0} - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(6) - dt.Message.ResponseMessage = dnsanswer - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func Test_DnstapProcessor_EmptyDnsPayload(t *testing.T) { - // init the dnstap consumer - logger := logger.New(false) - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == true { - t.Errorf("malformed packet detected, should not with empty payload") - } -} - -func 
Test_DnstapProcessor_DisableDNSParser(t *testing.T) { - // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.Dnstap.DisableDNSParser = true - - logger := logger.New(false) - consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.ID != 0 { - t.Errorf("DNS ID should be equal to zero: %d", dm.DNS.ID) - } -} - -// test to decode the extended part -func Test_DnstapProcessor_Extended(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.Dnstap.ExtendedSupport = true - - consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - edt := &dnsutils.ExtendedDnstap{} - edt.Atags = &dnsutils.ExtendedATags{ - Tags: []string{"atags:value"}, - } - edt.OriginalDnstapExtra = []byte("originalextrafield") - edt.Normalize = &dnsutils.ExtendedNormalize{ - Tld: "org", - EtldPlusOne: "dnscollector.org", - } - edt.Filtering 
= &dnsutils.ExtendedFiltering{ - SampleRate: 30, - } - edtData, _ := proto.Marshal(edt) - dt.Extra = edtData - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNSTap.Extra != "originalextrafield" { - t.Errorf("invalid extra field: %s", dm.DNSTap.Extra) - } - if dm.ATags.Tags[0] != "atags:value" { - t.Errorf("invalid atags: %s", dm.ATags.Tags[0]) - } - if dm.PublicSuffix.QnameEffectiveTLDPlusOne != "dnscollector.org" { - t.Errorf("invalid etld+1: %s", dm.PublicSuffix.QnameEffectiveTLDPlusOne) - } - if dm.PublicSuffix.QnamePublicSuffix != "org" { - t.Errorf("invalid tld: %s", dm.PublicSuffix.QnamePublicSuffix) - } - if dm.Filtering.SampleRate != 30 { - t.Errorf("invalid sample rate: %d", dm.Filtering.SampleRate) - } -} - -// test for issue https://github.com/dmachard/go-dnscollector/issues/568 -func Test_DnstapProcessor_BufferLoggerIsFull(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), lg, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLoggerWithBufferSize(1) - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packets to consumer - for i := 0; i < 512; i++ { - 
consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg511) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) - } - - // send second shot of packets to consumer - for i := 0; i < 1024; i++ { - consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg1023) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dm2 := <-fl.GetInputChannel() - if dm2.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in second dns message: %s", dm2.DNS.Qname) - } -} diff --git a/workers/dnstap_relay_test.go b/workers/dnstap_relay_test.go index 79100be2..6b3093e7 100644 --- a/workers/dnstap_relay_test.go +++ b/workers/dnstap_relay_test.go @@ -10,7 +10,6 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-framestream" "github.com/dmachard/go-logger" "google.golang.org/protobuf/proto" @@ -82,7 +81,7 @@ func Test_DnstapRelay(t *testing.T) { } // get fake dnstap message - dtQuery := processors.GetFakeDNSTap(dnsquery) + dtQuery := GetFakeDNSTap(dnsquery) // serialize to bytes data, err := proto.Marshal(dtQuery) diff --git a/workers/dnstapserver.go b/workers/dnstapserver.go index 85b15a31..f2d5c63d 100644 --- a/workers/dnstapserver.go +++ b/workers/dnstapserver.go @@ -4,19 +4,24 @@ import ( "bufio" "encoding/binary" "errors" + "fmt" "io" "net" + "strconv" "sync" "sync/atomic" "time" 
+ "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-dnstap-protobuf" "github.com/dmachard/go-framestream" "github.com/dmachard/go-logger" "github.com/segmentio/kafka-go/compress" + "google.golang.org/protobuf/proto" ) type DnstapServer struct { @@ -51,7 +56,7 @@ func (w *DnstapServer) HandleConn(conn net.Conn, connID uint64, forceClose chan w.LogInfo("new connection #%d from %s (%s)", connID, peer, peerName) // start dnstap processor and run it - dnstapProcessor := processors.NewDNSTapProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.Dnstap.ChannelBufferSize) + dnstapProcessor := NewDNSTapProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.Dnstap.ChannelBufferSize) go dnstapProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) // init frame stream library @@ -236,3 +241,392 @@ func (w *DnstapServer) StartCollect() { } } + +func GetFakeDNSTap(dnsquery []byte) *dnstap.Dnstap { + dtQuery := &dnstap.Dnstap{} + + dt := dnstap.Dnstap_MESSAGE + dtQuery.Identity = []byte("dnstap-generator") + dtQuery.Version = []byte("-") + dtQuery.Type = &dt + + mt := dnstap.Message_CLIENT_QUERY + sf := dnstap.SocketFamily_INET + sp := dnstap.SocketProtocol_UDP + + now := time.Now() + tsec := uint64(now.Unix()) + tnsec := uint32(uint64(now.UnixNano()) - uint64(now.Unix())*1e9) + + rport := uint32(53) + qport := uint32(5300) + + msg := &dnstap.Message{Type: &mt} + msg.SocketFamily = &sf + msg.SocketProtocol = &sp + msg.QueryAddress = net.ParseIP("127.0.0.1") + msg.QueryPort = &qport + msg.ResponseAddress = net.ParseIP("127.0.0.2") + msg.ResponsePort = &rport + + msg.QueryMessage = dnsquery + msg.QueryTimeSec = &tsec + 
msg.QueryTimeNsec = &tnsec + + dtQuery.Message = msg + return dtQuery +} + +type DNSTapProcessor struct { + ConnID int + PeerName string + doneRun, stopRun chan bool + doneMonitor, stopMonitor chan bool + recvFrom chan []byte + logger *logger.Logger + config *pkgconfig.Config + ConfigChan chan *pkgconfig.Config + name string + chanSize int + RoutingHandler pkgutils.RoutingHandler + dropped chan string + droppedCount map[string]int +} + +func NewDNSTapProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSTapProcessor { + logger.Info(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - initialization...", name, connID) + d := DNSTapProcessor{ + ConnID: connID, + PeerName: peerName, + doneMonitor: make(chan bool), + doneRun: make(chan bool), + stopMonitor: make(chan bool), + stopRun: make(chan bool), + recvFrom: make(chan []byte, size), + chanSize: size, + logger: logger, + config: config, + ConfigChan: make(chan *pkgconfig.Config), + name: name, + dropped: make(chan string), + droppedCount: map[string]int{}, + RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), + } + + return d +} + +func (d *DNSTapProcessor) LogInfo(msg string, v ...interface{}) { + var log string + if d.ConnID == 0 { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) + } else { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) + } + d.logger.Info(log+msg, v...) +} + +func (d *DNSTapProcessor) LogError(msg string, v ...interface{}) { + var log string + if d.ConnID == 0 { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) + } else { + log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) + } + d.logger.Error(log+msg, v...) 
+} + +func (d *DNSTapProcessor) GetChannel() chan []byte { + return d.recvFrom +} + +func (d *DNSTapProcessor) Stop() { + d.LogInfo("stopping processor...") + d.RoutingHandler.Stop() + + d.LogInfo("stopping to process...") + d.stopRun <- true + <-d.doneRun + + d.LogInfo("stopping monitor...") + d.stopMonitor <- true + <-d.doneMonitor +} + +func (d *DNSTapProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { + dt := &dnstap.Dnstap{} + edt := &dnsutils.ExtendedDnstap{} + + // prepare next channels + defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) + droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) + + // prepare enabled transformers + transforms := transformers.NewTransforms(&d.config.IngoingTransformers, d.logger, d.name, defaultRoutes, d.ConnID) + + // start goroutine to count dropped messsages + go d.MonitorLoggers() + + // read incoming dns message + d.LogInfo("waiting dns message to process...") +RUN_LOOP: + for { + select { + case cfg := <-d.ConfigChan: + d.config = cfg + transforms.ReloadConfig(&cfg.IngoingTransformers) + + case <-d.stopRun: + transforms.Reset() + d.doneRun <- true + break RUN_LOOP + + case data, opened := <-d.recvFrom: + if !opened { + d.LogInfo("channel closed, exit") + return + } + + err := proto.Unmarshal(data, dt) + if err != nil { + continue + } + + // init dns message + dm := dnsutils.DNSMessage{} + dm.Init() + + dm.DNSTap.PeerName = d.PeerName + + // init dns message with additionnals parts + transforms.InitDNSMessageFormat(&dm) + + identity := dt.GetIdentity() + if len(identity) > 0 { + dm.DNSTap.Identity = string(identity) + } + version := dt.GetVersion() + if len(version) > 0 { + dm.DNSTap.Version = string(version) + } + dm.DNSTap.Operation = dt.GetMessage().GetType().String() + + // extended extra field ? 
+ if d.config.Collectors.Dnstap.ExtendedSupport { + err := proto.Unmarshal(dt.GetExtra(), edt) + if err != nil { + continue + } + + // get original extra value + originalExtra := string(edt.GetOriginalDnstapExtra()) + if len(originalExtra) > 0 { + dm.DNSTap.Extra = originalExtra + } + + // get atags + atags := edt.GetAtags() + if atags != nil { + dm.ATags = &dnsutils.TransformATags{ + Tags: atags.GetTags(), + } + } + + // get public suffix + norm := edt.GetNormalize() + if norm != nil { + dm.PublicSuffix = &dnsutils.TransformPublicSuffix{} + if len(norm.GetTld()) > 0 { + dm.PublicSuffix.QnamePublicSuffix = norm.GetTld() + } + if len(norm.GetEtldPlusOne()) > 0 { + dm.PublicSuffix.QnameEffectiveTLDPlusOne = norm.GetEtldPlusOne() + } + } + + // filtering + sampleRate := edt.GetFiltering() + if sampleRate != nil { + dm.Filtering = &dnsutils.TransformFiltering{} + dm.Filtering.SampleRate = int(sampleRate.SampleRate) + } + } else { + extra := string(dt.GetExtra()) + if len(extra) > 0 { + dm.DNSTap.Extra = extra + } + } + + if ipVersion, valid := netutils.IPVersion[dt.GetMessage().GetSocketFamily().String()]; valid { + dm.NetworkInfo.Family = ipVersion + } else { + dm.NetworkInfo.Family = pkgconfig.StrUnknown + } + + dm.NetworkInfo.Protocol = dt.GetMessage().GetSocketProtocol().String() + + // decode query address and port + queryip := dt.GetMessage().GetQueryAddress() + if len(queryip) > 0 { + dm.NetworkInfo.QueryIP = net.IP(queryip).String() + } + queryport := dt.GetMessage().GetQueryPort() + if queryport > 0 { + dm.NetworkInfo.QueryPort = strconv.FormatUint(uint64(queryport), 10) + } + + // decode response address and port + responseip := dt.GetMessage().GetResponseAddress() + if len(responseip) > 0 { + dm.NetworkInfo.ResponseIP = net.IP(responseip).String() + } + responseport := dt.GetMessage().GetResponsePort() + if responseport > 0 { + dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(responseport), 10) + } + + // get dns payload and timestamp according to the 
type (query or response) + op := dnstap.Message_Type_value[dm.DNSTap.Operation] + if op%2 == 1 { + dnsPayload := dt.GetMessage().GetQueryMessage() + dm.DNS.Payload = dnsPayload + dm.DNS.Length = len(dnsPayload) + dm.DNS.Type = dnsutils.DNSQuery + dm.DNSTap.TimeSec = int(dt.GetMessage().GetQueryTimeSec()) + dm.DNSTap.TimeNsec = int(dt.GetMessage().GetQueryTimeNsec()) + } else { + dnsPayload := dt.GetMessage().GetResponseMessage() + dm.DNS.Payload = dnsPayload + dm.DNS.Length = len(dnsPayload) + dm.DNS.Type = dnsutils.DNSReply + dm.DNSTap.TimeSec = int(dt.GetMessage().GetResponseTimeSec()) + dm.DNSTap.TimeNsec = int(dt.GetMessage().GetResponseTimeNsec()) + } + + // policy + policyType := dt.GetMessage().GetPolicy().GetType() + if len(policyType) > 0 { + dm.DNSTap.PolicyType = policyType + } + + policyRule := string(dt.GetMessage().GetPolicy().GetRule()) + if len(policyRule) > 0 { + dm.DNSTap.PolicyRule = policyRule + } + + policyAction := dt.GetMessage().GetPolicy().GetAction().String() + if len(policyAction) > 0 { + dm.DNSTap.PolicyAction = policyAction + } + + policyMatch := dt.GetMessage().GetPolicy().GetMatch().String() + if len(policyMatch) > 0 { + dm.DNSTap.PolicyMatch = policyMatch + } + + policyValue := string(dt.GetMessage().GetPolicy().GetValue()) + if len(policyValue) > 0 { + dm.DNSTap.PolicyValue = policyValue + } + + // decode query zone if provided + queryZone := dt.GetMessage().GetQueryZone() + if len(queryZone) > 0 { + qz, _, err := dnsutils.ParseLabels(0, queryZone) + if err != nil { + d.LogError("invalid query zone: %v - %v", err, queryZone) + } + dm.DNSTap.QueryZone = qz + } + + // compute timestamp + ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) + dm.DNSTap.Timestamp = ts.UnixNano() + dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) + + // decode payload if provided + if !d.config.Collectors.Dnstap.DisableDNSParser && len(dm.DNS.Payload) > 0 { + // decode the dns payload to get id, rcode and the number of 
question + // number of answer, ignore invalid packet + dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) + if err != nil { + dm.DNS.MalformedPacket = true + d.LogInfo("dns header parser stopped: %s", err) + if d.config.Global.Trace.LogMalformed { + d.LogError("%v", dm) + d.LogError("dump invalid dns headr: %v", dm.DNS.Payload) + } + } + + if err = dnsutils.DecodePayload(&dm, &dnsHeader, d.config); err != nil { + dm.DNS.MalformedPacket = true + d.LogInfo("dns payload parser stopped: %s", err) + if d.config.Global.Trace.LogMalformed { + d.LogError("%v", dm) + d.LogError("dump invalid dns payload: %v", dm.DNS.Payload) + } + } + } + + // apply all enabled transformers + if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { + for i := range droppedRoutes { + select { + case droppedRoutes[i] <- dm: // Successful send to logger channel + default: + d.dropped <- droppedNames[i] + } + } + continue + } + + // convert latency to human + dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) + + // dispatch dns message to connected routes + for i := range defaultRoutes { + select { + case defaultRoutes[i] <- dm: // Successful send to logger channel + default: + d.dropped <- defaultNames[i] + } + } + + } + } + + d.LogInfo("processing terminated") +} + +func (d *DNSTapProcessor) MonitorLoggers() { + watchInterval := 10 * time.Second + bufferFull := time.NewTimer(watchInterval) +MONITOR_LOOP: + for { + select { + case <-d.stopMonitor: + close(d.dropped) + bufferFull.Stop() + d.doneMonitor <- true + break MONITOR_LOOP + + case loggerName := <-d.dropped: + if _, ok := d.droppedCount[loggerName]; !ok { + d.droppedCount[loggerName] = 1 + } else { + d.droppedCount[loggerName]++ + } + + case <-bufferFull.C: + for v, k := range d.droppedCount { + if k > 0 { + d.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) + d.droppedCount[v] = 0 + } + } + bufferFull.Reset(watchInterval) + + } + } + d.LogInfo("monitor terminated") +} diff --git 
a/workers/dnstapserver_test.go b/workers/dnstapserver_test.go index df6a0fb8..7e284d51 100644 --- a/workers/dnstapserver_test.go +++ b/workers/dnstapserver_test.go @@ -2,6 +2,7 @@ package workers import ( "bufio" + "bytes" "fmt" "net" "regexp" @@ -12,9 +13,10 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" + "github.com/dmachard/go-dnstap-protobuf" "github.com/dmachard/go-framestream" "github.com/dmachard/go-logger" + "github.com/miekg/dns" "github.com/segmentio/kafka-go/compress" "google.golang.org/protobuf/proto" ) @@ -103,7 +105,7 @@ func Test_DnstapCollector(t *testing.T) { } // get fake dnstap message - dtQuery := processors.GetFakeDNSTap(dnsquery) + dtQuery := GetFakeDNSTap(dnsquery) // serialize to bytes data, err := proto.Marshal(dtQuery) @@ -197,3 +199,348 @@ func Test_DnstapCollector_CloseFrameStream(t *testing.T) { // cleanup c.Stop() } + +func Test_DnstapProcessor(t *testing.T) { + logger := logger.New(true) + var o bytes.Buffer + logger.SetOutput(&o) + + // init the dnstap consumer + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.Qname != ExpectedQname { + t.Errorf("invalid qname in dns message: 
%s", dm.DNS.Qname) + } +} + +func Test_DnstapProcessor_MalformedDnsHeader(t *testing.T) { + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion[:4] + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not detected") + } +} + +func Test_DnstapProcessor_MalformedDnsQuestion(t *testing.T) { + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) + + // prepare dns query + dnsquestion := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, + 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0} + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not 
detected") + } +} + +func Test_DnstapProcessor_MalformedDnsAnswer(t *testing.T) { + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) + + // prepare dns query + dnsanswer := []byte{46, 172, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, + 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, + 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 127, 0} + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(6) + dt.Message.ResponseMessage = dnsanswer + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not detected") + } +} + +func Test_DnstapProcessor_EmptyDnsPayload(t *testing.T) { + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == true { + t.Errorf("malformed packet detected, should not with empty 
payload") + } +} + +func Test_DnstapProcessor_DisableDNSParser(t *testing.T) { + // init the dnstap consumer + cfg := pkgconfig.GetFakeConfig() + cfg.Collectors.Dnstap.DisableDNSParser = true + + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.ID != 0 { + t.Errorf("DNS ID should be equal to zero: %d", dm.DNS.ID) + } +} + +// test to decode the extended part +func Test_DnstapProcessor_Extended(t *testing.T) { + logger := logger.New(true) + var o bytes.Buffer + logger.SetOutput(&o) + + // init the dnstap consumer + cfg := pkgconfig.GetFakeConfig() + cfg.Collectors.Dnstap.ExtendedSupport = true + + consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + edt := &dnsutils.ExtendedDnstap{} + edt.Atags = &dnsutils.ExtendedATags{ + Tags: []string{"atags:value"}, + } + edt.OriginalDnstapExtra = []byte("originalextrafield") + edt.Normalize = &dnsutils.ExtendedNormalize{ + Tld: "org", + EtldPlusOne: 
"dnscollector.org", + } + edt.Filtering = &dnsutils.ExtendedFiltering{ + SampleRate: 30, + } + edtData, _ := proto.Marshal(edt) + dt.Extra = edtData + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLogger() + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packet to consumer + consumer.GetChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNSTap.Extra != "originalextrafield" { + t.Errorf("invalid extra field: %s", dm.DNSTap.Extra) + } + if dm.ATags.Tags[0] != "atags:value" { + t.Errorf("invalid atags: %s", dm.ATags.Tags[0]) + } + if dm.PublicSuffix.QnameEffectiveTLDPlusOne != "dnscollector.org" { + t.Errorf("invalid etld+1: %s", dm.PublicSuffix.QnameEffectiveTLDPlusOne) + } + if dm.PublicSuffix.QnamePublicSuffix != "org" { + t.Errorf("invalid tld: %s", dm.PublicSuffix.QnamePublicSuffix) + } + if dm.Filtering.SampleRate != 30 { + t.Errorf("invalid sample rate: %d", dm.Filtering.SampleRate) + } +} + +// test for issue https://github.com/dmachard/go-dnscollector/issues/568 +func Test_DnstapProcessor_BufferLoggerIsFull(t *testing.T) { + // redirect stdout output to bytes buffer + logsChan := make(chan logger.LogEntry, 10) + lg := logger.New(true) + lg.SetOutputChannel((logsChan)) + + // init the dnstap consumer + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), lg, "test", 512) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // run the consumer with a fake logger + fl := pkgutils.NewFakeLoggerWithBufferSize(1) + go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + + // add packets to 
consumer + for i := 0; i < 512; i++ { + consumer.GetChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(ExpectedBufferMsg511) + if pattern.MatchString(entry.Message) { + break + } + } + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.Qname != ExpectedQname { + t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) + } + + // send second shot of packets to consumer + for i := 0; i < 1024; i++ { + consumer.GetChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(ExpectedBufferMsg1023) + if pattern.MatchString(entry.Message) { + break + } + } + + // read dns message from dnstap consumer + dm2 := <-fl.GetInputChannel() + if dm2.DNS.Qname != ExpectedQname { + t.Errorf("invalid qname in second dns message: %s", dm2.DNS.Qname) + } +} diff --git a/workers/file_ingestor.go b/workers/file_ingestor.go index c2f541ad..a8d7d00b 100644 --- a/workers/file_ingestor.go +++ b/workers/file_ingestor.go @@ -39,7 +39,7 @@ type FileIngestor struct { *pkgutils.GenericWorker watcherTimers map[string]*time.Timer dnsProcessor processors.DNSProcessor - dnstapProcessor processors.DNSTapProcessor + dnstapProcessor DNSTapProcessor mu sync.Mutex } @@ -315,7 +315,7 @@ func (w *FileIngestor) StartCollect() { go w.dnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) // start dnstap subprocessor - w.dnstapProcessor = processors.NewDNSTapProcessor(0, "", w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.FileIngestor.ChannelBufferSize) + w.dnstapProcessor = NewDNSTapProcessor(0, "", w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.FileIngestor.ChannelBufferSize) go w.dnstapProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) // read current folder content 
From b681d68d033cfa237450f7d882f1f13119c89129 Mon Sep 17 00:00:00 2001 From: dmachard <5562930+dmachard@users.noreply.github.com> Date: Thu, 9 May 2024 11:55:15 +0200 Subject: [PATCH 5/5] remove processors package --- .github/workflows/testing-go.yml | 3 +-- Makefile | 1 - {processors => workers}/constants.go | 2 +- workers/dnsmessage_test.go | 9 ++++----- processors/dns.go => workers/dnsprocessor.go | 2 +- processors/dns_test.go => workers/dnsprocessor_test.go | 2 +- workers/file_ingestor.go | 5 ++--- workers/powerdns_test.go | 8 -------- workers/sniffer_afpacket_linux.go | 3 +-- workers/sniffer_xdp.go | 3 +-- workers/stdout_test.go | 9 ++++----- workers/tzsp_linux.go | 3 +-- 12 files changed, 17 insertions(+), 33 deletions(-) rename {processors => workers}/constants.go (92%) rename processors/dns.go => workers/dnsprocessor.go (99%) rename processors/dns_test.go => workers/dnsprocessor_test.go (99%) diff --git a/.github/workflows/testing-go.yml b/.github/workflows/testing-go.yml index ff1b787d..1baa4b3c 100644 --- a/.github/workflows/testing-go.yml +++ b/.github/workflows/testing-go.yml @@ -32,7 +32,6 @@ jobs: - 'workers' - 'transformers' - 'netutils' - - 'processors' runs-on: ${{ matrix.os-version }} @@ -144,7 +143,7 @@ jobs: - id: count_tests run: | - data=$(sudo go test -timeout 360s -v ./workers ./processors ./dnsutils ./netutils ./transformers ./pkgconfig ./pkglinker ./pkgutils ././ 2>&1 | grep -c RUN) + data=$(sudo go test -timeout 360s -v ./workers ./dnsutils ./netutils ./transformers ./pkgconfig ./pkglinker ./pkgutils ././ 2>&1 | grep -c RUN) echo "Count of Tests: $data" echo "data=$data" >> $GITHUB_OUTPUT diff --git a/Makefile b/Makefile index 74621cb6..4c15b90e 100644 --- a/Makefile +++ b/Makefile @@ -75,7 +75,6 @@ tests: check-go @go test -timeout 90s ./dnsutils/ -race -cover -v @go test -timeout 90s ./transformers/ -race -cover -v @go test -timeout 180s ./workers/ -race -cover -v - @go test -timeout 90s ./processors/ -race -cover -v # Cleans the 
project using go clean. clean: check-go diff --git a/processors/constants.go b/workers/constants.go similarity index 92% rename from processors/constants.go rename to workers/constants.go index f8ba88f2..31c3451a 100644 --- a/processors/constants.go +++ b/workers/constants.go @@ -1,4 +1,4 @@ -package processors +package workers const ( ExpectedQname = "dnscollector.dev" diff --git a/workers/dnsmessage_test.go b/workers/dnsmessage_test.go index 1ed1624e..a61d89a6 100644 --- a/workers/dnsmessage_test.go +++ b/workers/dnsmessage_test.go @@ -9,7 +9,6 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" ) @@ -41,7 +40,7 @@ func Test_DnsMessage_BufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg511) + pattern := regexp.MustCompile(ExpectedBufferMsg511) if pattern.MatchString(entry.Message) { break } @@ -49,7 +48,7 @@ func Test_DnsMessage_BufferLoggerIsFull(t *testing.T) { // read dnsmessage from next logger dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != processors.ExpectedQname2 { + if dmOut.DNS.Qname != ExpectedQname2 { t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) } @@ -63,14 +62,14 @@ func Test_DnsMessage_BufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg1023) + pattern := regexp.MustCompile(ExpectedBufferMsg1023) if pattern.MatchString(entry.Message) { break } } // read dnsmessage from next logger dm2 := <-nxt.GetInputChannel() - if dm2.DNS.Qname != processors.ExpectedQname2 { + if dm2.DNS.Qname != ExpectedQname2 { t.Errorf("invalid qname in dns message: %s", dm2.DNS.Qname) } diff --git a/processors/dns.go b/workers/dnsprocessor.go similarity index 99% rename from 
processors/dns.go rename to workers/dnsprocessor.go index 3f1706a7..817598b9 100644 --- a/processors/dns.go +++ b/workers/dnsprocessor.go @@ -1,4 +1,4 @@ -package processors +package workers import ( "fmt" diff --git a/processors/dns_test.go b/workers/dnsprocessor_test.go similarity index 99% rename from processors/dns_test.go rename to workers/dnsprocessor_test.go index ec213e03..d641a682 100644 --- a/processors/dns_test.go +++ b/workers/dnsprocessor_test.go @@ -1,4 +1,4 @@ -package processors +package workers import ( "bytes" diff --git a/workers/file_ingestor.go b/workers/file_ingestor.go index a8d7d00b..fe7e91c9 100644 --- a/workers/file_ingestor.go +++ b/workers/file_ingestor.go @@ -14,7 +14,6 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" framestream "github.com/farsightsec/golang-framestream" "github.com/fsnotify/fsnotify" @@ -38,7 +37,7 @@ func IsValidMode(mode string) bool { type FileIngestor struct { *pkgutils.GenericWorker watcherTimers map[string]*time.Timer - dnsProcessor processors.DNSProcessor + dnsProcessor DNSProcessor dnstapProcessor DNSTapProcessor mu sync.Mutex } @@ -311,7 +310,7 @@ func (w *FileIngestor) StartCollect() { w.LogInfo("worker is starting collection") defer w.CollectDone() - w.dnsProcessor = processors.NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.FileIngestor.ChannelBufferSize) + w.dnsProcessor = NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.FileIngestor.ChannelBufferSize) go w.dnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) // start dnstap subprocessor diff --git a/workers/powerdns_test.go b/workers/powerdns_test.go index 125531a2..3ad2a959 100644 --- a/workers/powerdns_test.go +++ b/workers/powerdns_test.go @@ -16,14 +16,6 @@ import ( 
"google.golang.org/protobuf/proto" ) -const ( - ExpectedQname = "dnscollector.dev" - ExpectedQname2 = "dns.collector" - ExpectedBufferMsg511 = ".*buffer is full, 511.*" - ExpectedBufferMsg1023 = ".*buffer is full, 1023.*" - ExpectedIdentity = "powerdnspb" -) - func TestPowerDNS_Run(t *testing.T) { g := pkgutils.NewFakeLogger() diff --git a/workers/sniffer_afpacket_linux.go b/workers/sniffer_afpacket_linux.go index e37a62c8..1d4b06be 100644 --- a/workers/sniffer_afpacket_linux.go +++ b/workers/sniffer_afpacket_linux.go @@ -16,7 +16,6 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" "github.com/google/gopacket" "github.com/google/gopacket/layers" @@ -87,7 +86,7 @@ func (w *AfpacketSniffer) StartCollect() { } } - dnsProcessor := processors.NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.AfpacketLiveCapture.ChannelBufferSize) + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.AfpacketLiveCapture.ChannelBufferSize) go dnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) dnsChan := make(chan netutils.DNSPacket) diff --git a/workers/sniffer_xdp.go b/workers/sniffer_xdp.go index 924f3e3e..4210da78 100644 --- a/workers/sniffer_xdp.go +++ b/workers/sniffer_xdp.go @@ -18,7 +18,6 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-dnscollector/xdp" "github.com/dmachard/go-logger" "golang.org/x/sys/unix" @@ -39,7 +38,7 @@ func (w *XDPSniffer) StartCollect() { defer w.CollectDone() // init dns processor - dnsProcessor := processors.NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), 
w.GetConfig().Collectors.XdpLiveCapture.ChannelBufferSize) + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.XdpLiveCapture.ChannelBufferSize) go dnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) // get network interface by name diff --git a/workers/stdout_test.go b/workers/stdout_test.go index 8433175b..9d2a2184 100644 --- a/workers/stdout_test.go +++ b/workers/stdout_test.go @@ -10,7 +10,6 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" "github.com/google/gopacket/pcapgo" ) @@ -241,7 +240,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg511) + pattern := regexp.MustCompile(ExpectedBufferMsg511) if pattern.MatchString(entry.Message) { break } @@ -249,7 +248,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != processors.ExpectedQname2 { + if dmOut.DNS.Qname != ExpectedQname2 { t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) } @@ -262,7 +261,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { time.Sleep(12 * time.Second) for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg1023) + pattern := regexp.MustCompile(ExpectedBufferMsg1023) if pattern.MatchString(entry.Message) { break } @@ -270,7 +269,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dmOut2 := <-nxt.GetInputChannel() - if dmOut2.DNS.Qname != processors.ExpectedQname2 { + if dmOut2.DNS.Qname != ExpectedQname2 { t.Errorf("invalid qname in second dns message: %s", dmOut2.DNS.Qname) } diff --git a/workers/tzsp_linux.go 
b/workers/tzsp_linux.go index f9496d53..abf9606d 100644 --- a/workers/tzsp_linux.go +++ b/workers/tzsp_linux.go @@ -19,7 +19,6 @@ import ( "github.com/dmachard/go-dnscollector/netutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" "github.com/google/gopacket" "github.com/google/gopacket/layers" @@ -84,7 +83,7 @@ func (w *TZSPSniffer) StartCollect() { } // init dns processor - dnsProcessor := processors.NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.Tzsp.ChannelBufferSize) + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.Tzsp.ChannelBufferSize) go dnsProcessor.Run(w.GetDefaultRoutes(), w.GetDroppedRoutes()) ctx, cancel := context.WithCancel(context.Background())