diff --git a/cardano-tracer/app/cardano-tracer.hs b/cardano-tracer/app/cardano-tracer.hs index be0f014e768..cfac4e05c6d 100644 --- a/cardano-tracer/app/cardano-tracer.hs +++ b/cardano-tracer/app/cardano-tracer.hs @@ -13,7 +13,7 @@ main = do tracerInfo :: ParserInfo TracerParams tracerInfo = info (parseTracerParams <**> helper <**> versionOption) - (fullDesc <> header "cardano-tracer - the logging/monitoring service for cardano node.") + (fullDesc <> header "cardano-tracer - the logging/monitoring service for Cardano node.") versionOption = infoOption (showVersion version) (long "version" <> diff --git a/cardano-tracer/cardano-tracer.cabal b/cardano-tracer/cardano-tracer.cabal index 7057e811bf4..2131835db0f 100644 --- a/cardano-tracer/cardano-tracer.cabal +++ b/cardano-tracer/cardano-tracer.cabal @@ -12,40 +12,37 @@ build-type: Simple extra-doc-files: README.md CHANGELOG.md -common common-options - build-depends: base >=4.12 && <5 +common base { build-depends: base >= 4.14 && < 4.15 } + +common project-config + default-language: Haskell2010 ghc-options: -Wall -Wcompat - -Widentities - -Wincomplete-uni-patterns -Wincomplete-record-updates - if impl(ghc >= 8.0) - ghc-options: -Wredundant-constraints - if impl(ghc >= 8.2) - ghc-options: -fhide-source-paths - if impl(ghc >= 8.4) - ghc-options: -Wmissing-export-lists + -Wincomplete-uni-patterns + -Wno-unticked-promoted-constructors + -Wno-orphans -Wpartial-fields - - default-language: Haskell2010 + -Wredundant-constraints + -Wunused-packages library - import: common-options + import: base, project-config hs-source-dirs: src exposed-modules: Cardano.Tracer.Acceptors Cardano.Tracer.CLI Cardano.Tracer.Configuration - Cardano.Tracer.Handlers Cardano.Tracer.Run Cardano.Tracer.Types + Cardano.Tracer.Handlers + Cardano.Tracer.Handlers.Logs.File Cardano.Tracer.Handlers.Logs.Journal Cardano.Tracer.Handlers.Logs.Log Cardano.Tracer.Handlers.Logs.Rotator - Cardano.Tracer.Handlers.Logs.Run Cardano.Tracer.Handlers.Metrics.Monitoring Cardano.Tracer.Handlers.Metrics.Prometheus @@ -69,10 +66,8 @@ library , filepath , hashable , libsystemd-journal - , network , optparse-applicative , ouroboros-network-framework - , serialise , snap-blaze , snap-core , snap-server @@ -84,35 +79,38 @@ library , unordered-containers executable cardano-tracer + import: base, project-config + hs-source-dirs: app main-is: cardano-tracer.hs other-modules: Paths_cardano_tracer - build-depends: base - , cardano-tracer + build-depends: cardano-tracer , optparse-applicative - , text - default-language: Haskell2010 - ghc-options: -Wall - -threaded + ghc-options: -threaded -rtsopts -with-rtsopts=-T test-suite cardano-tracer-test - import: common-options + import: base, project-config type: exitcode-stdio-1.0 + hs-source-dirs: test + main-is: cardano-tracer-test.hs - other-modules: Cardano.Tracer.Test.Types - Cardano.Tracer.Test.Forwarder - Cardano.Tracer.Test.Logs.File - Cardano.Tracer.Test.Logs.Rotator - build-depends: aeson - , async + other-modules: Cardano.Tracer.Test.Forwarder + Cardano.Tracer.Test.Logs.Tests + Cardano.Tracer.Test.Network.Tests + Cardano.Tracer.Test.Queue.Tests + Cardano.Tracer.Test.SSH.Tests + + Cardano.Tracer.Test.Utils + + build-depends: async , base , bytestring , cardano-tracer @@ -121,13 +119,11 @@ test-suite cardano-tracer-test , directory , ekg-core , ekg-forward + , extra , filepath - , io-sim - , io-classes - , network , ouroboros-network-framework + , process , QuickCheck - , serialise , stm , tasty , tasty-quickcheck @@ -135,11 +131,7 @@ test-suite 
cardano-tracer-test , time , trace-dispatcher , trace-forward - , typed-protocols-examples - , unordered-containers - , vector ghc-options: -threaded -rtsopts -with-rtsopts=-N - default-extensions: OverloadedStrings diff --git a/cardano-tracer/configuration/example.json b/cardano-tracer/configuration/example.json index 1456d1ebe7b..39a998981d9 100644 --- a/cardano-tracer/configuration/example.json +++ b/cardano-tracer/configuration/example.json @@ -1,12 +1,8 @@ { - "connectMode": "Initiator", - "acceptAt": [ - "/tmp/forwarder-1.sock", - "/tmp/forwarder-2.sock", - "/tmp/forwarder-3.sock" - ], - "loRequestNum": 1, - "ekgRequestFreq": 1, + "network": { + "tag": "AcceptAt", + "contents": "/tmp/forwarder-1.sock" + }, "hasEKG": null, "hasPrometheus": [ "127.0.0.1", @@ -25,3 +21,4 @@ "rpMaxAgeHours": 1 } } + diff --git a/cardano-tracer/docs/cardano-tracer.md b/cardano-tracer/docs/cardano-tracer.md index 4f7fbfa3bc9..4f74296c803 100644 --- a/cardano-tracer/docs/cardano-tracer.md +++ b/cardano-tracer/docs/cardano-tracer.md @@ -9,28 +9,26 @@ 3. [Overview](#Overview) 2. [Build and run](#Build-and-run) 3. [Configuration](#Configuration) - 1. [Accept endpoint](#Accept-endpoint) + 1. [Distributed Scenario](#Distributed-scenario) + 1. [Local Scenario](#Local-scenario) 2. [Requests](#Requests) 3. [Logging](#Logging) 4. [Logs rotation](#Logs-rotation) 5. [Prometheus](#Prometheus) -4. [Appendix](#Appendix) - 1. [SSH socket forwarding](#SSH-socket-forwarding) - 2. [Example 1](#Example-1) # Introduction ## Motivation -Previously, `cardano-node` handled all the logging by itself. Moreover, it provided monitoring tools as well, for example, a web server for returning EKG metrics. `cardano-tracer` is an attempt to move all logging/monitoring-related stuff from the node to a separate application. As a result, the node will be smaller, faster, and simpler. +Previously, `cardano-node` handled all the logging by itself. Moreover, it provided monitoring tools as well, for example, a web page for EKG metrics. `cardano-tracer` is an attempt to _move_ all logging/monitoring-related stuff from the node to a separate application. As a result, the node will be smaller, faster, and simpler. ## Overview -You can think of `cardano-node` as a **producer** of logging/monitoring information, and `cardano-tracer` is a **consumer** of this information. So, from the network point of view, `cardano-node` is a server, and `cardano-tracer` is a client. After the connection between them is established, `cardano-tracer` periodically asks for this information, and `cardano-node` replies with it. +You can think of `cardano-node` as a **producer** of logging/monitoring information, and `cardano-tracer` as a **consumer** of this information. And after the network connection between them is established, `cardano-tracer` periodically asks for logging/monitoring information, and `cardano-node` replies with it. Currently, this information is presented in two items: -1. Log item, which contains some information from the node. Please see `Cardano.Logging.Types.TraceObject` from `trace-dispatcher` library for more info. +1. Trace object, which contains arbitrary information from the node. For more details, please see `Cardano.Logging.Types.TraceObject` from `trace-dispatcher` library. 2. EKG metric, which contains some system metric. Please [read the documentation](https://hackage.haskell.org/package/ekg-core) for more info. 
Please note that `cardano-tracer` can work as an aggregator as well: _one_ `cardano-tracer` process can receive the information from _multiple_ `cardano-node` processes. In this case, received logging information will be stored in subdirectories: each subdirectory will contain all the items received from the particular node. @@ -42,7 +40,7 @@ Please make sure you have [Nix installed](https://nixos.org/guides/install-nix.h First of all, go to Nix shell using the following command (from the root of `cardano-node` repository): ``` -nix-shell --arg withHoogle false +nix-shell ``` Now build `cardano-tracer` using the following command: @@ -61,84 +59,128 @@ Please see below an explanation about the configuration file. # Configuration -The real-life example of the configuration file looks like this: +How you configure `cardano-tracer` depends on your requirements. There are two basic scenarios: + +1. **Distributed** scenario, when `cardano-tracer` works on one machine, and your nodes work on other machine(s). +2. **Local** scenario, when `cardano-tracer` and your nodes work on the same machine. + +The distributed scenario is the real-world case: for example, you have a cluster of `N` nodes working on `N` different AWS instances, and you want to collect all the logging/monitoring information from these nodes using a single `cardano-tracer` process working on your machine. So, by default, you should consider using the distributed scenario. + +The local scenario is for testing: for example, you want to try your new infrastructure from scratch, so you run `N` nodes and one `cardano-tracer` process on your laptop. + +## Distributed Scenario + +This is an example of a cluster of 3 nodes and one tracer: + +``` +machine A machine B machine C ++----------------------+ +----------------------+ +----------------------+ +| cardano-node | | cardano-node | | cardano-node | ++----------------------+ +----------------------+ +----------------------+ + ^ ^ ^ + \ | / + \ | / + v v v + +----------------------+ + | cardano-tracer | + +----------------------+ + machine D +``` + +The minimal configuration file for `cardano-tracer` in such a scenario would be: ``` { - "connectMode": "Initiator", - "acceptAt": [ - "/tmp/forwarder-1.sock", - "/tmp/forwarder-2.sock", - "/tmp/forwarder-3.sock" - ], - "loRequestNum": 10, - "ekgRequestFreq": 1, - "hasEKG": null, - "hasPrometheus": [ - "127.0.0.1", - 12798 - ], + "network": { + "tag": "AcceptAt", + "contents": "/tmp/cardano-tracer.sock" + }, "logging": [ { "logRoot": "/tmp/cardano-tracer-logs", "logMode": "FileMode", "logFormat": "ForMachine" } - ], - "rotation": { - "rpKeepFilesNum": 1, - "rpLogLimitBytes": 50000, - "rpMaxAgeHours": 1 - } + ] } ``` -Let's explore it in detail. +The `network` field specifies how `cardano-tracer` will be connected to your nodes. Here you see the `AcceptAt` tag, which means that `cardano-tracer` works as a server: it _accepts_ network connections by listening on the local Unix socket `/tmp/cardano-tracer.sock`. But if `cardano-tracer` _accepts_ the connections - who should _initiate_ them? The node cannot do that, because it listens on its own local Unix socket too. To do that, we use SSH forwarding. Please note that `cardano-tracer` **does not** support connections via IP address and port, to avoid unauthorized connections; that is why we need `ssh` in the distributed scenario.
-## Accept endpoints +It can be shown like this: -The field `acceptAt` specifies a list of endpoints which using to connect `cardano-tracer` with one or more `cardano-node` processes: ``` -"acceptAt": [ - "/tmp/forwarder-1.sock", - "/tmp/forwarder-2.sock", - "/tmp/forwarder-3.sock" -] +machine A machine B machine C ++----------------------+ +----------------------+ +----------------------+ +| cardano-node | | cardano-node | | cardano-node | +| \ | | \ | | \ | +| v | | v | | v | +| local socket | | local socket | | local socket | ++----------------------+ +----------------------+ +----------------------+ + ^ ^ ^ + \ | / + SSH SSH SSH + \ | / + v v v + +----------------------+ + | local socket | + | ^ | + | \ | + | \ | + | cardano-tracer | + +----------------------+ + machine D ``` -where `/tmp/forwarder-*.sock` are local paths to Unix sockets. You can think of these sockets as channels to connect with nodes, in this example - with 3 nodes. It can be shown like this: +In this case, `ssh` connects each node with the same `cardano-tracer`. The idea of SSH forwarding is simple: we connect not the processes directly, but their network endpoints instead. You can think of it as a network channel from the local socket on one machine to the local socket on another machine. So, to connect `cardano-node` working on machine `A` with `cardano-tracer` working on machine `D`, run this command on machine `A`: ``` - +----------------+ - --> /tmp/forwarder-1.sock --> | cardano-node 1 | - / +----------------+ -+----------------+ +----------------+ -| cardano-tracer | ---> /tmp/forwarder-2.sock --> | cardano-node 2 | -+----------------+ +----------------+ - \ +----------------+ - --> /tmp/forwarder-3.sock --> | cardano-node 3 | - +----------------+ +ssh -nNT -L /tmp/cardano-tracer.sock:/tmp/cardano-node.sock -o "ExitOnForwardFailure yes" john@109.75.33.121 ``` -Please note that `cardano-tracer` **does not** support connection via IP-address and port, to avoid unauthorized connections. So there are two possible cases: +where: + +- `/tmp/cardano-tracer.sock` is a path to the local Unix socket on machine `A`, +- `/tmp/cardano-node.sock` is a path to the local Unix socket on machine `D`, +- `john` is the user you use to log in to machine `D`, +- `109.75.33.121` is the IP address of machine `D`. -1. `cardano-tracer` and `cardano-node` work on the **same** machine. In this case, they will use the same Unix socket directly. -2. `cardano-tracer` and `cardano-node` work on **different** machines. In this case, they will be connected using SSH socket forwarding (please see an explanation below). Also, this case corresponds to the situation when _one_ `cardano-tracer` is connected to _multiple_ `cardano-node`s. +Run the same command on machines `B` and `C` to connect the corresponding nodes with the same `cardano-tracer` working on machine `D`. -## Connection mode +## Local Scenario -The field `connectMode` specifies the connection mode, i.e. how `cardano-tracer` and `cardano-node` will be connected. There are two possible modes, `Initiator` and `Responder`. +As was mentioned above, the local scenario is for testing, when your nodes and `cardano-tracer` work on the same machine. In this case, all these processes can see the same local sockets directly, so we don't need `ssh`. The configuration file for a local cluster of 3 nodes will look like this: -The mode `Initiator` means that `cardano-tracer` **initiates** the connection, so you can think of `cardano-tracer` as a client and `cardano-node` as a server.
By default you should use `Initiator` mode. +``` +{ + "network": { + "tag": "ConnectTo", + "contents": [ + "/tmp/cardano-node-1.sock", + "/tmp/cardano-node-2.sock", + "/tmp/cardano-node-3.sock" + ] + }, + "logging": [ + { + "logRoot": "/tmp/cardano-tracer-logs", + "logMode": "FileMode", + "logFormat": "ForMachine" + } + ] +} +``` + +As you see, the tag is `ConnectTo` now, which means that `cardano-tracer` works as a client: it _establishes_ network connections with the nodes via the local Unix sockets `/tmp/cardano-node-*.sock`. Please make sure your local nodes are listening on the corresponding sockets. -The mode `Responder` means that `cardano-tracer` **accepts** the connection. +Let's explore the other fields of the configuration file. ## Requests -The field `loRequestNum` specifies the number of log items that will be requested from the node. In this example, `loRequestNum` is `10`, it means that `cardano-tracer` will periodically ask 10 log item in one request. It is useful to reduce the network traffic: it is possible to ask 50 log items in one request or ask them in 50 requests one at a time. Please note that if `loRequestNum` is bigger than the real number of log items in the node, all these items will be returned immediately. For example, if `cardano-tracer` asks 50 log items but the node has only 40 log items _in this moment of time_, these 40 items will be returned, there is no waiting for missing 10 items. +The optional field `loRequestNum` specifies the number of log items that will be requested from the node. For example, if `loRequestNum` is `10`, `cardano-tracer` will periodically ask for 10 log items in one request. It is useful to reduce the network traffic: it is possible to ask for 50 log items in one request or ask for them in 50 requests one at a time. Please note that if `loRequestNum` is bigger than the real number of log items in the node, all these items will be returned immediately. For example, if `cardano-tracer` asks for 50 log items but the node has only 40 log items _at that moment_, these 40 items will be returned; there is no waiting for the missing 10 items. -The field `ekgRequestFreq` specifies the period of how often EKG metrics will be requested, in seconds. In this example, `ekgRequestFreq` is `1`, which means that `cardano-tracer` will ask for new EKG metrics every second. Please note that there is no limit as `loRequestNum`, so every request returns _all_ the metrics the node has _in this moment of time_. +The optional field `ekgRequestFreq` specifies how often EKG metrics will be requested, in seconds. For example, if `ekgRequestFreq` is `1`, `cardano-tracer` will ask for new EKG metrics every second. Please note that there is no limit analogous to `loRequestNum`, so every request returns _all_ the metrics the node has _at that moment_. ## Logging @@ -226,72 +268,3 @@ After you open `http://127.0.0.1:12798` in your browser you will see: 2. the list of identifiers of connected nodes. Each identifier is a hyperlink to the page where you will see the list of EKG metrics received from the corresponding node. - -# Appendix - -## SSH socket forwarding - -As was mentioned above, `cardano-tracer` supports the connection with `cardano-node` via the local socket only. And if `cardano-tracer` and `cardano-node` work on the **same** machine, they use the same Unix socket directly.
It can be shown like this: - -``` -machine -+----------------------------------+ -| cardano-node cardano-tracer | -| \ / | -| v v | -| /path/to/socket | -+----------------------------------+ -``` - -But if they work on **different** machines, you need SSH socket forwarding. This mechanism can be shown like this: - -``` -machine A machine B -+-------------------------+ +-------------------------+ -| cardano-node | | cardano-tracer | -| \ | | / | -| v | | v | -| /path/to/socket |<--SSH socket forwarding-->| /path/to/socket | -+-------------------------+ +-------------------------+ -``` - -So, from the programs' point of view, they still work with the same `/path/to/socket` directly. But actually, there are two local sockets on two machines, and SSH socket forwarding mechanism connects these sockets. In other words, this mechanism allows treating `/path/to/socket` on the machine `A` and `/path/to/socket` on the machine `B` as the **same** local socket. - -## Example 1 - -Suppose you have: - -1. machine `A` with `cardano-node` installed, -2. machine `B` with `cardano-tracer` installed and configured as `Initiator`, -3. SSH-access from `B` to `A`. - -The most convenient case is when your access from `B` to `A` is **key**-based, not **password**-based. Please [read the documentation](https://www.ssh.com/academy/ssh/key) for more details. - -[OpenSSH](https://www.openssh.com/) supports socket forwarding out of the box. So, first of all, run the following command on `B`: - -``` -ssh -nNT -L PATH_TO_SOCKET_ON_MACHINE_B:PATH_TO_SOCKET_ON_MACHINE_A -o "ExitOnForwardFailure yes" USER_ON_MACHINE_A@IP_OF_MACHINE_A -``` - -where: - -1. `PATH_TO_SOCKET_ON_MACHINE_B` is the local socket on `B`, -2. `PATH_TO_SOCKET_ON_MACHINE_A` is the local socket on `A`, -3. `USER_ON_MACHINE_A` is the name of your user on `A`, -4. `IP_OF_MACHINE_A` is an IP address (or hostname) of `A`. - -Real example: - -``` -ssh -nNT -L /tmp/cardano-tracer.sock:/tmp/cardano-node.sock -o "ExitOnForwardFailure yes" john@109.75.33.121 -``` - -This command connects the local socket `/tmp/cardano-tracer.sock` on your local machine with the local socket `/tmp/cardano-node.sock` on the remote machine `123.45.67.89`. - -Now you can run the node and the tracer. - -First, run `cardano-node` on `A`. This is because `cardano-tracer` is configured as `Initiator`: in this mode, `cardano-tracer` is treated as a client and `cardano-node` as a server, so the server should be launched **before** the client. - -Finally, run `cardano-tracer` on `B`. - -Please make sure that `TraceOptionForwarder` field in the node's configuration file and `acceptAt` field in the tracer's configuration file contain correct paths to the local sockets. In the previous example, both `TraceOptionForwarder` and `acceptAt` should contain `/tmp/cardano-tracer.sock` path. 
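As a cross-check of the configuration options described in the docs above, here is an illustrative Haskell value of the new `TracerConfig` type (not part of the patch). It assumes the record and data constructors shown in the revised `Cardano.Tracer.Configuration` module below are exported; the concrete values mirror the docs' distributed-scenario example plus the optional request and rotation fields.

```
import Data.List.NonEmpty (NonEmpty (..))

import Cardano.Tracer.Configuration

-- Distributed-scenario configuration equivalent to the JSON example above,
-- with the optional 'loRequestNum', 'ekgRequestFreq' and 'rotation' fields
-- spelled out explicitly.
exampleConfig :: TracerConfig
exampleConfig = TracerConfig
  { network        = AcceptAt (LocalSocket "/tmp/cardano-tracer.sock")
  , loRequestNum   = Just 10    -- how many 'TraceObject's to ask for per request
  , ekgRequestFreq = Just 1.0   -- ask for EKG metrics every second
  , hasEKG         = Nothing
  , hasPrometheus  = Just (Endpoint "127.0.0.1" 12798)
  , logging        = LoggingParams "/tmp/cardano-tracer-logs" FileMode ForMachine :| []
  , rotation       = Just RotationParams
                       { rpLogLimitBytes = 50000
                       , rpMaxAgeHours   = 1
                       , rpKeepFilesNum  = 1
                       }
  }
```

Since the configuration types derive `FromJSON` generically, the same settings can be written as JSON and passed via `--config`, as in the docs' examples.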
diff --git a/cardano-tracer/src/Cardano/Tracer/Acceptors.hs b/cardano-tracer/src/Cardano/Tracer/Acceptors.hs index 7c0c4dc1b47..f44f6741d39 100644 --- a/cardano-tracer/src/Cardano/Tracer/Acceptors.hs +++ b/cardano-tracer/src/Cardano/Tracer/Acceptors.hs @@ -1,25 +1,22 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} - {-# LANGUAGE PackageImports #-} module Cardano.Tracer.Acceptors ( runAcceptors + , runAcceptorsWithBrakes ) where import Codec.CBOR.Term (Term) -import Control.Concurrent (ThreadId, killThread, myThreadId, threadDelay) -import Control.Concurrent.Async (async, asyncThreadId, wait, waitAnyCancel) -import Control.Concurrent.STM (atomically) -import Control.Concurrent.STM.TVar (TVar, modifyTVar', newTVarIO, readTVarIO) -import Control.Exception (SomeException, try) -import Control.Monad (forever, forM_, void) +import Control.Concurrent.Async (forConcurrently_, race_, wait) +import Control.Concurrent.STM.TVar (TVar, newTVarIO, readTVarIO) import "contra-tracer" Control.Tracer (nullTracer) import qualified Data.ByteString.Lazy as LBS -import Data.IORef (IORef, newIORef, readIORef) +import Data.List.NonEmpty (NonEmpty) +import qualified Data.List.NonEmpty as NE +import Data.Maybe (fromMaybe) import Data.HashMap.Strict ((!)) import Data.Time.Clock (secondsToNominalDiffTime) import Data.Void (Void) @@ -53,112 +50,96 @@ import Cardano.Logging (TraceObject) import qualified Trace.Forward.Configuration as TF import qualified Trace.Forward.Protocol.Type as TF import Trace.Forward.Network.Acceptor (acceptTraceObjects, acceptTraceObjectsInit) +import Trace.Forward.Utils (runActionInLoop) import qualified System.Metrics.Configuration as EKGF import qualified System.Metrics.ReqResp as EKGF import System.Metrics.Network.Acceptor (acceptEKGMetrics, acceptEKGMetricsInit) import Cardano.Tracer.Configuration -import Cardano.Tracer.Types (AcceptedItems, TraceObjects, Metrics, - addressToNodeId, prepareAcceptedItems) +import Cardano.Tracer.Handlers (nodeInfoHandler, traceObjectsHandler) +import Cardano.Tracer.Types runAcceptors :: TracerConfig - -> AcceptedItems + -> AcceptedMetrics + -> AcceptedNodeInfo -> IO () -runAcceptors config acceptedItems = do - runAcceptors' config acceptedItems - waitForever - where - waitForever = forever $ threadDelay 1000000000 +runAcceptors config@TracerConfig{network} acceptedMetrics acceptedNodeInfo = + case network of + AcceptAt (LocalSocket p) -> do + stopEKG <- newTVarIO False + stopTF <- newTVarIO False + runActionInLoop + (runAcceptorsResp config p (mkAcceptorsConfigs config p stopEKG stopTF) acceptedMetrics acceptedNodeInfo) + (TF.LocalPipe p) 1 + ConnectTo localSocks -> + forConcurrently_ (NE.nub localSocks) $ \(LocalSocket p) -> do + stopEKG <- newTVarIO False + stopTF <- newTVarIO False + runActionInLoop + (runAcceptorsInit config p (mkAcceptorsConfigs config p stopEKG stopTF) acceptedMetrics acceptedNodeInfo) + (TF.LocalPipe p) 1 -runAcceptors' +runAcceptorsWithBrakes :: TracerConfig - -> AcceptedItems + -> AcceptedMetrics + -> AcceptedNodeInfo + -> NonEmpty (TVar Bool, TVar Bool) -> IO () -runAcceptors' config@TracerConfig{..} acceptedItems = - forM_ acceptAt $ \localSocket -> void . async . 
forever $ do - runAcceptorForOneNode localSocket - threadDelay 1000000 - where - runAcceptorForOneNode localSocket = do - stopEKG <- newIORef False - stopTF <- newIORef False - - -- Temporary fill 'tidVar' using current 'ThreadId'. Later it will be - -- replaced by the real 'ThreadId' from 'serverAsync' (see below). - tmpTId <- myThreadId - tidVar :: TVar ThreadId <- newTVarIO tmpTId +runAcceptorsWithBrakes config@TracerConfig{network} acceptedMetrics acceptedNodeInfo protocolsBrakes = + case network of + AcceptAt (LocalSocket p) -> do + let (stopEKG, stopTF) = NE.head protocolsBrakes + runActionInLoop + (runAcceptorsResp config p (mkAcceptorsConfigs config p stopEKG stopTF) acceptedMetrics acceptedNodeInfo) + (TF.LocalPipe p) 1 + ConnectTo localSocks -> + forConcurrently_ (NE.zip localSocks protocolsBrakes) $ \(LocalSocket p, (stopEKG, stopTF)) -> + runActionInLoop + (runAcceptorsInit config p (mkAcceptorsConfigs config p stopEKG stopTF) acceptedMetrics acceptedNodeInfo) + (TF.LocalPipe p) 1 - let configs = mkAcceptorsConfigs config localSocket stopEKG stopTF - try (runAcceptor connectMode localSocket configs tidVar acceptedItems) >>= \case - Left (e :: SomeException) -> do - -- There is some problem (probably the connection was dropped). - putStrLn $ "cardano-tracer, runAcceptor problem: " <> show e - -- Explicitly stop 'serverAsync'. - killThread =<< readTVarIO tidVar - Right _ -> return () - mkAcceptorsConfigs :: TracerConfig - -> Address - -> IORef Bool - -> IORef Bool + -> FilePath + -> TVar Bool + -> TVar Bool -> ( EKGF.AcceptorConfiguration , TF.AcceptorConfiguration TraceObject ) -mkAcceptorsConfigs TracerConfig{..} localSocket stopEKG stopTF = (ekgConfig, tfConfig) - where - ekgConfig = - EKGF.AcceptorConfiguration +mkAcceptorsConfigs TracerConfig{ekgRequestFreq, loRequestNum} p stopEKG stopTF = + ( EKGF.AcceptorConfiguration { EKGF.acceptorTracer = nullTracer - , EKGF.forwarderEndpoint = forEKGF localSocket - , EKGF.requestFrequency = secondsToNominalDiffTime ekgRequestFreq + , EKGF.forwarderEndpoint = EKGF.LocalPipe p + , EKGF.requestFrequency = secondsToNominalDiffTime $ fromMaybe 1.0 ekgRequestFreq , EKGF.whatToRequest = EKGF.GetAllMetrics - , EKGF.actionOnResponse = print , EKGF.shouldWeStop = stopEKG - , EKGF.actionOnDone = putStrLn "EKGF: we are done!" } - - tfConfig :: TF.AcceptorConfiguration TraceObject - tfConfig = - TF.AcceptorConfiguration + , TF.AcceptorConfiguration { TF.acceptorTracer = nullTracer - , TF.forwarderEndpoint = forTF localSocket - , TF.whatToRequest = TF.GetTraceObjects loRequestNum - , TF.actionOnReply = print + , TF.forwarderEndpoint = TF.LocalPipe p + , TF.whatToRequest = TF.NumberOfTraceObjects $ fromMaybe 100 loRequestNum , TF.shouldWeStop = stopTF - , TF.actionOnDone = putStrLn "TF: we are done!" 
} + ) - forTF (LocalSocket p) = TF.LocalPipe p - forEKGF (LocalSocket p) = EKGF.LocalPipe p - -runAcceptor - :: ConnectMode - -> Address +runAcceptorsInit + :: TracerConfig + -> FilePath -> (EKGF.AcceptorConfiguration, TF.AcceptorConfiguration TraceObject) - -> TVar ThreadId - -> AcceptedItems + -> AcceptedMetrics + -> AcceptedNodeInfo -> IO () -runAcceptor mode (LocalSocket localSock) (ekgConfig, tfConfig) tidVar acceptedItems = withIOManager $ \iocp -> do - let snock = localSnocket iocp localSock - addr = localAddressFromPath localSock - case mode of - Initiator -> - doConnectToAcceptor snock addr noTimeLimitsHandshake $ - appInitiator - [ (runEKGAcceptorInit ekgConfig acceptedItems, 1) - , (runTraceObjectsAcceptorInit tfConfig acceptedItems, 2) - ] - Responder -> - doListenToForwarder snock addr noTimeLimitsHandshake tidVar $ - appResponder - [ (runEKGAcceptor ekgConfig acceptedItems, 1) - , (runTraceObjectsAcceptor tfConfig acceptedItems, 2) - ] +runAcceptorsInit config p (ekgConfig, tfConfig) acceptedMetrics acceptedNodeInfo = + withIOManager $ \iocp -> + doConnectToForwarder (localSnocket iocp p) (localAddressFromPath p) noTimeLimitsHandshake $ + appInitiator + [ (runEKGAcceptorInit ekgConfig acceptedMetrics, 1) + , (runTraceObjectsAcceptorInit config tfConfig acceptedNodeInfo, 2) + ] where - appResponder protocols = + appInitiator protocols = OuroborosApplication $ \connectionId _shouldStopSTM -> [ MiniProtocol { miniProtocolNum = MiniProtocolNum num @@ -168,7 +149,22 @@ runAcceptor mode (LocalSocket localSock) (ekgConfig, tfConfig) tidVar acceptedIt | (protocol, num) <- protocols ] - appInitiator protocols = +runAcceptorsResp + :: TracerConfig + -> FilePath + -> (EKGF.AcceptorConfiguration, TF.AcceptorConfiguration TraceObject) + -> AcceptedMetrics + -> AcceptedNodeInfo + -> IO () +runAcceptorsResp config p (ekgConfig, tfConfig) acceptedMetrics acceptedNodeInfo = + withIOManager $ \iocp -> do + doListenToForwarder (localSnocket iocp p) (localAddressFromPath p) noTimeLimitsHandshake $ + appResponder + [ (runEKGAcceptor ekgConfig acceptedMetrics, 1) + , (runTraceObjectsAcceptor config tfConfig acceptedNodeInfo, 2) + ] + where + appResponder protocols = OuroborosApplication $ \connectionId _shouldStopSTM -> [ MiniProtocol { miniProtocolNum = MiniProtocolNum num @@ -178,13 +174,13 @@ runAcceptor mode (LocalSocket localSock) (ekgConfig, tfConfig) tidVar acceptedIt | (protocol, num) <- protocols ] -doConnectToAcceptor +doConnectToForwarder :: Snocket IO fd addr -> addr -> ProtocolTimeLimits (Handshake UnversionedProtocol Term) -> OuroborosApplication 'InitiatorMode addr LBS.ByteString IO () Void -> IO () -doConnectToAcceptor snocket address timeLimits app = +doConnectToForwarder snocket address timeLimits app = connectToNode snocket unversionedHandshakeCodec @@ -204,87 +200,85 @@ doListenToForwarder => Snocket IO fd addr -> addr -> ProtocolTimeLimits (Handshake UnversionedProtocol Term) - -> TVar ThreadId -> OuroborosApplication 'ResponderMode addr LBS.ByteString IO Void () -> IO () -doListenToForwarder snocket address timeLimits tidVar app = do +doListenToForwarder snocket address timeLimits app = do networkState <- newNetworkMutableState - nsAsync <- async $ cleanNetworkMutableState networkState - clAsync <- async . 
void $ - withServerNode - snocket - nullNetworkServerTracers - networkState - (AcceptedConnectionsLimit maxBound maxBound 0) - address - unversionedHandshakeCodec - timeLimits - (cborTermVersionDataCodec unversionedProtocolDataCodec) - acceptableVersion - (simpleSingletonVersions - UnversionedProtocol - UnversionedProtocolData - (SomeResponderApplication app) - ) - nullErrorPolicies - $ \_ serverAsync -> do - -- Store 'serverAsync' to be able to kill it later. - atomically $ modifyTVar' tidVar $ const (asyncThreadId serverAsync) - wait serverAsync -- Block until async exception. - void $ waitAnyCancel [nsAsync, clAsync] + race_ (cleanNetworkMutableState networkState) + $ withServerNode + snocket + nullNetworkServerTracers + networkState + (AcceptedConnectionsLimit maxBound maxBound 0) + address + unversionedHandshakeCodec + timeLimits + (cborTermVersionDataCodec unversionedProtocolDataCodec) + acceptableVersion + (simpleSingletonVersions + UnversionedProtocol + UnversionedProtocolData + (SomeResponderApplication app) + ) + nullErrorPolicies + $ \_ serverAsync -> do + wait serverAsync -- Block until async exception. runEKGAcceptor :: Show addr => EKGF.AcceptorConfiguration - -> AcceptedItems + -> AcceptedMetrics -> ConnectionId addr -> RunMiniProtocol 'ResponderMode LBS.ByteString IO Void () -runEKGAcceptor ekgConfig acceptedItems connId = do - let (_, _, (ekgStore, localStore)) = - unsafePerformIO $ prepareStores acceptedItems connId +runEKGAcceptor ekgConfig acceptedMetrics connId = do + let (ekgStore, localStore) = unsafePerformIO $ prepareMetricsStores acceptedMetrics connId acceptEKGMetrics ekgConfig ekgStore localStore runTraceObjectsAcceptor :: Show addr - => TF.AcceptorConfiguration TraceObject - -> AcceptedItems + => TracerConfig + -> TF.AcceptorConfiguration TraceObject + -> AcceptedNodeInfo -> ConnectionId addr -> RunMiniProtocol 'ResponderMode LBS.ByteString IO Void () -runTraceObjectsAcceptor tfConfig acceptedItems connId = do - let (niStore, trObQueue, _) = - unsafePerformIO $ prepareStores acceptedItems connId - acceptTraceObjects tfConfig trObQueue niStore +runTraceObjectsAcceptor config tfConfig acceptedNodeInfo connId = do + let nodeId = connIdToNodeId connId + acceptTraceObjects + tfConfig + (traceObjectsHandler config nodeId acceptedNodeInfo) + (nodeInfoHandler config nodeId acceptedNodeInfo) runEKGAcceptorInit :: Show addr => EKGF.AcceptorConfiguration - -> AcceptedItems + -> AcceptedMetrics -> ConnectionId addr -> RunMiniProtocol 'InitiatorMode LBS.ByteString IO () Void -runEKGAcceptorInit ekgConfig acceptedItems connId = do - let (_, _, (ekgStore, localStore)) = - unsafePerformIO $ prepareStores acceptedItems connId +runEKGAcceptorInit ekgConfig acceptedMetrics connId = do + let (ekgStore, localStore) = unsafePerformIO $ prepareMetricsStores acceptedMetrics connId acceptEKGMetricsInit ekgConfig ekgStore localStore runTraceObjectsAcceptorInit :: Show addr - => TF.AcceptorConfiguration TraceObject - -> AcceptedItems + => TracerConfig + -> TF.AcceptorConfiguration TraceObject + -> AcceptedNodeInfo -> ConnectionId addr -> RunMiniProtocol 'InitiatorMode LBS.ByteString IO () Void -runTraceObjectsAcceptorInit tfConfig acceptedItems connId = do - let (niStore, trObQueue, _) = - unsafePerformIO $ prepareStores acceptedItems connId - acceptTraceObjectsInit tfConfig trObQueue niStore +runTraceObjectsAcceptorInit config tfConfig acceptedNodeInfo connId = do + let nodeId = connIdToNodeId connId + acceptTraceObjectsInit + tfConfig + (traceObjectsHandler config nodeId 
acceptedNodeInfo) + (nodeInfoHandler config nodeId acceptedNodeInfo) -prepareStores +prepareMetricsStores :: Show addr - => AcceptedItems + => AcceptedMetrics -> ConnectionId addr - -> IO (TF.NodeInfoStore, TraceObjects, Metrics) -prepareStores acceptedItems ConnectionId{..} = do - -- Remote address of the node is unique identifier, from the tracer's point of view. - let nodeId = addressToNodeId $ show remoteAddress - prepareAcceptedItems nodeId acceptedItems - items <- readIORef acceptedItems - return $ items ! nodeId + -> IO Metrics +prepareMetricsStores acceptedMetrics connId = do + let nodeId = connIdToNodeId connId + prepareAcceptedMetrics nodeId acceptedMetrics + metrics <- readTVarIO acceptedMetrics + return $ metrics ! nodeId diff --git a/cardano-tracer/src/Cardano/Tracer/CLI.hs b/cardano-tracer/src/Cardano/Tracer/CLI.hs index f31755172e7..b5c9a2d0789 100644 --- a/cardano-tracer/src/Cardano/Tracer/CLI.hs +++ b/cardano-tracer/src/Cardano/Tracer/CLI.hs @@ -8,8 +8,7 @@ module Cardano.Tracer.CLI import Data.Aeson (FromJSON, ToJSON) import GHC.Generics (Generic) -import Options.Applicative (Parser, bashCompleter, completer, - help, long, metavar, strOption) +import Options.Applicative -- | Type for CLI parameters required for the service. newtype TracerParams = TracerParams @@ -17,24 +16,11 @@ newtype TracerParams = TracerParams } deriving (Generic, FromJSON, ToJSON) parseTracerParams :: Parser TracerParams -parseTracerParams = - TracerParams - <$> parseFilePath - "config" - "file" - "Configuration file for cardano-tracer service" - --- Aux parsers - -parseFilePath - :: String - -> String - -> String - -> Parser FilePath -parseFilePath optname completion desc = strOption flags - where - flags = - long optname +parseTracerParams = TracerParams <$> + strOption ( + long "config" + <> short 'c' <> metavar "FILEPATH" - <> help desc - <> completer (bashCompleter completion) + <> help "Configuration file for cardano-tracer service" + <> completer (bashCompleter "file") + ) diff --git a/cardano-tracer/src/Cardano/Tracer/Configuration.hs b/cardano-tracer/src/Cardano/Tracer/Configuration.hs index b64e2f5bcc7..f77ed8ab9c2 100644 --- a/cardano-tracer/src/Cardano/Tracer/Configuration.hs +++ b/cardano-tracer/src/Cardano/Tracer/Configuration.hs @@ -12,23 +12,26 @@ module Cardano.Tracer.Configuration , LogMode (..) , LogFormat (..) , LoggingParams (..) - , ConnectMode (..) + , Network (..) , TracerConfig (..) , readTracerConfig ) where import Data.Aeson (FromJSON, ToJSON, eitherDecodeFileStrict') import Data.Fixed (Pico) +import Data.List.NonEmpty import Data.Word (Word16, Word64) import GHC.Generics (Generic) -import qualified System.Exit as Ex +import System.Exit (die) type Host = String type Port = Int +-- | Only local socket is supported. newtype Address = LocalSocket FilePath deriving (Eq, Generic, FromJSON, Show, ToJSON) +-- | Endpoint for internal services. data Endpoint = Endpoint !Host !Port deriving (Eq, Generic, FromJSON, Show, ToJSON) @@ -38,45 +41,44 @@ data RotationParams = RotationParams , rpKeepFilesNum :: !Word -- ^ Number of files to keep } deriving (Eq, Generic, FromJSON, Show, ToJSON) +-- | Log mode: in the file or in Linux journal service. data LogMode = FileMode | JournalMode deriving (Eq, Generic, FromJSON, Show, ToJSON) +-- | Format of log file: for human (text) or for machine (json). data LogFormat = ForHuman | ForMachine deriving (Eq, Generic, FromJSON, Show, ToJSON) +-- | Logging parameters. 
data LoggingParams - { logRoot :: !FilePath - , logMode :: !LogMode - , logFormat :: !LogFormat + { logRoot :: !FilePath -- ^ Root directory where all subdirs with logs will be created. + , logMode :: !LogMode -- ^ Log mode. + , logFormat :: !LogFormat -- ^ Log format. } deriving (Eq, Generic, FromJSON, Show, ToJSON) --- | 'cardano-tracer' can be both an initiator and a responder, from --- networking point of view: --- 1. In 'Initiator' mode it tries to establish the connection with the node. --- 2. In 'Responder' mode it accepts the conection from the node. -data ConnectMode - = Initiator - | Responder +data Network + = AcceptAt !Address + | ConnectTo !(NonEmpty Address) deriving (Eq, Generic, FromJSON, Show, ToJSON) +-- | Complete configuration. data TracerConfig = TracerConfig - { connectMode :: !ConnectMode - , acceptAt :: ![Address] - , loRequestNum :: !Word16 -- ^ How many 'TraceObject's in one request. - , ekgRequestFreq :: !Pico -- ^ How often to request EKG-metrics. - , hasEKG :: !(Maybe Endpoint) - , hasPrometheus :: !(Maybe Endpoint) - , logging :: ![LoggingParams] - , rotation :: !(Maybe RotationParams) + { network :: !Network -- ^ How cardano-tracer will be connected to node(s). + , loRequestNum :: !(Maybe Word16) -- ^ How many 'TraceObject's will be asked for in each request. + , ekgRequestFreq :: !(Maybe Pico) -- ^ How often to request EKG metrics, in seconds. + , hasEKG :: !(Maybe Endpoint) -- ^ Endpoint for EKG web-page. + , hasPrometheus :: !(Maybe Endpoint) -- ^ Endpoint for Prometheus web-page. + , logging :: !(NonEmpty LoggingParams) -- ^ Logging parameters. + , rotation :: !(Maybe RotationParams) -- ^ Rotation parameters. } deriving (Eq, Generic, FromJSON, Show, ToJSON) --- | Reads the tracer's configuration file (path is passed via '--config' CLI option). +-- | Read the tracer's configuration file (path is passed via '--config' CLI option).
readTracerConfig :: FilePath -> IO TracerConfig readTracerConfig pathToConfig = eitherDecodeFileStrict' pathToConfig >>= \case - Left e -> Ex.die $ "Invalid tracer's configuration: " <> show e + Left e -> die $ "Invalid tracer's configuration: " <> show e Right (config :: TracerConfig) -> return config diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers.hs b/cardano-tracer/src/Cardano/Tracer/Handlers.hs index acb3b8b7396..f721c766acf 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers.hs @@ -1,18 +1,73 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE PackageImports #-} + module Cardano.Tracer.Handlers - ( runHandlers + ( nodeInfoHandler + , traceObjectsHandler ) where -import Control.Concurrent.Async (concurrently_) +import Control.Concurrent.Async (forConcurrently_) +import Control.Concurrent.STM (atomically) +import Control.Concurrent.STM.TVar (modifyTVar', readTVarIO) +import Control.Exception (IOException, try) +import "contra-tracer" Control.Tracer (showTracing, stdoutTracer, traceWith) +import Data.HashMap.Strict ((!), insert) +import qualified Data.List.NonEmpty as NE + +import Trace.Forward.Protocol.Type (NodeInfo (..)) + +import Cardano.Logging (TraceObject) import Cardano.Tracer.Configuration -import Cardano.Tracer.Types (AcceptedItems) -import Cardano.Tracer.Handlers.Logs.Run (runLogsHandler) -import Cardano.Tracer.Handlers.Metrics.Run (runMetricsHandler) +import Cardano.Tracer.Handlers.Logs.File +import Cardano.Tracer.Handlers.Logs.Journal +import Cardano.Tracer.Types -runHandlers +-- | Node's info is required by many parts of 'cardano-tracer'. +-- But some of these parts may be inactive (yet) when node's info +-- is already accepted, so we have to store it. +nodeInfoHandler :: TracerConfig - -> AcceptedItems + -> NodeId + -> AcceptedNodeInfo + -> NodeInfo -> IO () -runHandlers config acceptedItems = - concurrently_ (runLogsHandler config acceptedItems) - (runMetricsHandler config acceptedItems) +nodeInfoHandler TracerConfig{logging} nodeId acceptedNodeInfo ni = do + atomically . modifyTVar' acceptedNodeInfo $ insert nodeId ni + -- We can already write this node's info in the beginning of log and/or journal. + forConcurrently_ (NE.nub logging) $ \LoggingParams{logMode, logRoot, logFormat} -> + case logMode of + FileMode -> + showProblemIfAny $ writeNodeInfoToFile nodeId logRoot logFormat ni + JournalMode -> + showProblemIfAny $ writeNodeInfoToJournal nodeId ni + +traceObjectsHandler + :: TracerConfig + -> NodeId + -> AcceptedNodeInfo + -> [TraceObject] + -> IO () +traceObjectsHandler _ _ _ [] = return () +traceObjectsHandler TracerConfig{logging} nodeId acceptedNodeInfo traceObjects = do + -- The protocol guarantees that node's info is received _before_ any trace object(s) from that node. + -- So if we are here, it means that info about corresponding node is already received and stored. + nodesInfo <- readTVarIO acceptedNodeInfo + let NodeInfo{niName} = nodesInfo ! 
nodeId + forConcurrently_ (NE.nub logging) $ \LoggingParams{logMode, logRoot, logFormat} -> + case logMode of + FileMode -> + showProblemIfAny $ writeTraceObjectsToFile nodeId niName logRoot logFormat traceObjects + JournalMode -> + showProblemIfAny $ writeTraceObjectsToJournal nodeId niName traceObjects + +showProblemIfAny :: IO () -> IO () +showProblemIfAny action = + try action >>= \case + Left (e :: IOException) -> logTrace $ "cardano-tracer, cannot write trace objects: " <> show e + Right _ -> return () + where + logTrace = traceWith $ showTracing stdoutTracer diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/File.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/File.hs index 3c9e2db7f4e..2b6b38e5ea0 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/File.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/File.hs @@ -1,20 +1,21 @@ {-# LANGUAGE CPP #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} module Cardano.Tracer.Handlers.Logs.File - ( writeTraceObjectsToFile + ( writeNodeInfoToFile + , writeTraceObjectsToFile ) where -import Control.Monad.Extra (unlessM) +import Control.Monad (unless) +import Control.Monad.Extra (ifM, unlessM) import Data.Aeson (ToJSON, (.=), object, toJSON) import Data.Aeson.Text (encodeToLazyText) import qualified Data.ByteString.Lazy as LBS import Data.Char (isDigit) import Data.Maybe (mapMaybe) import qualified Data.Text as T +import Data.Text (Text) import qualified Data.Text.Lazy as TL import Data.Text.Lazy.Encoding (encodeUtf8) import Data.Time.Clock (UTCTime) @@ -24,65 +25,136 @@ import System.FilePath (()) import Cardano.Logging +import Trace.Forward.Protocol.Type (NodeInfo (..)) + import Cardano.Tracer.Configuration -import Cardano.Tracer.Handlers.Logs.Log (createLogAndSymLink, doesSymLinkValid, - symLinkName) -import Cardano.Tracer.Types (NodeId, NodeName) +import Cardano.Tracer.Handlers.Logs.Log +import Cardano.Tracer.Types + +-- | Log files structure can be represented like this (for example, with json format): +-- +-- rootDir/ +-- nodeFullId/ +-- node-*-1.json +-- node-*-2.json +-- node-*-3.json +-- ... +-- node.json -> node-*-3.json +-- +-- So 'TraceObject's and 'NodeInfo' received from the node will be stored in the logs +-- saved in the corresponding subdirectory inside of 'rootDir'. + +writeNodeInfoToFile + :: NodeId + -> FilePath + -> LogFormat + -> NodeInfo + -> IO () +writeNodeInfoToFile nodeId rootDir format nodeInfo = do + pathToCurrentLog <- prepareLogsStructure nodeId (niName nodeInfo) rootDir format + LBS.appendFile pathToCurrentLog . encodeUtf8 $ preparedNodeInfo + where + preparedNodeInfo = + case format of + ForHuman -> nodeInfoToText nodeInfo + ForMachine -> nodeInfoToJSON nodeInfo writeTraceObjectsToFile :: NodeId - -> NodeName + -> Text -> FilePath -> LogFormat -> [TraceObject] -> IO () writeTraceObjectsToFile _ _ _ _ [] = return () writeTraceObjectsToFile nodeId nodeName rootDir format traceObjects = do - createDirectoryIfMissing True rootDir + pathToCurrentLog <- prepareLogsStructure nodeId nodeName rootDir format + unless (null itemsToWrite) $ + LBS.appendFile pathToCurrentLog . encodeUtf8 . 
TL.concat $ itemsToWrite + where + itemsToWrite = + case format of + ForHuman -> mapMaybe traceObjectToText traceObjects + ForMachine -> mapMaybe traceObjectToJSON traceObjects + +prepareLogsStructure + :: NodeId + -> Text + -> FilePath + -> LogFormat + -> IO FilePath +prepareLogsStructure nodeId nodeName rootDir format = do + -- Root directory (as a parent for subDirForLogs) will be created as well if needed. createDirectoryIfMissing True subDirForLogs - unlessM (doesFileExist pathToCurrentLog) $ - createLogAndSymLink subDirForLogs format - -- Symlink can be broken, check it. - doesSymLinkValid pathToCurrentLog >>= \case - True -> - writeTraceObjects pathToCurrentLog (formatter format) traceObjects - False -> do - -- Symlink is here, but it's broken. + ifM (doesFileExist pathToCurrentLog) + (unlessM (doesSymLinkValid pathToCurrentLog) $ do removeFile pathToCurrentLog - createLogAndSymLink subDirForLogs format + createLogAndSymLink subDirForLogs format) + (createLogAndSymLink subDirForLogs format) + return pathToCurrentLog where subDirForLogs = rootDir nodeFullId - nodeFullId = if T.null nodeName - then show nodeId - else T.unpack nodeName <> "-" <> show nodeId + nodeFullId = T.unpack $ printNodeFullId nodeName nodeId -- This is a symlink to the current log file, please see rotation parameters. pathToCurrentLog = subDirForLogs symLinkName format - formatter ForHuman = traceObjectToText - formatter ForMachine = traceObjectToJSON - - writeTraceObjects logPath formatIt = - -- It's much more efficiently to encode 'Text' explicitly and - -- then perform 'ByteString'-level 'IO' than perform 'Text'-level 'IO'. - LBS.appendFile logPath - . encodeUtf8 - . TL.append nl - . TL.intercalate nl - . mapMaybe formatIt - +nl :: TL.Text #if defined(mingw32_HOST_OS) - nl = "\r\n" +nl = "\r\n" #else - nl = "\n" +nl = "\n" #endif +nodeInfoToText :: NodeInfo -> TL.Text +nodeInfoToText NodeInfo{..} = TL.intercalate nl + [ "Node info" + , " name: " <> TL.fromStrict niName + , " protocol: " <> TL.fromStrict niProtocol + , " version: " <> TL.fromStrict niVersion + , " commit: " <> TL.fromStrict niCommit + , " start time: " <> TL.pack (show niStartTime) + , " system start time: " <> TL.pack (show niSystemStartTime) + , nl + ] + +data NodeInfoForJSON = NodeInfoForJSON + { jName :: !T.Text + , jProtocol :: !T.Text + , jVersion :: !T.Text + , jCommit :: !T.Text + , jStartTime :: !UTCTime + , jSystemStartTime :: !UTCTime + } + +instance ToJSON NodeInfoForJSON where + toJSON NodeInfoForJSON{..} = + object [ "nodeName" .= jName + , "protocol" .= jProtocol + , "version" .= jVersion + , "commit" .= jCommit + , "startTime" .= formatTime defaultTimeLocale "%FT%T%2Q%Z" jStartTime + , "systemStartTime" .= formatTime defaultTimeLocale "%FT%T%2Q%Z" jSystemStartTime + ] + +nodeInfoToJSON :: NodeInfo -> TL.Text +nodeInfoToJSON NodeInfo{..} = TL.append nl . 
encodeToLazyText $ + NodeInfoForJSON + { jName = niName + , jProtocol = niProtocol + , jVersion = niVersion + , jCommit = niCommit + , jStartTime = niStartTime + , jSystemStartTime = niSystemStartTime + } + traceObjectToText :: TraceObject -> Maybe TL.Text traceObjectToText TraceObject{..} = case toHuman of Nothing -> Nothing Just msgForHuman -> Just $ - "[" <> host <> ":" <> name <> ":" <> sev <> ":" <> thId <> "] [" <> time <> "] " <> - TL.fromStrict msgForHuman + "[" <> host <> ":" <> name <> ":" <> sev <> ":" <> thId <> "] [" <> time <> "] " + <> TL.fromStrict msgForHuman + <> nl where host = TL.pack toHostname name = mkName toNamespace @@ -91,13 +163,13 @@ traceObjectToText TraceObject{..} = time = TL.pack $ formatTime defaultTimeLocale "%F %T%2Q %Z" toTimestamp mkName :: Namespace -> TL.Text -mkName [] = "noname" +mkName [] = "noname" mkName names = TL.fromStrict $ T.intercalate "." names data TraceObjectForJSON = TraceObjectForJSON { jAt :: !UTCTime - , jNS :: !TL.Text - , jData :: !T.Text -- !Value + , jNS :: !T.Text + , jData :: !T.Text , jHost :: !T.Text , jSev :: !T.Text , jTId :: !T.Text @@ -106,7 +178,7 @@ data TraceObjectForJSON = TraceObjectForJSON instance ToJSON TraceObjectForJSON where toJSON TraceObjectForJSON{..} = object [ "at" .= formatTime defaultTimeLocale "%FT%T%2Q%Z" jAt - , "ns" .= TL.toStrict jNS + , "ns" .= jNS , "data" .= jData , "host" .= jHost , "sev" .= jSev @@ -117,10 +189,10 @@ traceObjectToJSON :: TraceObject -> Maybe TL.Text traceObjectToJSON TraceObject{..} = case toMachine of Nothing -> Nothing - Just msgForMachine -> Just . encodeToLazyText $ + Just msgForMachine -> Just . TL.append nl . encodeToLazyText $ TraceObjectForJSON { jAt = toTimestamp - , jNS = mkName toNamespace + , jNS = TL.toStrict $ mkName toNamespace , jData = msgForMachine , jHost = T.pack toHostname , jSev = T.pack $ show toSeverity diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Journal.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Journal.hs index 19df62fb037..37a99e89f99 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Journal.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Journal.hs @@ -7,18 +7,23 @@ #endif module Cardano.Tracer.Handlers.Logs.Journal - ( writeTraceObjectsToJournal + ( writeNodeInfoToJournal + , writeTraceObjectsToJournal ) where #if defined(LINUX) import qualified Data.HashMap.Strict as HM import Data.Maybe (fromMaybe) import qualified Data.Text as T +import Data.Text (Text) import Data.Text.Encoding (encodeUtf8) +import Data.Time.Clock (UTCTime) import Data.Time.Format (defaultTimeLocale, formatTime) import Systemd.Journal (Priority (..), message, mkJournalField, priority, sendJournalFields, syslogIdentifier) +import Trace.Forward.Protocol.Type (NodeInfo (..)) + import Cardano.Logging (TraceObject (..)) import qualified Cardano.Logging as L @@ -28,13 +33,39 @@ import System.IO (hPutStrLn, stderr) import Cardano.Logging +import Trace.Forward.Protocol.Type (NodeInfo) + import Cardano.Tracer.Types #endif #if defined(LINUX) +writeNodeInfoToJournal + :: NodeId + -> NodeInfo + -> IO () +writeNodeInfoToJournal nodeId ni = + sendJournalFields mkJournalFields + where + mkJournalFields = + syslogIdentifier (niName ni <> T.pack (show nodeId)) + <> HM.fromList [ (nodeName, encodeUtf8 $ niName ni) + , (protocol, encodeUtf8 $ niProtocol ni) + , (version, encodeUtf8 $ niVersion ni) + , (commit, encodeUtf8 $ niCommit ni) + , (startTime, encodeUtf8 . formatAsIso8601 $ niStartTime ni) + , (sysStartTime, encodeUtf8 . 
formatAsIso8601 $ niSystemStartTime ni) + ] + + nodeName = mkJournalField "nodeName" + protocol = mkJournalField "protocol" + version = mkJournalField "version" + commit = mkJournalField "commit" + startTime = mkJournalField "startTime" + sysStartTime = mkJournalField "systemStartTime" + writeTraceObjectsToJournal :: NodeId - -> NodeName + -> Text -> [TraceObject] -> IO () writeTraceObjectsToJournal _ _ [] = return () @@ -64,7 +95,8 @@ writeTraceObjectsToJournal nodeId nodeName traceObjects = thread = mkJournalField "thread" time = mkJournalField "time" - formatAsIso8601 = T.pack . formatTime defaultTimeLocale "%F %T%12QZ" +formatAsIso8601 :: UTCTime -> Text +formatAsIso8601 = T.pack . formatTime defaultTimeLocale "%F %T%12QZ" mkPriority :: L.SeverityS -> Priority mkPriority L.Debug = Debug @@ -76,6 +108,13 @@ mkPriority L.Critical = Critical mkPriority L.Alert = Alert mkPriority L.Emergency = Emergency #else +writeNodeInfoToJournal + :: NodeId + -> NodeInfo + -> IO () +writeNodeInfoToJournal _ _ = + hPutStrLn stderr "Writing to systemd's journal is available on Linux only." + writeTraceObjectsToJournal :: NodeId -> NodeName diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Log.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Log.hs index 191ffc666c2..e74f40f8c6e 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Log.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Log.hs @@ -1,9 +1,7 @@ {-# LANGUAGE OverloadedStrings #-} module Cardano.Tracer.Handlers.Logs.Log - ( logPrefix - , logExtension - , createLogAndSymLink + ( createLogAndSymLink , createLogAndUpdateSymLink , doesSymLinkValid , getTimeStampFromLog @@ -56,37 +54,44 @@ doesSymLinkValid pathToSymLink = do isItLog :: LogFormat -> FilePath -> Bool isItLog format pathToLog = hasProperPrefix && hasTimestamp && hasProperExt - where + where fileName = takeFileName pathToLog hasProperPrefix = T.pack logPrefix `T.isPrefixOf` T.pack fileName hasTimestamp = isJust timeStamp + timeStamp :: Maybe UTCTime - timeStamp = parseTimeM True defaultTimeLocale timeStampFormat (T.unpack maybeTimestamp) + timeStamp = parseTimeM True defaultTimeLocale timeStampFormat $ T.unpack maybeTimestamp + maybeTimestamp = T.drop (length logPrefix) . T.pack . takeBaseName $ fileName hasProperExt = takeExtension fileName == logExtension format --- | Create a new log file and symlink to it, from scratch. +-- | Create a new log file and a symlink to it, from scratch. createLogAndSymLink :: FilePath -> LogFormat -> IO () -createLogAndSymLink subDirForLogs format = withCurrentDirectory subDirForLogs $ - createLog format >>= flip createFileLink (symLinkName format) +createLogAndSymLink subDirForLogs format = + createLog subDirForLogs format >>= flip createFileLink symLink + where + symLink = subDirForLogs symLinkName format -- | Create a new log file and move existing symlink -- from the old log file to the new one. createLogAndUpdateSymLink :: FilePath -> LogFormat -> IO () -createLogAndUpdateSymLink subDirForLogs format = withCurrentDirectory subDirForLogs $ do - newLog <- createLog format - let tmpSymLink = symLinkNameTmp format - realSymLink = symLinkName format - whenM (doesFileExist tmpSymLink) $ removeFile tmpSymLink +createLogAndUpdateSymLink subDirForLogs format = do + newLog <- createLog subDirForLogs format + whenM (doesFileExist tmpSymLink) $ + removeFile tmpSymLink createFileLink newLog tmpSymLink renamePath tmpSymLink realSymLink -- Atomic operation, uses POSIX.rename. 
+ where + tmpSymLink = subDirForLogs symLinkNameTmp format + realSymLink = subDirForLogs symLinkName format -createLog :: LogFormat -> IO FilePath -createLog format = do +createLog :: FilePath -> LogFormat -> IO FilePath +createLog subDirForLogs format = do ts <- formatTime defaultTimeLocale timeStampFormat <$> getCurrentTime let logName = logPrefix <> ts <.> logExtension format - LBS.writeFile logName LBS.empty - return logName + pathToLog = subDirForLogs logName + LBS.writeFile pathToLog LBS.empty -- Create an empty log file. + return pathToLog -- | This function is applied to the log we already checked, -- so we definitely know it contains timestamp. diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Rotator.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Rotator.hs index e2eb42246d5..7328eb2369c 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Rotator.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Logs/Rotator.hs @@ -1,6 +1,7 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} module Cardano.Tracer.Handlers.Logs.Rotator @@ -8,100 +9,73 @@ module Cardano.Tracer.Handlers.Logs.Rotator ) where import Control.Exception (SomeException, try) -import Control.Concurrent (threadDelay) import Control.Concurrent.Async (forConcurrently_) -import Control.Monad (forM_, forever, when) -import Control.Monad.Extra (whenM) -import Data.List (find, nub, sort) +import Control.Monad (forM_, forever, unless, when) +import Control.Monad.Extra (whenJust, whenM) +import "contra-tracer" Control.Tracer (showTracing, stdoutTracer, traceWith) +import Data.List (nub, sort) +import Data.List.Extra (dropEnd) +import qualified Data.List.NonEmpty as NE import Data.Time (diffUTCTime, getCurrentTime) import Data.Word (Word64) import System.Directory import System.Directory.Extra (listDirectories, listFiles) -import System.FilePath ((), takeDirectory, takeFileName) -import System.IO (hPutStrLn, stderr) +import System.FilePath ((), takeDirectory) +import System.Time.Extra (sleep) import Cardano.Tracer.Configuration import Cardano.Tracer.Handlers.Logs.Log runLogsRotator :: TracerConfig -> IO () -runLogsRotator TracerConfig{..} = - case rotation of - Nothing -> return () -- No rotation parameters are defined. - Just rotParams -> launchRotator rotParams rootDirsWithFormats +runLogsRotator TracerConfig{rotation, logging} = + whenJust rotation $ + launchRotator loggingParamsForFiles where - rootDirsWithFormats = nub . map getRootAndFormat . filter fileParamsOnly $ logging - fileParamsOnly LoggingParams{..} = logMode == FileMode - getRootAndFormat LoggingParams{..} = (logRoot, logFormat) + loggingParamsForFiles = nub . NE.filter filesOnly $ logging + filesOnly LoggingParams{logMode} = logMode == FileMode -- | All the logs with 'TraceObject's received from particular node -- will be stored in a separate directory, so they can be checked -- concurrently. 
launchRotator - :: RotationParams - -> [(FilePath, LogFormat)] + :: [LoggingParams] + -> RotationParams -> IO () -launchRotator _ [] = return () -launchRotator rotParams rootDirsWithFormats = forever $ do - try (forM_ rootDirsWithFormats $ checkRootDir rotParams) >>= \case +launchRotator [] _ = return () +launchRotator loggingParamsForFiles rotParams = forever $ do + try (forM_ loggingParamsForFiles $ checkRootDir rotParams) >>= \case Left (e :: SomeException) -> - hPutStrLn stderr $ "Problem with rotation of log files: " <> show e + logTrace $ "cardano-tracer, problem with logs rotation: " <> show e Right _ -> return () - threadDelay 10000000 + sleep 15.0 + where + logTrace = traceWith $ showTracing stdoutTracer checkRootDir :: RotationParams - -> (FilePath, LogFormat) + -> LoggingParams -> IO () -checkRootDir rotParams (rootDir, format) = - whenM (doesDirectoryExist rootDir) $ +checkRootDir rotParams LoggingParams{logRoot, logFormat} = + whenM (doesDirectoryExist logRoot) $ -- All the logs received from particular node will be stored in corresponding subdir. - listDirectories rootDir >>= \case + listDirectories logRoot >>= \case [] -> -- There are no nodes' subdirs yet (or they were deleted), - -- so no rotation can be performed. + -- so no rotation can be performed for now. return () - subDirs -> do - let fullPathsToSubDirs = map (rootDir ) subDirs - -- Ok, list of subdirs is here, check each of them in parallel. - forConcurrently_ fullPathsToSubDirs $ checkLogsFromNode rotParams format + nodesSubDirs -> do + let fullPathsToSubDirs = map (logRoot ) nodesSubDirs + forConcurrently_ fullPathsToSubDirs $ checkLogs rotParams logFormat -checkLogsFromNode +checkLogs :: RotationParams -> LogFormat -> FilePath -> IO () -checkLogsFromNode RotationParams{..} format subDirForLogs = - listFiles subDirForLogs >>= \case - [] -> - -- There are no logs in this subdir (probably they were deleted), - -- so no rotation can be performed. - return () - [oneFile] -> - -- At least two files must be there: one log and one symlink. - -- So if there is only one file, it's a weird situation, - -- (probably invalid symlink only), we have to try to fix it. - fixLog (subDirForLogs oneFile) format - logs -> do - let fullPathsToLogs = map (subDirForLogs ) logs - checkIfCurrentLogIsFull fullPathsToLogs format rpLogLimitBytes - checkIfThereAreOldLogs fullPathsToLogs format rpMaxAgeHours rpKeepFilesNum - -fixLog - :: FilePath - -> LogFormat - -> IO () -fixLog oneFile format = - isItSymLink format oneFile >>= \case - True -> do - -- It is a symlink, but corresponding log was deleted, - -- whch means that symlink is already invalid. - removeFile oneFile - createLogAndSymLink (takeDirectory oneFile) format - False -> - when (isItLog format oneFile) $ - -- It is a single log, but its symlink was deleted. - withCurrentDirectory (takeDirectory oneFile) $ - createFileLink (takeFileName oneFile) (symLinkName format) +checkLogs RotationParams{rpLogLimitBytes, rpMaxAgeHours, rpKeepFilesNum} format subDirForLogs = do + logs <- map (subDirForLogs ) . 
filter (isItLog format) <$> listFiles subDirForLogs + checkIfCurrentLogIsFull logs format rpLogLimitBytes + checkIfThereAreOldLogs logs rpMaxAgeHours rpKeepFilesNum checkIfCurrentLogIsFull :: [FilePath] @@ -110,64 +84,44 @@ checkIfCurrentLogIsFull -> IO () checkIfCurrentLogIsFull [] _ _ = return () checkIfCurrentLogIsFull logs format maxSizeInBytes = - case find (\logPath -> takeFileName logPath == symLinkName format) logs of - Just symLink -> - doesSymLinkValid symLink >>= \case - True -> - whenM (isLogFull =<< getPathToLatestLog symLink) $ - createLogAndUpdateSymLink subDirForLogs format - False -> - -- Remove invalid symlink. - removeFile symLink - Nothing -> - -- There is no symlink we need, so skip check for now: - -- this symlink will be created when the new 'TraceObject's will be received. - return () + whenM (logIsFull pathToCurrentLog) $ + createLogAndUpdateSymLink (takeDirectory pathToCurrentLog) format where - subDirForLogs = takeDirectory $ head logs -- All these logs are in the same subdir. - - getPathToLatestLog symlink = do - logName <- getSymbolicLinkTarget symlink - return $ subDirForLogs logName - - isLogFull logName = do - sz <- getFileSize logName - return $ fromIntegral sz >= maxSizeInBytes + logIsFull logName = do + size <- getFileSize logName + return $ fromIntegral size >= maxSizeInBytes + -- Since logs' names contain timestamps, the maximum one is the latest log, + -- or current log (i.e. the log we write 'TraceObject's in). + pathToCurrentLog = maximum logs checkIfThereAreOldLogs :: [FilePath] - -> LogFormat -> Word -> Word -> IO () -checkIfThereAreOldLogs [] _ _ _ = return () -checkIfThereAreOldLogs logs format maxAgeInHours keepFilesNum = do - now <- getCurrentTime - let logsWeNeed = filter (isItLog format) logs - -- Sort by name with timestamp, so the latest logs will always be in the end. - oldLogs = sort . filter (oldLog now) $ logsWeNeed - remainingLogsNum = length logsWeNeed - length oldLogs - if remainingLogsNum >= fromIntegral keepFilesNum - then mapM_ removeFile oldLogs - else removeSomeOldLogs oldLogs remainingLogsNum +checkIfThereAreOldLogs [] _ _ = return () +checkIfThereAreOldLogs logs maxAgeInHours keepFilesNum = do + -- Logs' names contain timestamp, so we can sort them. + let fromOldestToNewest = sort logs + -- N ('keepFilesNum') newest files have to be kept in any case. + logsWeHaveToCheck = dropEnd (fromIntegral keepFilesNum) fromOldestToNewest + unless (null logsWeHaveToCheck) $ do + now <- getCurrentTime + checkOldLogs now logsWeHaveToCheck where - oldLog now' logName = - case getTimeStampFromLog logName of - Just timeStamp -> - let logAge = now' `diffUTCTime` timeStamp - in toSeconds logAge >= maxAgeInSecs - Nothing -> False + checkOldLogs _ [] = return () + checkOldLogs now' (oldestLog:otherLogs) = + case getTimeStampFromLog oldestLog of + Just ts -> do + let oldestLogAge = toSeconds $ now' `diffUTCTime` ts + when (oldestLogAge >= maxAgeInSecs) $ do + removeFile oldestLog + checkOldLogs now' otherLogs + -- If 'oldestLog' isn't outdated (yet), other logs aren't + -- outdated too (because they are newer), so we shouldn't check them. + Nothing -> + -- Something is wrong with log's name, continue. + checkOldLogs now' otherLogs maxAgeInSecs = fromIntegral maxAgeInHours * 3600 toSeconds age = fromEnum age `div` 1000000000000 - - removeSomeOldLogs [] _ = return () - removeSomeOldLogs oldLogs remainingLogsNum = do - -- Too many logs are old, so make sure we keep enough latest logs. 
- let oldLogsNumToKeep = fromIntegral keepFilesNum - remainingLogsNum - -- Reverse logs to place the latest ones in the beginning and drop - -- 'oldLogsNumToKeep' to keep them. - let oldLogsToRemove = drop oldLogsNumToKeep . reverse $ oldLogs - -- If the total num of old logs is less than 'keepFilesNum', all - -- of them will be kept. - mapM_ removeFile oldLogsToRemove diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Monitoring.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Monitoring.hs index 0b2bff5bfb4..780559da43a 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Monitoring.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Monitoring.hs @@ -1,5 +1,6 @@ {-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} module Cardano.Tracer.Handlers.Metrics.Monitoring @@ -7,35 +8,38 @@ module Cardano.Tracer.Handlers.Metrics.Monitoring ) where import Control.Concurrent (threadDelay) +import Control.Concurrent.STM.TVar (readTVarIO) import Control.Exception (SomeException, try) -import Control.Monad (forever, unless, void) +import Control.Monad (forever, unless) +import "contra-tracer" Control.Tracer (showTracing, stdoutTracer, traceWith) import qualified Data.ByteString.Char8 as BSC import qualified Data.HashMap.Strict as HM -import Data.IORef (readIORef) -import System.IO (hPutStrLn, stderr) import System.Remote.Monitoring (forkServerWith) import Cardano.Tracer.Configuration -import Cardano.Tracer.Types (AcceptedItems) +import Cardano.Tracer.Types (AcceptedMetrics) runMonitoringServer :: Endpoint - -> AcceptedItems + -> AcceptedMetrics -> IO () -runMonitoringServer (Endpoint host port) acceptedItems = forever $ do - (try serveEKGPage) >>= \case +runMonitoringServer (Endpoint host port) acceptedMetrics = forever $ do + try serveEKGPage >>= \case Left (e :: SomeException) -> - hPutStrLn stderr $ "Problem with EKG web server: " <> show e - Right _ -> return () + logTrace $ "cardano-tracer, problem with EKG web server: " <> show e + Right _ -> + return () threadDelay 1000000 where serveEKGPage = do - items <- readIORef acceptedItems - unless (HM.null items) $ do + metrics <- readTVarIO acceptedMetrics + unless (HM.null metrics) $ do -- TODO: temporary solution for testing -- (serve the metrics received from the first found node only). - let (_, _, (storeForFirstNode, _)) = snd . head . HM.toList $ items - void $ forkServerWith storeForFirstNode (BSC.pack host) port + let (storeForFirstNode, _) = snd . head . 
HM.toList $ metrics + _server <- forkServerWith storeForFirstNode (BSC.pack host) port waitForever waitForever = forever $ threadDelay 1000000000 + + logTrace = traceWith $ showTracing stdoutTracer diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Prometheus.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Prometheus.hs index 5b4eb11175c..c51407ac1ef 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Prometheus.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Prometheus.hs @@ -1,4 +1,5 @@ {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -7,12 +8,12 @@ module Cardano.Tracer.Handlers.Metrics.Prometheus ) where import Prelude hiding (head) +import Control.Concurrent.STM.TVar (readTVarIO) import Control.Monad (forM, forever) import Control.Monad.IO.Class (liftIO) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BSC import qualified Data.HashMap.Strict as HM -import Data.IORef (readIORef) import Data.List (find) import qualified Data.Map.Strict as M import Data.String (IsString (..)) @@ -28,16 +29,17 @@ import Text.Blaze.Html import Text.Blaze.Html5 hiding (map) import Text.Blaze.Html5.Attributes hiding (title) -import Trace.Forward.Protocol.Type (NodeInfoStore) +import Trace.Forward.Protocol.Type (NodeInfo (..)) import Cardano.Tracer.Configuration import Cardano.Tracer.Types runPrometheusServer :: Endpoint - -> AcceptedItems + -> AcceptedMetrics + -> AcceptedNodeInfo -> IO () -runPrometheusServer (Endpoint host port) acceptedItems = forever $ +runPrometheusServer (Endpoint host port) acceptedMetrics acceptedNodeInfo = forever $ -- If everything is okay, the function 'simpleHttpServe' never returns. -- But if there is some problem, it never throws an exception, but just stops. -- So if it stopped - it will be re-started. @@ -56,18 +58,14 @@ runPrometheusServer (Endpoint host port) acceptedItems = forever $ renderListOfNodes :: Snap () renderListOfNodes = - HM.toList <$> liftIO (readIORef acceptedItems) >>= \case - [] -> writeText "There are no connected nodes yet." - items -> blaze =<< liftIO (mkListOfHrefs items) - - mkListOfHrefs :: [(NodeId, (NodeInfoStore, TraceObjects, Metrics))] -> IO Html - mkListOfHrefs items = do - nodeHrefs <- forM items $ \(nodeId, (niStore, _, _)) -> do - maybeName <- getNodeName niStore - let nodeFullId = - case maybeName of - Nothing -> show nodeId - Just aName -> T.unpack aName <> "-" <> show nodeId + HM.toList <$> liftIO (readTVarIO acceptedNodeInfo) >>= \case + [] -> writeText "There are no connected nodes yet." + ni -> blaze =<< liftIO (mkListOfHrefs ni) + + mkListOfHrefs :: [(NodeId, NodeInfo)] -> IO Html + mkListOfHrefs ni = do + nodeHrefs <- forM ni $ \(nodeId, NodeInfo{niName}) -> do + let nodeFullId = T.unpack $ printNodeFullId niName nodeId return $ a ! href (mkURL nodeFullId) $ toHtml nodeFullId return $ mkPage nodeHrefs @@ -86,7 +84,7 @@ runPrometheusServer (Endpoint host port) acceptedItems = forever $ Nothing -> writeText "No such a node!" Just nodeFullId -> - writeText =<< liftIO (getMetricsFromNode nodeFullId acceptedItems) + writeText =<< liftIO (getMetricsFromNode nodeFullId acceptedMetrics) type MetricName = Text type MetricValue = Text @@ -94,21 +92,21 @@ type MetricsList = [(MetricName, MetricValue)] getMetricsFromNode :: [BS.ByteString] - -> AcceptedItems + -> AcceptedMetrics -> IO Text getMetricsFromNode [] _ = return "No such a node!" 
-getMetricsFromNode (nodeFullId':_) acceptedItems = do - items <- readIORef acceptedItems - if HM.null items +getMetricsFromNode (nodeFullId':_) acceptedMetrics = do + metrics <- readTVarIO acceptedMetrics + if HM.null metrics then return "No such a node!" else do - case find nodeIdWeNeed $ HM.keys items of + case find nodeIdWeNeed $ HM.keys metrics of Nothing -> return "No such a node!" Just nodeId -> do - let (_, _, (ekgStore, _)) = items HM.! nodeId + let (ekgStore, _) = metrics HM.! nodeId sampleAll ekgStore >>= return . renderListOfMetrics . getListOfMetrics where - -- For example, "127.0.0.1-17890" is suffix of "node-1-127.0.0.1-17890" + -- For example, "run-user-1000-core.sock" is suffix of "core-1--run-user-1000-core.sock" nodeIdWeNeed nodeId = T.pack (show nodeId) `T.isSuffixOf` nodeFullId nodeFullId = decodeUtf8 nodeFullId' diff --git a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Run.hs b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Run.hs index 01bb9b1810b..cda32f1a3a0 100644 --- a/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Run.hs +++ b/cardano-tracer/src/Cardano/Tracer/Handlers/Metrics/Run.hs @@ -1,4 +1,4 @@ -{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE NamedFieldPuns #-} module Cardano.Tracer.Handlers.Metrics.Run ( runMetricsHandler @@ -9,16 +9,21 @@ import Control.Concurrent.Async (concurrently_) import Cardano.Tracer.Configuration (TracerConfig (..)) import Cardano.Tracer.Handlers.Metrics.Monitoring (runMonitoringServer) import Cardano.Tracer.Handlers.Metrics.Prometheus (runPrometheusServer) -import Cardano.Tracer.Types (AcceptedItems) +import Cardano.Tracer.Types runMetricsHandler :: TracerConfig - -> AcceptedItems + -> AcceptedMetrics + -> AcceptedNodeInfo -> IO () -runMetricsHandler TracerConfig{..} acceptedItems = +runMetricsHandler TracerConfig{hasEKG, hasPrometheus} acceptedMetrics acceptedNodeInfo = case (hasEKG, hasPrometheus) of - (Nothing, Nothing) -> return () - (Nothing, Just prom) -> runPrometheusServer prom acceptedItems - (Just ekg, Nothing) -> runMonitoringServer ekg acceptedItems - (Just ekg, Just prom) -> concurrently_ (runPrometheusServer prom acceptedItems) - (runMonitoringServer ekg acceptedItems) + (Nothing, Nothing) -> + return () + (Nothing, Just prom) -> + runPrometheusServer prom acceptedMetrics acceptedNodeInfo + (Just ekg, Nothing) -> + runMonitoringServer ekg acceptedMetrics + (Just ekg, Just prom) -> + concurrently_ (runPrometheusServer prom acceptedMetrics acceptedNodeInfo) + (runMonitoringServer ekg acceptedMetrics) diff --git a/cardano-tracer/src/Cardano/Tracer/Run.hs b/cardano-tracer/src/Cardano/Tracer/Run.hs index 5bc0bc099e3..ecb5032dc0d 100644 --- a/cardano-tracer/src/Cardano/Tracer/Run.hs +++ b/cardano-tracer/src/Cardano/Tracer/Run.hs @@ -1,31 +1,61 @@ -{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE NamedFieldPuns #-} --- | This top-level module will be used by the 'cardano-tracer' app. +-- | This top-level module is used by 'cardano-tracer' app. module Cardano.Tracer.Run ( runCardanoTracer -- | For testing purposes. 
, runCardanoTracerWithConfig + , runCardanoTracerWithConfigBrakes ) where -import Control.Concurrent.Async (concurrently_) +import Control.Concurrent.Async (withAsync, wait) +import Control.Concurrent.STM.TVar (TVar) +import Control.Monad (void) +import Data.List.NonEmpty (NonEmpty) -import Cardano.Tracer.Acceptors (runAcceptors) +import Cardano.Tracer.Acceptors (runAcceptors, runAcceptorsWithBrakes) import Cardano.Tracer.CLI (TracerParams (..)) import Cardano.Tracer.Configuration (TracerConfig, readTracerConfig) -import Cardano.Tracer.Handlers (runHandlers) -import Cardano.Tracer.Types (initAcceptedItems) +import Cardano.Tracer.Handlers.Logs.Rotator (runLogsRotator) +import Cardano.Tracer.Handlers.Metrics.Run (runMetricsHandler) +import Cardano.Tracer.Types runCardanoTracer :: TracerParams -> IO () -runCardanoTracer TracerParams{..} = +runCardanoTracer TracerParams{tracerConfig} = readTracerConfig tracerConfig >>= runCardanoTracerWithConfig -runCardanoTracerWithConfig :: TracerConfig -> IO () +runCardanoTracerWithConfig + :: TracerConfig + -> IO () runCardanoTracerWithConfig config = do - acceptedItems <- initAcceptedItems - -- Run two main threads: - -- 1. For all acceptors: they ask 'TraceObject's and metrics from the node - -- and collect them in 'acceptedItems'. - -- 2. For all handlers: they take items from 'acceptedItems' and do something - -- with them (write to log files and return by web-request via EKG API). - concurrently_ (runAcceptors config acceptedItems) - (runHandlers config acceptedItems) + acceptedMetrics <- initAcceptedMetrics + acceptedNodeInfo <- initAcceptedNodeInfo + run3ActionsInParallel + (runLogsRotator config) + (runMetricsHandler config acceptedMetrics acceptedNodeInfo) + (runAcceptors config acceptedMetrics acceptedNodeInfo) + +runCardanoTracerWithConfigBrakes + :: TracerConfig + -> NonEmpty (TVar Bool, TVar Bool) + -> IO () +runCardanoTracerWithConfigBrakes config protocolsBrakes = do + acceptedMetrics <- initAcceptedMetrics + acceptedNodeInfo <- initAcceptedNodeInfo + run3ActionsInParallel + (runLogsRotator config) + (runMetricsHandler config acceptedMetrics acceptedNodeInfo) + (runAcceptorsWithBrakes config acceptedMetrics acceptedNodeInfo protocolsBrakes) + +run3ActionsInParallel + :: IO () + -> IO () + -> IO () + -> IO () +run3ActionsInParallel action1 action2 action3 = + withAsync action1 $ \a1 -> + withAsync action2 $ \a2 -> + withAsync action3 $ \a3 -> do + void $ wait a1 + void $ wait a2 + void $ wait a3 diff --git a/cardano-tracer/src/Cardano/Tracer/Types.hs b/cardano-tracer/src/Cardano/Tracer/Types.hs index 24382c8f74d..44e665ab86e 100644 --- a/cardano-tracer/src/Cardano/Tracer/Types.hs +++ b/cardano-tracer/src/Cardano/Tracer/Types.hs @@ -1,90 +1,80 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} module Cardano.Tracer.Types - ( AcceptedItems - , TraceObjects + ( AcceptedMetrics + , AcceptedNodeInfo , Metrics , NodeId (..) 
- , NodeName - , getNodeName - , addressToNodeId - , initAcceptedItems - , prepareAcceptedItems + , connIdToNodeId + , initAcceptedMetrics + , initAcceptedNodeInfo + , prepareAcceptedMetrics + , printNodeFullId ) where -import Control.Concurrent.STM.TBQueue (TBQueue, newTBQueueIO) +import Control.Concurrent.STM (atomically) +import Control.Concurrent.STM.TVar (TVar, modifyTVar', newTVarIO, readTVarIO) import Control.Monad (unless) import Data.Hashable (Hashable) import Data.HashMap.Strict (HashMap) import qualified Data.HashMap.Strict as HM -import Data.IORef (IORef, atomicModifyIORef', newIORef, readIORef) -import Data.Text (Text, pack, replace, splitOn, unpack) -import Data.Word (Word16) +import Data.Text (Text) +import qualified Data.Text as T import GHC.Generics (Generic) import qualified System.Metrics as EKG -import Cardano.Logging (TraceObject) +import Ouroboros.Network.Socket (ConnectionId (..)) -import Trace.Forward.Protocol.Type (NodeInfoStore) +import Trace.Forward.Protocol.Type (NodeInfo) import System.Metrics.Store.Acceptor (MetricsLocalStore, emptyMetricsLocalStore) --- | Human-readable name of node. -type NodeName = Text +-- | Unique identifier of the node, based on 'remoteAddress' from 'ConnectionId'. +newtype NodeId = NodeId Text + deriving (Eq, Generic, Hashable, Ord, Show) -getNodeName :: NodeInfoStore -> IO (Maybe NodeName) -getNodeName niStore = lookup "NodeName" <$> readIORef niStore - --- | Unique identifier of the node. -data NodeId = NodeId - { nodeHost :: !String - , nodePort :: !Word16 - } deriving (Eq, Generic, Hashable, Ord) +connIdToNodeId :: Show addr => ConnectionId addr -> NodeId +connIdToNodeId ConnectionId{remoteAddress} = NodeId preparedAddress + where + -- We have to remove "wrong" symbols from 'NodeId', + -- to make it appropriate for the name of the subdirectory. + preparedAddress = + T.replace "LocalAddress" "" -- There are only local addresses by design. + . T.replace " " "-" + . T.replace "\"" "" + . T.replace "/" "-" + . T.pack + $ show remoteAddress -instance Show NodeId where - show (NodeId pipeWithNum 0) = pipeWithNum - show (NodeId ip port) = ip <> "-" <> show port +printNodeFullId :: Text -> NodeId -> Text +printNodeFullId "" (NodeId p) = T.drop 2 p -- In this case, '--' in the beginning is useless. +printNodeFullId nodeName (NodeId p) = nodeName <> p -addressToNodeId :: String -> NodeId -addressToNodeId remoteAddress = - -- The string 'remoteAddress' can contain two kinds of address: - -- 1. the pair of IP:port - -- 2. the path to local socket file with the connection number, - -- to make 'remoteAddress' unique for each connected node. - case splitOn ":" . pack $ remoteAddress of - [ip, port] -> NodeId (unpack ip) (read (unpack port) :: Word16) - _ -> NodeId preparedLocalSocket 0 - where - preparedLocalSocket = - -- The format of 'remoteAddress' in this case looks like 'LocalAddress "temp-NUM"', - -- so make it simpler, like 'LocalAddress-temp-NUM'. - unpack . replace " " "-" . replace "\"" "" . replace "/" "-" . pack $ remoteAddress +-- | We have to create EKG.Store and MetricsLocalStore +-- to keep the metrics accepted from the node. 
+type Metrics = (EKG.Store, TVar MetricsLocalStore) -type TraceObjects = TBQueue TraceObject +type AcceptedMetrics = TVar (HashMap NodeId Metrics) -type Metrics = (EKG.Store, IORef MetricsLocalStore) +type AcceptedNodeInfo = TVar (HashMap NodeId NodeInfo) -type AcceptedItems = IORef (HashMap NodeId (NodeInfoStore, TraceObjects, Metrics)) +initAcceptedMetrics :: IO AcceptedMetrics +initAcceptedMetrics = newTVarIO HM.empty -initAcceptedItems :: IO AcceptedItems -initAcceptedItems = newIORef HM.empty +initAcceptedNodeInfo :: IO AcceptedNodeInfo +initAcceptedNodeInfo = newTVarIO HM.empty -prepareAcceptedItems +prepareAcceptedMetrics :: NodeId - -> AcceptedItems + -> AcceptedMetrics -> IO () -prepareAcceptedItems nodeId itemsIORef = do - items' <- readIORef itemsIORef - -- If such 'nodeId' is already presented in 'items', it means that this node - -- already worked with the tracer and now it's re-connect to the tracer. - -- No need to re-create its stores. - unless (nodeId `HM.member` items') $ do - niStore <- newIORef [] - trObQueue <- newTBQueueIO 2000 - ekgStore <- EKG.newStore - localStore <- newIORef emptyMetricsLocalStore - let storesForNewNode = (niStore, trObQueue, (ekgStore, localStore)) - atomicModifyIORef' itemsIORef $ \items -> - (HM.insert nodeId storesForNewNode items, ()) +prepareAcceptedMetrics nodeId acceptedMetrics = do + metrics <- readTVarIO acceptedMetrics + unless (nodeId `HM.member` metrics) $ do + storesForNewNode <- + (,) <$> EKG.newStore + <*> newTVarIO emptyMetricsLocalStore + atomically $ modifyTVar' acceptedMetrics $ HM.insert nodeId storesForNewNode diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Forwarder.hs b/cardano-tracer/test/Cardano/Tracer/Test/Forwarder.hs index a2b4b6827b5..b08bb03971a 100644 --- a/cardano-tracer/test/Cardano/Tracer/Test/Forwarder.hs +++ b/cardano-tracer/test/Cardano/Tracer/Test/Forwarder.hs @@ -1,6 +1,5 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE PackageImports #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -11,120 +10,140 @@ module Cardano.Tracer.Test.Forwarder import Codec.CBOR.Term (Term) import Control.Concurrent (threadDelay) -import Control.Concurrent.Async (async) -import Control.Concurrent.STM.TBQueue (TBQueue, newTBQueueIO, writeTBQueue) -import Control.Exception (SomeException, try) +import Control.Concurrent.Async import Control.Monad (forever) -import Control.Monad.STM (atomically) import "contra-tracer" Control.Tracer (nullTracer) import qualified Data.ByteString.Lazy as LBS -import Data.Fixed (Pico) -import Data.Time.Clock (NominalDiffTime, UTCTime, getCurrentTime, secondsToNominalDiffTime) +import Data.Time.Clock (UTCTime, getCurrentTime) import Data.Void (Void) import Data.Word (Word16) + +import Ouroboros.Network.IOManager (IOManager, withIOManager) + import Ouroboros.Network.Driver.Limits (ProtocolTimeLimits) -import Ouroboros.Network.IOManager (withIOManager) -import Ouroboros.Network.Mux (MiniProtocol (..), MiniProtocolLimits (..), - MiniProtocolNum (..), MuxMode (..), - OuroborosApplication (..), RunMiniProtocol (..), - miniProtocolLimits, miniProtocolNum, miniProtocolRun) -import Ouroboros.Network.Protocol.Handshake.Codec (cborTermVersionDataCodec, - timeLimitsHandshake) -import Ouroboros.Network.Protocol.Handshake.Unversioned (UnversionedProtocol (..), - UnversionedProtocolData (..), - unversionedHandshakeCodec, - unversionedProtocolDataCodec) +import Ouroboros.Network.ErrorPolicy (nullErrorPolicies) +import 
Ouroboros.Network.Mux (MiniProtocol (..), + MiniProtocolLimits (..), MiniProtocolNum (..), + MuxMode (..), OuroborosApplication (..), + RunMiniProtocol (..), miniProtocolLimits, miniProtocolNum, + miniProtocolRun) +import Ouroboros.Network.Protocol.Handshake.Codec + (cborTermVersionDataCodec, noTimeLimitsHandshake) import Ouroboros.Network.Protocol.Handshake.Type (Handshake) -import Ouroboros.Network.Protocol.Handshake.Version (acceptableVersion, simpleSingletonVersions) -import Ouroboros.Network.Snocket (Snocket, localAddressFromPath, localSnocket) -import Ouroboros.Network.Socket (connectToNode, nullNetworkConnectTracers) +import Ouroboros.Network.Protocol.Handshake.Unversioned + (UnversionedProtocol (..), UnversionedProtocolData (..), + unversionedHandshakeCodec, unversionedProtocolDataCodec) +import Ouroboros.Network.Protocol.Handshake.Version + (acceptableVersion, simpleSingletonVersions) +import Ouroboros.Network.Snocket (Snocket, localAddressFromPath, + localSnocket) +import Ouroboros.Network.Socket (AcceptedConnectionsLimit (..), + SomeResponderApplication (..), cleanNetworkMutableState, + newNetworkMutableState, nullNetworkServerTracers, + withServerNode) + import qualified System.Metrics as EKG -import Cardano.Logging +import Cardano.Logging (DetailLevel (..), SeverityS (..), TraceObject (..)) import qualified Trace.Forward.Configuration as TF -import Trace.Forward.Network.Forwarder (forwardTraceObjects) +import Trace.Forward.Network.Forwarder import Trace.Forward.Protocol.Type (NodeInfo (..)) +import Trace.Forward.Utils import qualified System.Metrics.Configuration as EKGF -import System.Metrics.Network.Forwarder (forwardEKGMetrics) +import System.Metrics.Network.Forwarder -launchForwardersSimple :: String -> IO () -launchForwardersSimple localSock = do +launchForwardersSimple + :: FilePath + -> Word + -> Word + -> IO () +launchForwardersSimple p connSize disconnSize = withIOManager $ \iomgr -> + runActionInLoop + (launchForwardersSimple' iomgr p connSize disconnSize) + (TF.LocalPipe p) + 1 + +launchForwardersSimple' + :: IOManager + -> FilePath + -> Word + -> Word + -> IO () +launchForwardersSimple' iomgr p connSize disconnSize = do now <- getCurrentTime - try (launchForwarders' localSock Nothing (ekgConfig, tfConfig now)) >>= \case - Left (_e :: SomeException) -> - launchForwardersSimple localSock - Right _ -> return () + let snocket = localSnocket iomgr p + address = localAddressFromPath p + doListenToAcceptor snocket address noTimeLimitsHandshake (ekgConfig, tfConfig now) where ekgConfig :: EKGF.ForwarderConfiguration ekgConfig = EKGF.ForwarderConfiguration - { EKGF.forwarderTracer = nullTracer - , EKGF.acceptorEndpoint = EKGF.LocalPipe localSock + { EKGF.forwarderTracer = nullTracer + , EKGF.acceptorEndpoint = EKGF.LocalPipe p , EKGF.reConnectFrequency = 1.0 - , EKGF.actionOnRequest = const (return ()) + , EKGF.actionOnRequest = const $ return () } tfConfig :: UTCTime -> TF.ForwarderConfiguration TraceObject tfConfig now = TF.ForwarderConfiguration - { TF.forwarderTracer = nullTracer - , TF.acceptorEndpoint = TF.LocalPipe localSock - , TF.nodeBasicInfo = return - NodeInfo - { niName = "core-1" - , niProtocol = "Shelley" - , niVersion = "1.28.0" - , niCommit = "abcdefg" - , niStartTime = now - , niSystemStartTime = now - } + { TF.forwarderTracer = nullTracer + , TF.acceptorEndpoint = TF.LocalPipe p + , TF.getNodeInfo = + return NodeInfo + { niName = "core-1" + , niProtocol = "Shelley" + , niVersion = "1.28.0" + , niCommit = "abcdefg" + , niStartTime = now + , 
niSystemStartTime = now + } + , TF.disconnectedQueueSize = disconnSize + , TF.connectedQueueSize = connSize } -launchForwarders' - :: String - -> Maybe Pico - -> (EKGF.ForwarderConfiguration, TF.ForwarderConfiguration TraceObject) - -> IO () -launchForwarders' localSock benchFillFreq configs = withIOManager $ \iocp -> do - let snocket = localSnocket iocp localSock - address = localAddressFromPath localSock - doConnectToAcceptor snocket address timeLimitsHandshake benchFillFreq configs - -doConnectToAcceptor - :: Snocket IO fd addr +doListenToAcceptor + :: Ord addr + => Snocket IO fd addr -> addr -> ProtocolTimeLimits (Handshake UnversionedProtocol Term) - -> Maybe Pico -> (EKGF.ForwarderConfiguration, TF.ForwarderConfiguration TraceObject) -> IO () -doConnectToAcceptor snocket address timeLimits benchFillFreq (ekgConfig, tfConfig) = do - tfQueue <- newTBQueueIO 1000000 - _ <- async $ traceObjectsWriter tfQueue benchFillFreq +doListenToAcceptor snocket address timeLimits (ekgConfig, tfConfig) = do store <- EKG.newStore EKG.registerGcMetrics store - - connectToNode - snocket - unversionedHandshakeCodec - timeLimits - (cborTermVersionDataCodec unversionedProtocolDataCodec) - nullNetworkConnectTracers - acceptableVersion - (simpleSingletonVersions - UnversionedProtocol - UnversionedProtocolData $ - forwarderApp [ (forwardEKGMetrics ekgConfig store, 1) - , (forwardTraceObjects tfConfig tfQueue, 2) - ] - ) - Nothing - address + sink <- initForwardSink tfConfig + withAsync (traceObjectsWriter sink) $ \_ -> do + networkState <- newNetworkMutableState + race_ (cleanNetworkMutableState networkState) + $ withServerNode + snocket + nullNetworkServerTracers + networkState + (AcceptedConnectionsLimit maxBound maxBound 0) + address + unversionedHandshakeCodec + timeLimits + (cborTermVersionDataCodec unversionedProtocolDataCodec) + acceptableVersion + (simpleSingletonVersions + UnversionedProtocol + UnversionedProtocolData + (SomeResponderApplication $ + forwarderApp [ (forwardEKGMetricsResp ekgConfig store, 1) + , (forwardTraceObjectsResp tfConfig sink, 2) + ] + ) + ) + nullErrorPolicies + $ \_ serverAsync -> + wait serverAsync -- Block until async exception. where forwarderApp - :: [(RunMiniProtocol 'InitiatorMode LBS.ByteString IO () Void, Word16)] - -> OuroborosApplication 'InitiatorMode addr LBS.ByteString IO () Void + :: [(RunMiniProtocol 'ResponderMode LBS.ByteString IO Void (), Word16)] + -> OuroborosApplication 'ResponderMode addr LBS.ByteString IO Void () forwarderApp protocols = OuroborosApplication $ \_connectionId _shouldStopSTM -> [ MiniProtocol @@ -135,26 +154,18 @@ doConnectToAcceptor snocket address timeLimits benchFillFreq (ekgConfig, tfConfi | (prot, num) <- protocols ] -traceObjectsWriter :: TBQueue TraceObject -> Maybe Pico -> IO () -traceObjectsWriter queue benchFillFreq = forever $ do - now <- getCurrentTime - atomically $ writeTBQueue queue (mkTraceObject now) - threadDelay fillPause +traceObjectsWriter :: ForwardSink TraceObject -> IO () +traceObjectsWriter sink = forever $ do + writeToSink sink . mkTraceObject =<< getCurrentTime + threadDelay 50000 where - mkTraceObject now' = TraceObject + mkTraceObject now = TraceObject { toHuman = Just "Human Message" , toMachine = Just "{\"msg\": \"forMachine\"}" , toNamespace = ["demoNamespace"] , toSeverity = Info , toDetails = DNormal - , toTimestamp = now' - , toHostname = "linux" + , toTimestamp = now + , toHostname = "nixos" , toThreadId = "1" } - - fillPause = case benchFillFreq of - Just ff -> toMicroSecs . 
secondsToNominalDiffTime $ ff - Nothing -> 500000 - - toMicroSecs :: NominalDiffTime -> Int - toMicroSecs dt = fromEnum dt `div` 1000000 diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Logs/File.hs b/cardano-tracer/test/Cardano/Tracer/Test/Logs/File.hs deleted file mode 100644 index d7369cb5a97..00000000000 --- a/cardano-tracer/test/Cardano/Tracer/Test/Logs/File.hs +++ /dev/null @@ -1,94 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE ScopedTypeVariables #-} - -module Cardano.Tracer.Test.Logs.File - ( tests - ) where - -import Control.Concurrent (forkIO, killThread, threadDelay) -import Control.Monad (filterM) -import Test.Tasty -import Test.Tasty.QuickCheck -import System.Directory -import System.FilePath - -import Cardano.Tracer.Configuration -import Cardano.Tracer.Handlers.Logs.Log (isItLog, isItSymLink) -import Cardano.Tracer.Run (runCardanoTracerWithConfig) - -import Cardano.Tracer.Test.Forwarder (launchForwardersSimple) - -tests :: TestTree -tests = localOption (QuickCheckTests 1) $ testGroup "Test.Logs.File" - [ testProperty ".log" $ propFile ForHuman "text" "cardano-tracer-log.sock" - , testProperty ".json" $ propFile ForMachine "json" "cardano-tracer-json.sock" - ] - -propFile - :: LogFormat - -> FilePath - -> String - -> Property -propFile format suffix localSockName = ioProperty $ do - tmpDir <- getTemporaryDirectory - let rootDir = tmpDir ("test-logs-" <> suffix) - localSock = tmpDir localSockName - -- Remove rootDir if needed. - removePathForcibly rootDir - -- Remove localSock if needed. - removePathForcibly localSock - -- Run cardano-tracer and demo-forwarder-mux. - tracerThr <- forkIO $ runCardanoTracerWithConfig (config rootDir localSock) - threadDelay 500000 - forwarderThr <- forkIO $ launchForwardersSimple localSock - -- Wait for some 'TraceObject's... - threadDelay 5000000 - -- Stop both sides. - killThread forwarderThr - killThread tracerThr - threadDelay 100000 - -- Check that rootDir exists... - doesDirectoryExist rootDir >>= \case - True -> - -- ... and contains one node's subdir... - listDirectory rootDir >>= \case - [] -> false "root dir is empty" - [subDir] -> - withCurrentDirectory rootDir $ - -- ... with *.log-files inside... - listDirectory subDir >>= \case - [] -> false "subdir is empty" - logsAndSymLink -> - withCurrentDirectory subDir $ - case filter (isItLog format) logsAndSymLink of - [] -> false "subdir doesn't contain expected logs" - logsWeNeed -> - -- ... and one symlink... - filterM (isItSymLink format) logsAndSymLink >>= \case - [] -> false "subdir doesn't contain a symlink" - [symLink] -> do - -- ... to the latest *.log-file. - maybeLatestLog <- getSymbolicLinkTarget symLink - -- The logs' names contain timestamps, so the - -- latest log is the maximum one. - let latestLog = maximum logsWeNeed - return $ latestLog === maybeLatestLog - _ -> false "there is more than one symlink" - _ -> false "root dir contains more than one subdir" - False -> false "root dir doesn't exist" - where - config rootDir' localSock' = TracerConfig - { connectMode = Initiator - , acceptAt = [LocalSocket localSock'] - , loRequestNum = 1 - , ekgRequestFreq = 1.0 - , hasEKG = Nothing - , hasPrometheus = Nothing - , logging = [LoggingParams rootDir' FileMode format] - , rotation = Nothing - } - - false :: String -> IO Property - false msg = return . 
counterexample msg $ property False diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Logs/Rotator.hs b/cardano-tracer/test/Cardano/Tracer/Test/Logs/Rotator.hs deleted file mode 100644 index d8043526f97..00000000000 --- a/cardano-tracer/test/Cardano/Tracer/Test/Logs/Rotator.hs +++ /dev/null @@ -1,86 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE ScopedTypeVariables #-} - -module Cardano.Tracer.Test.Logs.Rotator - ( tests - ) where - -import Control.Concurrent (forkIO, killThread, threadDelay) -import Test.Tasty -import Test.Tasty.QuickCheck -import System.Directory -import System.FilePath - -import Cardano.Tracer.Configuration -import Cardano.Tracer.Handlers.Logs.Log (isItLog) -import Cardano.Tracer.Run (runCardanoTracerWithConfig) - -import Cardano.Tracer.Test.Forwarder (launchForwardersSimple) - -tests :: TestTree -tests = localOption (QuickCheckTests 1) $ testGroup "Test.Logs.Rotator" - [ testProperty "basic" $ propRotator "cardano-tracer-totator.sock" - ] - -propRotator :: String -> Property -propRotator localSockName = ioProperty $ do - tmpDir <- getTemporaryDirectory - let rootDir = tmpDir "test-logs-rotator" - localSock = tmpDir localSockName - -- Remove rootDir if needed. - removePathForcibly rootDir - -- Remove localSock if needed. - removePathForcibly localSock - -- Run cardano-tracer and demo-forwarder-mux. - tracerThr <- forkIO $ runCardanoTracerWithConfig (config rootDir localSock) - threadDelay 500000 - forwarderThr <- forkIO $ launchForwardersSimple localSock - -- Wait while rotation will occure... - threadDelay 25000000 - -- Stop both sides. - killThread forwarderThr - killThread tracerThr - threadDelay 100000 - -- Check that rootDir exists... - doesDirectoryExist rootDir >>= \case - True -> - -- ... and contains one node's subdir... - listDirectory rootDir >>= \case - [] -> false "root dir is empty" - [subDir] -> - withCurrentDirectory rootDir $ - -- ... with *.log-files inside... - listDirectory subDir >>= \case - [] -> false "subdir is empty" - logsAndSymLink -> - withCurrentDirectory subDir $ - case filter (isItLog format) logsAndSymLink of - [] -> false "subdir doesn't contain expected logs" - logsWeNeed -> do - let thereAreMoreThanOneLog = length logsWeNeed > 1 - return $ thereAreMoreThanOneLog === True - _ -> false "root dir contains more than one subdir" - False -> false "root dir doesn't exist" - where - config rootDir' localSock' = TracerConfig - { connectMode = Initiator - , acceptAt = [LocalSocket localSock'] - , loRequestNum = 1 - , ekgRequestFreq = 1.0 - , hasEKG = Nothing - , hasPrometheus = Nothing - , logging = [LoggingParams rootDir' FileMode format] - , rotation = Just $ - RotationParams - { rpLogLimitBytes = 100 - , rpMaxAgeHours = 1 - , rpKeepFilesNum = 10 - } - } - - format = ForHuman - - false :: String -> IO Property - false msg = return . 
counterexample msg $ property False diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Logs/Tests.hs b/cardano-tracer/test/Cardano/Tracer/Test/Logs/Tests.hs new file mode 100644 index 00000000000..f19726f1152 --- /dev/null +++ b/cardano-tracer/test/Cardano/Tracer/Test/Logs/Tests.hs @@ -0,0 +1,91 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Cardano.Tracer.Test.Logs.Tests + ( tests + ) where + +import Control.Concurrent.Async (withAsync) +import Control.Concurrent.STM (atomically) +import Control.Concurrent.STM.TVar (modifyTVar', newTVarIO) +import Control.Monad (filterM) +import qualified Data.List.NonEmpty as NE +import Test.Tasty +import Test.Tasty.QuickCheck +import System.Directory +import System.FilePath +import System.Time.Extra + +import Cardano.Tracer.Configuration +import Cardano.Tracer.Handlers.Logs.Log (isItLog, isItSymLink) +import Cardano.Tracer.Run (runCardanoTracerWithConfigBrakes) + +import Cardano.Tracer.Test.Forwarder (launchForwardersSimple) +import Cardano.Tracer.Test.Utils + +tests :: TestTree +tests = localOption (QuickCheckTests 1) $ testGroup "Test.Logs" + [ testProperty ".log" $ propRunInLogsStructure (propLogs ForHuman) + , testProperty ".json" $ propRunInLogsStructure (propLogs ForMachine) + ] + +propLogs :: LogFormat -> FilePath -> FilePath -> IO Property +propLogs format rootDir localSock = do + stopEKG <- newTVarIO False + stopTF <- newTVarIO False + let brakes = NE.fromList [(stopEKG, stopTF)] + withAsync (runCardanoTracerWithConfigBrakes (config rootDir localSock) brakes) $ \_ -> + withAsync (launchForwardersSimple localSock 1000 10000) $ \_ -> do + sleep 15.0 -- Wait till some rotation is done. + atomically $ do + modifyTVar' stopEKG . const $ True + modifyTVar' stopTF . const $ True + sleep 1.0 + + doesDirectoryExist rootDir >>= \case + True -> + -- ... and contains one node's subdir... + listDirectory rootDir >>= \case + [] -> false "root dir is empty" + [subDir] -> + withCurrentDirectory rootDir $ + -- ... with *.log-files inside... + listDirectory subDir >>= \case + [] -> false "subdir is empty" + logsAndSymLink -> + withCurrentDirectory subDir $ + case filter (isItLog format) logsAndSymLink of + [] -> false "subdir doesn't contain expected logs" + logsWeNeed -> + if length logsWeNeed > 1 + then + -- ... and one symlink... + filterM (isItSymLink format) logsAndSymLink >>= \case + [] -> false "subdir doesn't contain a symlink" + [symLink] -> do + -- ... to the latest *.log-file. + maybeLatestLog <- getSymbolicLinkTarget symLink + -- The logs' names contain timestamps, so the + -- latest log is the maximum one. 
+ let latestLog = maximum logsWeNeed + return $ latestLog === takeFileName maybeLatestLog + _ -> false "there is more than one symlink" + else false "there is still 1 single log, no rotation" + _ -> false "root dir contains more than one subdir" + False -> false "root dir doesn't exist" + where + config root p = TracerConfig + { network = ConnectTo $ NE.fromList [LocalSocket p] + , loRequestNum = Just 1 + , ekgRequestFreq = Just 1.0 + , hasEKG = Nothing + , hasPrometheus = Nothing + , logging = NE.fromList [LoggingParams root FileMode format] + , rotation = Just $ RotationParams + { rpLogLimitBytes = 100 + , rpMaxAgeHours = 1 + , rpKeepFilesNum = 10 + } + } diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Network/Tests.hs b/cardano-tracer/test/Cardano/Tracer/Test/Network/Tests.hs new file mode 100644 index 00000000000..a98c8c96b5e --- /dev/null +++ b/cardano-tracer/test/Cardano/Tracer/Test/Network/Tests.hs @@ -0,0 +1,78 @@ +{-# LANGUAGE CPP #-} + +module Cardano.Tracer.Test.Network.Tests + ( tests + ) where + +import Control.Concurrent.Async (asyncBound, uninterruptibleCancel) +import Control.Monad.Extra (ifM) +import qualified Data.List.NonEmpty as NE +import Test.Tasty +import Test.Tasty.QuickCheck +import System.Time.Extra (sleep) + +import Cardano.Tracer.Configuration +import Cardano.Tracer.Run + +import Cardano.Tracer.Test.Forwarder +import Cardano.Tracer.Test.Utils + +data SideToRestart = First | Second + +tests :: TestTree +tests = localOption (QuickCheckTests 1) $ testGroup "Test.Network" + [ testProperty "restart forwarder" $ propRunInLogsStructure (propNetwork First) + , testProperty "restart acceptor" $ propRunInLogsStructure (propNetwork Second) + ] + +propNetwork :: SideToRestart -> FilePath -> FilePath -> IO Property +propNetwork whichSide rootDir localSock = do + case whichSide of + First -> + propNetwork' + rootDir + ( runCardanoTracerWithConfig (config rootDir localSock) + , launchForwardersSimple localSock 1000 10000 + ) + Second -> + propNetwork' + rootDir + ( launchForwardersSimple localSock 1000 10000 + , runCardanoTracerWithConfig (config rootDir localSock) + ) + +propNetwork' :: FilePath -> (IO (), IO ()) -> IO Property +propNetwork' rootDir (fstSide, sndSide) = do + f <- asyncBound fstSide + s <- asyncBound sndSide + -- Now sides should be connected and do some work. + sleep 3.0 + -- Forcibly stop the first side (like killing the process in the real world). + uninterruptibleCancel f + -- Now the second side is working without the first one, and tries to re-connect. + sleep 4.0 + removeDirectoryContent rootDir -- To check it later. + -- Restart the first side, now the connection should be re-established. + f' <- asyncBound fstSide + -- Now it should be connected to the second side again, + -- and, if so, the root dir should be re-created. + sleep 3.0 + -- Forcibly kill both sides. + uninterruptibleCancel s + uninterruptibleCancel f' + -- Check if the root directory isn't empty, which means that the connection + -- between parts was re-established and some work was performed. 
+ ifM (doesDirectoryEmpty rootDir) + (false "root dir is empty") + (return $ property True) + +config :: FilePath -> FilePath -> TracerConfig +config root p = TracerConfig + { network = ConnectTo $ NE.fromList [LocalSocket p] + , loRequestNum = Just 1 + , ekgRequestFreq = Just 1.0 + , hasEKG = Nothing + , hasPrometheus = Nothing + , logging = NE.fromList [LoggingParams root FileMode ForMachine] + , rotation = Nothing + } diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Queue/Tests.hs b/cardano-tracer/test/Cardano/Tracer/Test/Queue/Tests.hs new file mode 100644 index 00000000000..c6d257fce8c --- /dev/null +++ b/cardano-tracer/test/Cardano/Tracer/Test/Queue/Tests.hs @@ -0,0 +1,53 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE OverloadedStrings #-} + +module Cardano.Tracer.Test.Queue.Tests + ( tests + ) where + +import Control.Concurrent.Async (withAsyncBound) +import GHC.IO.Handle (hDuplicateTo) +import qualified Data.Text as T +import qualified Data.Text.IO as TIO +import Test.Tasty +import Test.Tasty.QuickCheck +import System.IO +import System.IO.Extra +import System.Time.Extra (sleep) + +import Cardano.Tracer.Test.Forwarder +import Cardano.Tracer.Test.Utils + +tests :: TestTree +tests = localOption (QuickCheckTests 1) $ testGroup "Test.Queue" + [ testProperty "check queue" $ propRunInLogsStructure propQueue + ] + +propQueue :: FilePath -> FilePath -> IO Property +propQueue _rootDir localSock = + withTempFile $ \tmpStdout -> + -- Run the forwarder only. It imitates the case when the acceptor is + -- misconfigured and the connection cannot be established at all. + -- In this case, the forwarder should collect trace items in its internal + -- "flexible queue" and periodically flush them to stdout. + withAsyncBound (launchForwardersSimple localSock connSize disconnSize) $ \_ -> do + redirectStdoutToFile tmpStdout + (True ===) <$> stdoutAnalyzer tmpStdout + +redirectStdoutToFile :: FilePath -> IO () +redirectStdoutToFile f = do + fh <- openFile f ReadWriteMode + hDuplicateTo fh stdout + +stdoutAnalyzer :: FilePath -> IO Bool +stdoutAnalyzer f = do + sleep 10.0 + -- Now the file 'f' should already contain flushed tracing items, so analyze them. + content <- TIO.readFile f + -- Check that all 'disconnSize' items are in the file. 
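+  -- ('disconnSize' is the 'TF.disconnectedQueueSize' given to the forwarder in
+  -- 'launchForwardersSimple', so exactly that many 'TraceObject's are expected
+  -- to have been flushed while the connection could not be established.)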
+ let traceObjectsNum = T.count "TraceObject" content + return $ traceObjectsNum == fromIntegral disconnSize + +connSize, disconnSize :: Word +connSize = 50 +disconnSize = 100 diff --git a/cardano-tracer/test/Cardano/Tracer/Test/SSH/Tests.hs b/cardano-tracer/test/Cardano/Tracer/Test/SSH/Tests.hs new file mode 100644 index 00000000000..8f6d2784381 --- /dev/null +++ b/cardano-tracer/test/Cardano/Tracer/Test/SSH/Tests.hs @@ -0,0 +1,86 @@ +{-# LANGUAGE CPP #-} + +module Cardano.Tracer.Test.SSH.Tests + ( tests + ) where + +import Control.Concurrent.Async (withAsync) +import Control.Concurrent.STM (atomically) +import Control.Concurrent.STM.TVar (modifyTVar', newTVarIO) +import Control.Monad.Extra (ifM) +import qualified Data.List.NonEmpty as NE +import Test.Tasty +import Test.Tasty.QuickCheck +import System.Process +import System.Time.Extra (sleep) + +import Cardano.Tracer.Configuration +import Cardano.Tracer.Run + +import Cardano.Tracer.Test.Forwarder +import Cardano.Tracer.Test.Utils + +data NetworkMode = Initiator | Responder + +tests :: TestTree +tests = localOption (QuickCheckTests 1) $ testGroup "Test.Network" + [ testProperty "SSH forwarding, initiator" $ + propRunInLogsStructure2 (propSSHForward Initiator) + , testProperty "SSH forwarding, responder" $ + propRunInLogsStructure2 (propSSHForward Responder) + ] + +propSSHForward + :: NetworkMode + -> FilePath + -> FilePath + -> FilePath + -> IO Property +propSSHForward netMode rootDir localSock1 localSock2 = do + withCreateProcess (sshForward localSock1 localSock2) $ \_ _ _ _ -> do + sleep 0.5 + -- Please note that "node" and tracer use different local sockets + -- in this test, so they cannot establish the connection. + -- The only way to do it is an SSH forwarding between these local sockets. + stopEKG <- newTVarIO False + stopTF <- newTVarIO False + let brakes = NE.fromList [(stopEKG, stopTF)] + withAsync (runCardanoTracerWithConfigBrakes config brakes) $ \_ -> + withAsync (launchForwardersSimple localSock2 1000 10000) $ \_ -> do + sleep 4.0 -- Wait till some work is done. + atomically $ do + modifyTVar' stopEKG . const $ True + modifyTVar' stopTF . const $ True + sleep 1.0 + + -- Check if the root directory isn't empty, which means that the connection + -- between parts was established. It proves that SSH tunnel works. 
+ ifM (doesDirectoryEmpty rootDir) + (false "root dir is empty") + (return $ property True) + where + config = TracerConfig + { network = net + , loRequestNum = Just 1 + , ekgRequestFreq = Just 1.0 + , hasEKG = Nothing + , hasPrometheus = Nothing + , logging = NE.fromList [LoggingParams rootDir FileMode ForMachine] + , rotation = Nothing + } + + net = case netMode of + Initiator -> ConnectTo $ NE.fromList [LocalSocket localSock1] + Responder -> AcceptAt $ LocalSocket localSock1 + +sshForward :: FilePath -> FilePath -> CreateProcess +sshForward tracerSock forwarderSock = proc "ssh" + [ "-nNT" + , "-L" + , tracerSock + , ":" + , forwarderSock + , "-o" + , "\"ExitOnForwardFailure yes\"" + , "denis@127.0.0.1" + ] diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Types.hs b/cardano-tracer/test/Cardano/Tracer/Test/Types.hs deleted file mode 100644 index a823d69ce4d..00000000000 --- a/cardano-tracer/test/Cardano/Tracer/Test/Types.hs +++ /dev/null @@ -1,9 +0,0 @@ -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE TypeFamilies #-} - -{-# OPTIONS_GHC -Wno-orphans #-} - -module Cardano.Tracer.Test.Types - ( - ) where - diff --git a/cardano-tracer/test/Cardano/Tracer/Test/Utils.hs b/cardano-tracer/test/Cardano/Tracer/Test/Utils.hs new file mode 100644 index 00000000000..1dce885ec9e --- /dev/null +++ b/cardano-tracer/test/Cardano/Tracer/Test/Utils.hs @@ -0,0 +1,47 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeFamilies #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Cardano.Tracer.Test.Utils + ( false + , propRunInLogsStructure + , propRunInLogsStructure2 + , removeDirectoryContent + , doesDirectoryEmpty + ) where + +import Control.Exception (finally) +import System.Directory +import System.Directory.Extra +import System.IO.Extra +import Test.Tasty.QuickCheck + +false :: String -> IO Property +false msg = return . counterexample msg $ property False + +propRunInLogsStructure + :: (FilePath -> FilePath -> IO Property) + -> Property +propRunInLogsStructure testAction = ioProperty $ do + tmpDir <- getTemporaryDirectory + (rootDir, deleteDir) <- newTempDirWithin tmpDir + (localSock, deleteSock) <- newTempFileWithin tmpDir + testAction rootDir localSock `finally` deleteSock >> deleteDir + +propRunInLogsStructure2 + :: (FilePath -> FilePath -> FilePath -> IO Property) + -> Property +propRunInLogsStructure2 testAction = ioProperty $ do + tmpDir <- getTemporaryDirectory + (rootDir, deleteDir) <- newTempDirWithin tmpDir + (localSock1, deleteSock1) <- newTempFileWithin tmpDir + (localSock2, deleteSock2) <- newTempFileWithin tmpDir + testAction rootDir localSock1 localSock2 + `finally` deleteSock1 >> deleteSock2 >> deleteDir + +removeDirectoryContent :: FilePath -> IO () +removeDirectoryContent dir = listContents dir >>= mapM_ removePathForcibly + +doesDirectoryEmpty :: FilePath -> IO Bool +doesDirectoryEmpty dir = listContents dir >>= return . 
null diff --git a/cardano-tracer/test/cardano-tracer-test.hs b/cardano-tracer/test/cardano-tracer-test.hs index 598c2a1249b..f910f477edf 100644 --- a/cardano-tracer/test/cardano-tracer-test.hs +++ b/cardano-tracer/test/cardano-tracer-test.hs @@ -1,14 +1,15 @@ import Test.Tasty -import qualified Cardano.Tracer.Test.Logs.File as Test.File -import qualified Cardano.Tracer.Test.Logs.Rotator as Test.Rotator +import qualified Cardano.Tracer.Test.Logs.Tests as Logs +import qualified Cardano.Tracer.Test.Network.Tests as Network +import qualified Cardano.Tracer.Test.Queue.Tests as Queue +import qualified Cardano.Tracer.Test.SSH.Tests as SSH main :: IO () -main = defaultMain tests - -tests :: TestTree -tests = +main = defaultMain $ testGroup "cardano-tracer" - [ Test.File.tests - , Test.Rotator.tests + [ Logs.tests + , Network.tests + , Queue.tests + , SSH.tests ]
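Taken together, the patch replaces the old `AcceptedItems` plumbing with separate `AcceptedMetrics` and `AcceptedNodeInfo` stores (TVars instead of IORefs) and switches `TracerConfig` to a single `network` field. The sketch below is a minimal, hypothetical usage example assembled from the test configurations above (`Cardano.Tracer.Test.Logs.Tests`, `Cardano.Tracer.Test.SSH.Tests`); it is not code from the patch itself. The socket path and log root are placeholders, and the exact module exports are assumed, while the field and constructor names are the ones used in the tests.

```haskell
-- A minimal sketch of a tracer that accepts one local forwarder socket and
-- rotates machine-readable logs. Paths are placeholders; the rotation values
-- are the (deliberately small) ones used in the tests.
import qualified Data.List.NonEmpty as NE

import Cardano.Tracer.Configuration
import Cardano.Tracer.Run (runCardanoTracerWithConfig)

main :: IO ()
main = runCardanoTracerWithConfig exampleConfig
 where
  exampleConfig :: TracerConfig
  exampleConfig = TracerConfig
    { network        = AcceptAt $ LocalSocket "/tmp/forwarder.sock"
    , loRequestNum   = Just 1
    , ekgRequestFreq = Just 1.0
    , hasEKG         = Nothing
    , hasPrometheus  = Nothing
    , logging        = NE.fromList
        [LoggingParams "/tmp/tracer-logs" FileMode ForMachine]
    , rotation       = Just $ RotationParams
        { rpLogLimitBytes = 100
        , rpMaxAgeHours   = 1
        , rpKeepFilesNum  = 10
        }
    }
```

Going by the string replacements in `connIdToNodeId` and the example in the Prometheus handler, a forwarder connected through the local socket `/run/user/1000/core.sock` should be identified as `NodeId "--run-user-1000-core.sock"`, and `printNodeFullId "core-1"` applied to that id should give `core-1--run-user-1000-core.sock`, the full identifier listed on the Prometheus page.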