diff --git a/documentation/docusaurus.config.js b/documentation/docusaurus.config.js
index 1749be56ff..7cb379e6b8 100644
--- a/documentation/docusaurus.config.js
+++ b/documentation/docusaurus.config.js
@@ -35,12 +35,7 @@ const config = {
       docs: {
         sidebarPath: require.resolve('./sidebars.js'),
         lastVersion: 'current',
-        editLocalizedFiles: true,
-        versions: {
-          current: {
-            label: 'v3'
-          },
-        },
+        editLocalizedFiles: true
       },
       theme: { customCss: require.resolve('./src/css/custom.css') },
     },
@@ -75,11 +70,6 @@ const config = {
         position: 'left',
         label: 'Use cases',
       },
-      {
-        type: 'docsVersionDropdown',
-        position: 'right',
-        dropdownActiveClassDisabled: true,
-      },
       {
         type: 'localeDropdown',
         position: 'right',
diff --git a/documentation/i18n/zh/docusaurus-plugin-content-docs/version-v2.json b/documentation/i18n/zh/docusaurus-plugin-content-docs/version-v2.json
deleted file mode 100644
index 9569b9c834..0000000000
--- a/documentation/i18n/zh/docusaurus-plugin-content-docs/version-v2.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
-  "version.label": {
-    "message": "v2",
-    "description": "The label for version v2"
-  },
-  "sidebar.guideSidebar.category.Engine": {
-    "message": "Engine",
-    "description": "The label for category Engine in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.Engine.link.generated-index.description": {
-    "message": "Learn how to set up OIBus Engine.",
-    "description": "The generated-index page description for category Engine in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.South connectors": {
-    "message": "South connectors",
-    "description": "The label for category South connectors in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.South connectors.link.generated-index.description": {
-    "message": "Learn how to use and set up south connectors.",
-    "description": "The generated-index page description for category South connectors in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.North connectors": {
-    "message": "North connectors",
-    "description": "The label for category North connectors in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.North connectors.link.generated-index.description": {
-    "message": "Learn how to use and set up north connectors.",
-    "description": "The generated-index page description for category North connectors in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.Advanced": {
-    "message": "Advanced",
-    "description": "The label for category Advanced in sidebar guideSidebar"
-  },
-  "sidebar.guideSidebar.category.Advanced.link.generated-index.description": {
-    "message": "Learn some advanced concepts about OIBus.",
-    "description": "The generated-index page description for category Advanced in sidebar guideSidebar"
-  }
-}
diff --git a/documentation/versioned_docs/version-v2/developer/certificates.md b/documentation/versioned_docs/version-v2/developer/certificates.md
deleted file mode 100644
index 462c8629cd..0000000000
--- a/documentation/versioned_docs/version-v2/developer/certificates.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-displayed_sidebar: developerSidebar
-sidebar_position: 2
----
-
-# Certificates
-Some protocols and tools use certificates for authentication or signing purposes. If you need to create self-signed
-certificates to test OIBus, you can follow this guide.
-A configuration file cert.conf should be created with the settings for the certificate creation. Here is an example
-that will be used for this guide:
-```
-[ req ]
-default_bits = 2048
-default_md = sha256
-distinguished_name = subject
-req_extensions = req_ext
-x509_extensions = req_ext
-string_mask = utf8only
-prompt = no
-
-[ req_ext ]
-basicConstraints = CA:FALSE
-nsCertType = client, server
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment, dataEncipherment, keyCertSign
-extendedKeyUsage= serverAuth, clientAuth
-nsComment = "OIBus User Cert"
-subjectKeyIdentifier=hash
-authorityKeyIdentifier=keyid,issuer
-subjectAltName = URI:urn:opcua:user:oibus,IP: 127.0.0.1
-
-[ subject ]
-countryName = FR
-stateOrProvinceName = FR
-localityName = Chambéry
-organizationName = OI
-commonName = oibus
-```
-
-## Using certificates with ProSys OPC UA Simulation Server
-1. Create a private key and certificate using the `cert.conf`:
-```
-openssl req -new -x509 -keyout oibus.key -out oibus.pem -config cert.conf
-```
-2. Remove the private key passphrase:
-```
-openssl rsa -in oibus.key -out oibus.key
-```
-3. Create a DER cert for ProSys:
-```
-openssl x509 -inform PEM -outform DER -in oibus.pem -out oibus.der
-```
-
-4. Copy the DER cert into the ProSys USERS_PKI certificate folder: `prosys-opc-ua-simulation-server\USERS_PKI\CA\certs`
-
-## Signing the OIBus Windows Installer
-These commands can be used with **PowerShell**, on a Windows system.
-1. Generate a CSR (Certificate Signing Request) from the `cert.conf` file, and keep the private.key secret:
-```
-openssl req -new -newkey rsa:4096 -keyout private.key -sha256 -nodes -out oibus.csr -config cert.conf
-```
-2. Create a local self-signed certificate:
-```
-openssl x509 -req -in oibus.csr -signkey private.key -out oibus.crt
-```
-3. Convert the cert file to a PFX file:
-```
-openssl pkcs12 -export -in oibus.crt -inkey private.key -out oibus.pfx -passout pass:password -name OIBus
-```
-4. Convert the PFX certificate file to _base64_:
-```
-base64 oibus.pfx > oibus64.pfx
-```
-5. Run the sign tool:
-```
-$env:PFX_PASSWORD = "password" ; $env:PFX_PATH = "path" ; npm run build:win-setup
-```
diff --git a/documentation/versioned_docs/version-v2/developer/index.md b/documentation/versioned_docs/version-v2/developer/index.md
deleted file mode 100644
index 904e976071..0000000000
--- a/documentation/versioned_docs/version-v2/developer/index.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-displayed_sidebar: developerSidebar
-sidebar_position: 1
----
-
-# OIBus developer handbook
-
-## Steps to try out the application
-- Clone the repository: `git clone `
-- Run command `npm install` in the project root
-- Run command `npm run internal:build:web-client`. It will create a `build/web-client` folder for the frontend bundle. If you edit
-the frontend and want to auto-recompile the bundle, you can instead use the command `npm run watch:web-client`.
-- Run command `npm start` (this will start both the backend and frontend)
-- Open the following URL in a browser: `http://localhost:2223`. The port is specified in the `default-config.json`
-file (currently 2223 is the default port, it can be changed locally in your own config file generated at project startup)
-
-The folder `data-folder` is used to store the cache, logs and configuration files.
-
-The project is up and running, but currently there are no South or North connectors. A simple way to try out OIBus is
-to create a `FolderScanner` South connector and a `Console` North connector.
-
-
-## Run database servers
-
-With the help of the `tests/docker-compose.yml` file, we can run a few databases with the following command:
-
-`npm run test:setup-env`
-
-The following services will start: **mysql, mssql, postgresql**.
-If you want to change the credentials or the ports for the services, you can create your own `.env` file that won't be
-pushed to the repository. Note that in this case you will need to change the environment file path to `./.env` in the
-command above (`package.json`).
-
-## Commit and branch naming conventions
-
-The default branch is `main`; every new branch should be created from it.
-
-Branch naming convention: **descriptive-name-of-the-issue#\**
-
-For example: `fix-folder-scanner-path#1564`
-
-Commit and PR names must follow the [conventional commits standard](https://www.conventionalcommits.org/en/v1.0.0/)
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/_category_.json b/documentation/versioned_docs/version-v2/guide/advanced/_category_.json
deleted file mode 100644
index ac98b81949..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Advanced",
-  "position": 6,
-  "link": {
-    "type": "generated-index",
-    "description": "Learn some advanced concepts about OIBus."
-  }
-}
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/oibus-data-rate.md b/documentation/versioned_docs/version-v2/guide/advanced/oibus-data-rate.md
deleted file mode 100644
index 9b25bece19..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/oibus-data-rate.md
+++ /dev/null
@@ -1,161 +0,0 @@
----
-sidebar_position: 3
----
-
-# Data rate estimation and cache sizing
-OIBus sends values to a target application via North connectors (OIConnect, OIAnalytics...). There are two sending
-modes:
-- through a file with a files' endpoint
-- through JSON payloads with a values' endpoint.
-
-The volumes to be taken into account can be estimated according to the data to be sent and the sending mode selected.
-These estimates can also be used to size the amount of cache storage needed to ensure store and forward works under
-good conditions.
-
-This section gives some hints on how to estimate the cache size.
-
-## Sending files (CSV)
-We will focus on data in the form of CSV files. In this case the volume will depend on several parameters:
-- The data sampling frequency
-- The file sending frequency
-- The timestamp format
-- The data format: number of characters used (precision)
-- The size of data references
-- The file format: in lines or in columns
-
-In the following examples, we will calculate how much space a CSV file generated by OIBus takes.
-We made the following assumptions:
-- The sampling frequency: one point per minute.
-- The frequency of sending the file: one file every 30 minutes.
-- The timestamp format: ISO 8601 format, 24 bytes in size.
-- Data format: 3 digits with a separator for the decimal places. Therefore, the data in the following examples have a
-size of 4 bytes.
-- The size of the point ID (data reference): DataXXX, where XXX represents three numeric characters. Therefore, the
-references of the following examples have a size of 7 bytes.
-
-### Column files
-This format is particularly suitable for data repeated on the same timestamp. It saves space compared to a _lines_
-format.
-
-````csv
-Timestamp Data001 Data002 Data003
-2020-02-01T20:04:00.000Z 12.0 10.0 10.0
-2020-02-01T20:05:00.000Z 10.0 19.0 10.0
-2020-02-01T20:06:00.000Z 10.0 10.0 14.0
-...
-````
-
-The size of the header is `10 + 1 + 7 + 1 + 7 + 1 + 7 + 1 = 35 bytes`.
-
-The size of one line is `24 + 1 + 4 + 1 + 4 + 1 + 4 + 1 = 40 bytes` (column separators and newlines are taken into
-account).
-
-The number of lines depends on the frequency of the data, here one line every minute. With a file sent every 30 minutes,
-it will therefore have a size of `35+40x30 = 1235 bytes`. Over a day, there will be 48 files, a total of 59,280 bytes
-or 58 kB.
-
-### Row files
-This format is particularly suitable when the different data transmitted do not have the same sampling frequency. In
-the example we assume that all data has the same sample rate.
-
-````csv
-Timestamp Reference Value
-2020-02-01T20:04:00.000Z Data001 12.0
-2020-02-01T20:04:00.000Z Data002 10.0
-2020-02-01T20:04:00.000Z Data003 10.0
-2020-02-01T20:05:00.000Z Data001 10.0
-2020-02-01T20:05:00.000Z Data002 19.0
-2020-02-01T20:05:00.000Z Data003 10.0
-2020-02-01T20:06:00.000Z Data001 10.0
-2020-02-01T20:06:00.000Z Data002 10.0
-2020-02-01T20:06:00.000Z Data003 14.0
-...
-````
-
-The size of the header is `10 + 1 + 9 + 1 + 6 + 1 = 28 bytes`.
-The size of a line is `24 + 1 + 7 + 1 + 4 + 1 = 38 bytes` (column separators and newlines are taken into account).
-
-The number of lines depends on the frequency of the data and the number of references, here one line every minute
-multiplied by 3 references (which makes 3 lines per minute). With one file sent every 30 minutes, it will therefore have
-a size of `28+38x30x3 = 3448 bytes`. Over a day, there will be 48 files, a total of 165,504 bytes or 162 kB.
-
-### Column row files
-This format has the advantage of the column file and allows the pooling of data identifiers (001, 002, 003) with the
-references if there are several, which is not the case here since only Data is used. This allows you to obtain the
-references Data001, Data002, Data003.
-
-````csv
-Timestamp Reference 001 002 003
-2020-02-01T20:04:00.000Z Data 12.0 10.0 10.0
-2020-02-01T20:05:00.000Z Data 10.0 19.0 10.0
-2020-02-01T20:06:00.000Z Data 10.0 10.0 14.0
-...
-````
-
-The size of the header is `10 + 1 + 9 + 1 + 3 + 1 + 3 + 1 + 3 + 1 = 33 bytes`.
-
-The size of a line here is `24 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 4 + 1 = 45 bytes` (column separators and newlines are taken
-into account).
-
-The number of lines depends on the frequency of the data and the number of references, here a line every minute
-multiplied by a reference (which makes one line per minute). With one file sent every 30 minutes, the file to be sent
-will therefore have a size of `33+45x30 = 1383 bytes`. Over a day, there will be 48 files, a total of 66,384 bytes or
-65 kB.
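As a quick cross-check of the arithmetic above, here is a small JavaScript sketch (an editorial illustration, not OIBus code) that re-derives the daily volumes for the column and row layouts from the stated assumptions:

```js
// Re-deriving the CSV volume estimates above. All sizes are in bytes.
const TIMESTAMP = 24; // ISO 8601 timestamp, e.g. 2020-02-01T20:04:00.000Z
const VALUE = 4;      // e.g. "12.0"
const REFERENCE = 7;  // e.g. "Data001"
const SEP = 1;        // one column separator or newline per field

const linesPerFile = 30; // one point per minute, one file every 30 minutes
const filesPerDay = 48;

// Column layout: one line per timestamp, one column per reference
const columnHeader = 10 + SEP + 3 * (REFERENCE + SEP);  // 35, as above
const columnLine = TIMESTAMP + SEP + 3 * (VALUE + SEP); // 40
const columnPerDay = (columnHeader + columnLine * linesPerFile) * filesPerDay;
console.log(columnPerDay); // 59280 bytes, i.e. about 58 kB per day

// Row layout: one line per (timestamp, reference) pair
const rowHeader = 10 + SEP + 9 + SEP + 6 + SEP;                  // 28
const rowLine = TIMESTAMP + SEP + REFERENCE + SEP + VALUE + SEP; // 38
const rowPerDay = (rowHeader + rowLine * linesPerFile * 3) * filesPerDay;
console.log(rowPerDay); // 165504 bytes, i.e. about 162 kB per day
```

The constants can be adjusted to match your own sampling frequency, reference lengths and sending period.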
-## Sending values (JSON payload)
-When values are retrieved by the North connector and sent to a values' endpoint (OIConnect or OIAnalytics), they are
-formatted in an array like this:
-
-````json
-[
-  {"timestamp": "2020-02-01T20:04:00.000Z", "pointId":"Data001", "data": {"value": "12.0", "quality": "192"}},
-  {"timestamp": "2020-02-01T20:04:00.000Z", "pointId":"Data002", "data": {"value": "10.0", "quality": "192"}},
-  {"timestamp": "2020-02-01T20:04:00.000Z", "pointId":"Data003", "data": {"value": "10.0", "quality": "192"}}
-]
-````
-
-Each field has the following meaning:
-- **timestamp**: indicates the timestamp of the value in ISO 8601 format
-- **pointId**: reference of the value
-- **data**: JSON object containing the recorded value (value) and the quality (quality)
-
-We will focus on data in JSON format. In this case the size depends on several parameters:
-- The data sampling frequency
-- The number of points grouped by sending (defined by [_Group Count_](../north-connectors/common-settings.md#caching))
-- The sending frequency (defined by [_Send Interval_](../north-connectors/common-settings.md#caching))
-- The format of data and quality: number of characters used (precision)
-- The size of the data references
-
-It is then possible to estimate the space occupied by a value.
-- The timestamp size is 39 bytes (`"timestamp": "2020-02-01T20:00:00.000Z"`)
-- The pointId size is of the form `"pointId": "DataXXX"`, i.e. 13 bytes added to the number of bytes of the
-reference (here the 7 bytes of _DataXXX_)
-- The data field size is 10 bytes (`"data": {...}`) added to its content:
-  - The value field is of the form `"value": "10.0"`, i.e. 11 bytes plus the variable number of bytes on
-which the value is encoded (here 4 bytes)
-  - The quality field is of the form `"quality": "192"`, i.e. 13 bytes plus the variable number of bytes on which
-the quality is encoded (here 3 bytes)
-
-Hence, the size of the object representing a value can be broken down into:
-- The constant object size: `39 + 13 + 10 + 11 + 13 + 6 = 92 bytes` (6 corresponding to the separators of the different
-elements: commas...)
-- The size of the reference: 7 bytes
-- The size of the value: 4 bytes
-- The size of the quality: 3 bytes
-
-The size of a single object to send is therefore 106 bytes, for a single value.
-
-With a sampling frequency of 1 point per minute and 3 data points, a Group Count equal to 1000 and a Send Interval equal to
-1000ms, OIBus will transmit a JSON payload every minute with 3 values, or 318 bytes.
-
-Over one day, this will represent 318 x 24 x 60 = 457,920 bytes, or 447 kB.
-
-## Comparison
-Under the conditions defined in the example, it appears that the transmission mode and the data format have a
-significant impact on the transmitted volumes. This will be even more critical when the number of data points and
-their sampling frequency are higher than described in this example.
-
-| | CSV columns | CSV rows | CSV rows + columns | JSON payload |
-|---------------|:-------------:|:------------:|:--------------------:|:--------------:|
-| Sent per day | 58 kB | 162 kB | 65 kB | 447 kB |
-| Size per value | 13.7 bytes | 38.3 bytes | 15.4 bytes | 106 bytes |
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/oibus-security.md b/documentation/versioned_docs/version-v2/guide/advanced/oibus-security.md
deleted file mode 100644
index 70903fbe67..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/oibus-security.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_position: 1
----
-
-# OIBus security
-
-OIBus is usually installed on a dedicated machine (which can be a virtual machine) located at the customer site. The
-OIBus behavior is fully managed by the OIBus configuration file (oibus.json). It is important to consider several
-aspects to protect OIBus:
-- Access to the machine
-- Access to the OIBus administration interface
-- Protection of passwords, secret keys, etc.
-
-
-## Access to the OIBus machine
-Of course, local or remote access (using RDP - Remote Desktop Protocol - or disk sharing for example) to the machine
-where OIBus is installed is a risk to consider. Indeed, a local user could delete OIBus files or directly modify the
-configuration file.
-
-It is important to limit access to the OIBus machine so that no one can access it except the OIBus administrator.
-
-## Access to the OIBus administration interface
-The OIBus administration interface is web-based and can be launched locally or from any remote PC with a
-web browser. We recommend using the interface through the local URL `http://localhost:2223`.
-
-To use it from a remote PC, you must configure the [IP Filters](../engine/access.md#ip-filters) section of the OIBus
-Engine.
-
-Access to the administration interface requires a user/password. The default username is **admin**. The default
-password is **pass**.
-
-Changing the default password is strongly recommended.
-
-## Forgotten password
-An administrator with access to the OIBus configuration file (`oibus.json`) can use a text editor to delete the
-_password_ value in the _Engine_ section of the OIBus configuration file. The password will then be restored to its
-default value **pass**.
-
-## HTTP protocol and Basic Auth
-OIBus uses the Basic Auth method on top of the HTTP protocol supported by most web browsers. This method
-**does not provide any privacy protection** for the transmitted credentials sent in the header at each HTTP request.
-
-The **filters** in the OIBus Engine can mitigate this risk by limiting the IP addresses allowed, but this is not a 100%
-guaranteed protection as impersonating another computer system with a fake IP address is not difficult for hackers.
-In addition, the privacy of the network through which the HTTP request passes must be respected to be sure
-that the credentials will not leak.
-
-Therefore, remote access to the OIBus administration interface should be limited to within the customer’s LAN and
-should not be accessible over the Internet. The use of a VPN is strongly advised.
-
-## Protection of passwords and secrets in the configuration file
-OIBus needs to access multiple sources of information (Histories, DCS, LIMS, MES, Databases, etc.). Many of these
-sources require a username/password pair or a secret key to grant access.
-
-This information is also stored in the OIBus configuration file (`oibus.json`), but it is all encrypted. This adds a
-level of protection that prevents anyone from reading this information unencrypted.
-
-This encryption uses public/private keys stored in the OIBus cache folder. These keys are created automatically at each
-startup if they do not already exist.
-
-If these keys are deleted, it will be impossible for OIBus to decrypt the passwords or secret keys. A new key pair will
-be generated when OIBus is restarted. In this case it will be necessary to use the administration interface and
-re-enter all passwords, including the admin password. If the administration interface is not accessible anymore because
-the keys have changed, use the [forgotten password procedure](#forgotten-password) to access it again and change every
-password and secret.
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/oibus-to-oibus.md b/documentation/versioned_docs/version-v2/guide/advanced/oibus-to-oibus.md
deleted file mode 100644
index 1955f672ec..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/oibus-to-oibus.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_position: 2
----
-
-# OIBus to OIBus communication
-## Context
-Sometimes, PLCs or databases are only accessible in a private network. Let's call it the **industrial network**. This
-network often exists inside or beside another network; let's call it the **office network**.
-Two options exist to access these data:
-- Allow connections to each data source from the OIBus machine (which is in the office network, outside the industrial
-network) through the firewall
-- Install one OIBus in the industrial network and one OIBus in the office network. Manage a single communication between
-the two networks.
-
-The first option is acceptable if you have only one machine on which to install OIBus, but it involves more network
-settings to manage and risks exposing your machines.
-The second option is preferable. Indeed, the first OIBus in the industrial network - OIBus1 - can access the machines in
-the same network, and send data to the office network through a single connection allowed in the firewall (from OIBus1
-to OIBus2).
-
-Let's see how to set up this communication.
-
-## Data
-### Set up a North connector OIConnect in OIBus1
-OIConnect is very useful when one OIBus has no internet access (because it is isolated in an industrial network) but
-can communicate with another OIBus which is in another network with internet access.
-
-In this case, let's keep the default endpoints `/engine/addValues` and `/engine/addFile` for values and file endpoints
-respectively.
-The host could be something like `http://1.2.3.4:2223` where 1.2.3.4 is the IP address and 2223 is the port of the
-second OIBus. Be careful to authorize remote connections in the second OIBus Engine settings in the
-[IP Filter section](../engine/access) and to use the appropriate username and password (using Basic
-Authentication). In this case, the OIBus username and password must be used (by default, admin and pass).
-
-### Set up an External source in OIBus2
-On the second OIBus, if you have a North connector with no subscription, the data will automatically be sent to the
-North.
-
-However, if you want a North to subscribe to a specific external source, you must declare an additional external source
-in the [Engine settings](../engine/external-sources). Its name must follow the syntax of the [name query
-param](../north-connectors/oiconnect#query-param), for example `MyOIBus:MyOIConnect`.
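To make the naming convention concrete, here is a hypothetical sketch (not part of OIBus) of a manual values push to OIBus2, using the endpoints, the `name` query param and the Basic Auth defaults described in this guide; the host, credentials and values below are placeholders:

```js
// Hypothetical values push to OIBus2. Run in an ES module on Node.js 18+
// (global fetch and top-level await). Everything below is a placeholder.
const host = 'http://1.2.3.4:2223'; // address of OIBus2
const name = 'MyOIBus:MyOIConnect'; // must match the external source declared in OIBus2

const values = [
  { timestamp: '2020-02-01T20:04:00.000Z', pointId: 'Data001', data: { value: '12.0' } },
];

await fetch(`${host}/engine/addValues?name=${encodeURIComponent(name)}`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    // OIBus uses Basic Auth (default admin/pass)
    Authorization: 'Basic ' + Buffer.from('admin:pass').toString('base64'),
  },
  body: JSON.stringify(values),
});
```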
-The North connector can now subscribe to this specific external source.
-
-
-## Logs
-### Loki through another OIBus
-To send logs to OIBus2, go to the Engine page in the _Loki logs_ section, and specify the OIBus2 address in the
-**Host** field and its associated endpoint. For example: `http://1.2.3.4:2223/logs`.
-OIBus2 uses Basic Auth. Leave the token address field empty and fill in the username and password used to connect to OIBus2.
-
-If the loki level set in OIBus1 is **info**, only info and more critical levels will be sent to OIBus2. In OIBus2, if the
-console and file levels are set to **error**, only error levels will be logged on the console and
-file. However, if the loki level is set to **info** too, all the logs received from OIBus1 will be sent to this loki
-endpoint.
-
-## Health Signal
-When the health signal is triggered through the logs, it follows the same logic as sending logs to loki. Nothing else
-has to be done.
-
-When the health signal is triggered through an HTTP request, it can be sent to OIBus2 to be forwarded to its own HTTP
-health signal. If the OIBus2 HTTP health signal is disabled, the OIBus1 signal will only be stored in OIBus2 logs but
-not forwarded to another HTTP endpoint.
-
-The endpoint to use for the HTTP health signal to another OIBus is `/engine/aliveSignal`.
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/opchda-agent.md b/documentation/versioned_docs/version-v2/guide/advanced/opchda-agent.md
deleted file mode 100644
index e9700d3035..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/opchda-agent.md
+++ /dev/null
@@ -1,230 +0,0 @@
----
-sidebar_position: 5
----
-
-# OPCHDA agent
-OIBus embeds an agent used to interact with OPC servers. As a standalone agent, it can also be run through a
-Command Line Interface (CLI).
-
-Because OPC depends on COM/DCOM technology, the agent can be run on Windows only, with COM/DCOM
-settings enabled.
-
-OIBusOPCHDA is built in C# with .NET Framework 4.8.
-
-OPC Core Components, from the OPC Foundation, are required to compile and use this library:
-https://opcfoundation.org/developer-tools/samples-and-tools-classic/core-components/
-
-The Newtonsoft.Json and CommandLineParser libraries are also required to interact with TCP commands and CLI commands
-respectively.
-
-# HdaAgent (standalone)
-The agent is an executable that requires the following DLLs to run:
-- CommandLine.dll
-- Newtonsoft.Json.dll
-- OpcComRcw.dll
-- OpcNetApi.Com.dll
-- OpcNetApi.dll
-
-Several actions are possible:
-- ping: to check the connection and get server information
-- catalog: to list available tags and store them in a CSV file
-- bulk: to request history and store it in one file per tag
-
-The following options are available for all commands:
-
-````
--h --host           Host name (or IP address).
--s --server         HDA Server name (ex: Matrikon.OPC.Simulation.1)
--l --consoleLevel   Verbosity level for Console (error, warning, info, debug, trace). Default debug
--x --fileLevel      Verbosity level for File (error, warning, info, debug, trace). Default debug
-````
-
-## ping
-The following option is available:
-````
--i --infos          Display supported aggregates and attributes from the server. Default: false
-````
-
-### Usage
-````
-.\HdaAgent.exe ping -h localhost -s Matrikon.OPC.Simulation -i
-````
-
-The ping command with the _-i_ option returns three messages from the Matrikon simulation server:
-
-**Status infos:**
-````
-{
-  "VendorInfo": "Matrikon Inc +1-780-945-4011 http://www.matrikonopc.com",
-  "ProductVersion": "1.7.7433",
-  "ServerState": 1,
-  "StatusInfo": "",
-  "StartTime": "2022-05-16T14:27:46.3709266+00:00",
-  "CurrentTime": "2022-08-02T09:18:29.5739742+00:00",
-  "MaxReturnValues": 0
-}
-````
-
-**Supported aggregates:**
-````
-[
-  {
-    "ID": 1,
-    "Name": "INTERPOLATIVE",
-    "Description": "Retrieve interpolated values."
-  },
-  {
-    "ID": 4,
-    "Name": "TIMEAVERAGE",
-    "Description": "Retrieve the time weighted average data over the resample interval."
-  },
-  {
-    "ID": 7,
-    "Name": "MINIMUMACTUALTIME",
-    "Description": "Retrieve the minimum value in the resample interval and the timestamp of the minimum value."
-  },
-  {
-    "ID": 8,
-    "Name": "MINIMUM",
-    "Description": "Retrieve the minimum value in the resample interval."
-  },
-  {
-    "ID": 9,
-    "Name": "MAXIMUMACTUALTIME",
-    "Description": "Retrieve the maximum value in the resample interval and the timestamp of the maximum value."
-  },
-  {
-    "ID": 10,
-    "Name": "MAXIMUM",
-    "Description": "Retrieve the maximum value in the resample interval."
-  }
-]
-````
-
-**Supported types:**
-````
-[
-  {
-    "ID": 1,
-    "Name": "DATA_TYPE",
-    "Description": "Data type",
-    "DataType": "System.Int16, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
-  },
-  {
-    "ID": 2,
-    "Name": "DESCRIPTION",
-    "Description": "Item Description",
-    "DataType": "System.String, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
-  },
-  {
-    "ID": 11,
-    "Name": "NORMAL_MAXIMUM",
-    "Description": "High EU",
-    "DataType": "System.Double, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
-  },
-  {
-    "ID": 12,
-    "Name": "NORMAL_MINIMUM",
-    "Description": "Low EU",
-    "DataType": "System.Double, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
-  },
-  {
-    "ID": 13,
-    "Name": "ITEMID",
-    "Description": "Item ID",
-    "DataType": "System.String, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
-  },
-  {
-    "ID": -5,
-    "Name": "TRIANGLE",
-    "Description": "Triangle Wave",
-    "DataType": "Opc.Type, OpcNetApi, Version=2.1.0.0, Culture=neutral, PublicKeyToken=9a40e993cbface53"
-  },
-  {
-    "ID": -4,
-    "Name": "SQUARE",
-    "Description": "Square Wave",
-    "DataType": "Opc.Type, OpcNetApi, Version=2.1.0.0, Culture=neutral, PublicKeyToken=9a40e993cbface53"
-  },
-  {
-    "ID": -3,
-    "Name": "SAWTOOTH",
-    "Description": "Saw-toothed Wave",
-    "DataType": "Opc.Type, OpcNetApi, Version=2.1.0.0, Culture=neutral, PublicKeyToken=9a40e993cbface53"
-  },
-  {
-    "ID": -2,
-    "Name": "RANDOM",
-    "Description": "Random",
-    "DataType": "Opc.Type, OpcNetApi, Version=2.1.0.0, Culture=neutral, PublicKeyToken=9a40e993cbface53"
-  },
-  {
-    "ID": -1,
-    "Name": "BUCKET",
-    "Description": "Bucket Brigade",
-    "DataType": "Opc.Type, OpcNetApi, Version=2.1.0.0, Culture=neutral, PublicKeyToken=9a40e993cbface53"
-  }
-]
-````
-
-## catalog
-HdaAgent Catalog creates a CSV file, catalog.csv, using the browse API.
-
-The program displays information about the server (API ServerStatus), Aggregates (getAggregates)
-and Attributes (getAttributes) as JSON strings in the console.
-
-The following options are available:
-````
--i --includesAll    Includes all Items in the server (i.e. folders). Default: false
--f --file           Name of the output file. Default: catalog.csv
-````
-
-### Basic usage
-`.\HDAAgent.exe catalog -h localhost -s Matrikon.OPC.Simulation`
-
-````
-Name,Address
-"ArrayOfReal8","Bucket Brigade.ArrayOfReal8"
-"ArrayOfString","Bucket Brigade.ArrayOfString"
-...
-````
-### Includes all and specific file
-`.\HDAAgent.exe catalog -h localhost -s Matrikon.OPC.Simulation --includesAll --file myFile.csv`
-
-````
-Name,Address,isItem
-"Root","",False
-"Simulation Items","Simulation Items",False
-"Bucket Brigade","Bucket Brigade",False
-"ArrayOfReal8","Bucket Brigade.ArrayOfReal8",True
-"ArrayOfString","Bucket Brigade.ArrayOfString",True
-...
-````
-
-## bulk
-The following options are available:
-````
--b --startTime      Start Time of the history
--e --endTime        End Time of the history
--d --delay          Throttle: add a delay between requests to minimize load on HDA Servers (in ms)
--m --max            Maximum number of values returned in a request. Default 0 (no maximum)
--o --output         Name of the output folder. Default: current folder
--c --catalog        Name of the catalog file listing the tags
--a --aggregate      Aggregate value. RAW=0, TOTAL=2, AVERAGE=3, MINIMUM=8, MAXIMUM=10, START=11, END=12. Default 0
--i --interval       Interval (in seconds) if an aggregate is requested
-````
-### Basic usage
-Request raw values from the _Matrikon.OPC.Simulation_ server located on _localhost_, for points listed in catalog.csv, between
-2022-01-01 00:00:00 and 2022-02-01 00:00:00.
-````
-.\HdaAgent.exe bulk -h localhost -s Matrikon.OPC.Simulation -c catalog.csv -b "2022-01-01 00:00:00" -e "2022-02-01 00:00:00" -a 0
-````
-### With aggregates
-Request, by group intervals of 60s (_-i 60_), the last value (_-a 12_) of each group for points listed in catalog.csv from the
-_Matrikon.OPC.Simulation_ server located on _localhost_, between 2022-01-01 00:00:00 and 2022-02-01 00:00:00. Display all logs in the console with the trace level.
-````
-.\HdaAgent.exe bulk -h localhost -s Matrikon.OPC.Simulation -c catalog.csv -b "2022-01-01 00:00:00" -e "2022-02-01 00:00:00" -a 12 -i 60 -l trace
-````
-
-# HdaAgent (with OIBus)
-OIBus communicates with the HdaAgent through a TCP connection. See the [OIBus OPCHDA documentation](../south-connectors/opchda.md) for more information.
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/opchda-dcom.md b/documentation/versioned_docs/version-v2/guide/advanced/opchda-dcom.md
deleted file mode 100644
index 068ef21b1f..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/opchda-dcom.md
+++ /dev/null
@@ -1,125 +0,0 @@
----
-sidebar_position: 4
----
-
-# OPCHDA COM/DCOM setup
-## Background
-### COM
-COM is the standard protocol for communication between objects located on the same computer but which are part of
-different programs. The server is the object providing services, such as making data available. The client is an
-application that uses the services provided by the server.
-
-### DCOM
-DCOM represents an expansion of COM functionality to allow access to objects on remote computers. This protocol allows
-standardized data exchange between applications from industry, administrative offices and manufacturing. Previously, the
-applications that accessed the process data were tied to the access protocols of the communication network. The OPC
-standard software interface allows devices and applications from different manufacturers to be combined in a uniform way.
-
-The OPC client is an application that accesses process data, messages, and archives of an OPC server. Access is through
-the OPC software interface. An OPC server is a program that provides a standard software interface to read or write data.
-The OPC server is the intermediate layer between the applications for handling process data, the various network
-protocols and the interfaces for accessing these data. Only devices with operating systems based on Windows COM and
-DCOM technology can use the OPC software interface for data exchange.
-
-:::info DCOM connectivity
-
-This page gives some hints on how to set up a communication with COM/DCOM to an OPCHDA server. However, in an industrial
-context, it is often the responsibility of the IT team to correctly set the permissions, firewall and Windows
-configuration.
-
-:::
-
-
-## Windows settings (client)
-### Client machine settings
-Follow these steps to enable COM/DCOM communications from the client. First, open the Component Services, and access the
-_Properties_ of the computer.
-
-![Component Services](@site/static/img/guide/south/opchda/OPCHDA-component-services.png)
-
-Be sure to enable _Distributed COM_ on this computer.
-
-![Computer Properties](@site/static/img/guide/south/opchda/OPCHDA-computer-properties.png)
-
-On the COM Security tab, edit the default access permissions.
-
-![COM Security](@site/static/img/guide/south/opchda/OPCHDA-COM-security.png)
-
-On the Access permissions window, allow the following permissions:
-- Local Launch
-- Remote Launch
-- Local Activation
-- Remote Activation
-
-![Access Permissions](@site/static/img/guide/south/opchda/OPCHDA-access-permissions.png)
-
-### Test communication
-DCOM uses port 135 of the HDA server to exchange with the client. To check connectivity, you can use the tnc command of
-Windows PowerShell, installed by default. Below is a test that fails (because of the firewall), then a test that
-succeeds:
-
-`tnc 35.180.44.30 -port 135`
-
-![Test DCOM communication](@site/static/img/guide/south/opchda/OPCHDA-test-communication.png)
-
-If you have a communication problem, see the [firewall configuration section](#firewall-configuration) which is probably the source of the problem.
-
-### Authentication
-An OPCDA client program will communicate with the DA/HDA server using the IP address or hostname of the server followed
-by the “progId” of the server. It will then have to be identified at the Windows level with a name and a password which
-are (by default) those of the user who launches the client program. This user must therefore be known on the HDA
-server as well. You must either:
-- Create a user with the same password on the HDA server (assuming it is accessible)
-- Be part of the same domain (so the user is accessible from all computers in the domain)
-
-:::info Important
-
-The user must be a member of the _Distributed COM Users_ group
-
-:::
-
-:::tip Service
-
-If the program runs through a service (such as OIBus), go to the Service manager window, and right-click on the service.
-Then click on _Launch as user_.
-
-:::
-
-### Firewall configuration
-
-In case of communication issues, the most likely cause is the configuration of a firewall between the two computers
-and/or at the hosting company in the case of machines in the cloud. On a Windows server, it is possible to configure
-the firewall by adding a rule on port 135.
-
-![Windows Firewall Configuration](@site/static/img/guide/south/opchda/OPCHDA-windows-firewall.png)
-
-In the case of a server hosted by Lightsail, there is an additional firewall in which a custom rule must be configured
-for port 135.
-
-![Lightsail Firewall Configuration](@site/static/img/guide/south/opchda/OPCHDA-lightsail-firewall.png)
-
-### OPCEnum tool
-The OPC Foundation has provided a tool to allow OPCHDA clients to locate servers on remote nodes, without having
-information about those servers in the local registry. This tool is called OPCEnum and is freely distributed by the OPC
-Foundation. The PI OPCHDA interface installation installs OPCEnum as well. The primary function of OPCEnum is to inform
-or request information from other instances of OPCEnum about existing OPCHDA Servers on the local system. When OPCEnum
-is installed, it grants Launch and Access DCOM permission to _Everyone_ and sets the _Authentication level_ to NONE.
-This allows access to any user who can log on to the system. The permissions can be changed using `dcomcnfg.exe`.
-
-#### RPC unavailable
-If the RPC server is unavailable, try [testing the COM/DCOM communication](#test-communication) again and check your
-firewall.
-
-![RPC Unavailable](@site/static/img/guide/south/opchda/OPCHDA-rpc-unavailable.png)
-
-#### Access denied
-Access rights can be diagnosed using the server security log. If the following error happens, check the user and its
-password created on the HDA server and that the user is in the _Distributed COM Users_ group on the HDA server.
-
-![Access denied](@site/static/img/guide/south/opchda/OPCHDA-access-denied.png)
-
-
-## Server settings
-Check on the server machine if DCOM is enabled for the OPC Server application by opening the _Component Services_ window.
-
-![Server Machine DCOM Configuration](@site/static/img/guide/south/opchda/OPCHDA-server-DCOM-configuration.png)
diff --git a/documentation/versioned_docs/version-v2/guide/advanced/sql-with-odbc.md b/documentation/versioned_docs/version-v2/guide/advanced/sql-with-odbc.md
deleted file mode 100644
index 0725421344..0000000000
--- a/documentation/versioned_docs/version-v2/guide/advanced/sql-with-odbc.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-sidebar_position: 6
----
-
-# SQL with ODBC
-
-## What is ODBC
-ODBC stands for Open Database Connectivity. It is a standard application programming interface (API) for accessing
-databases. It was developed with the SQL Access Group in the early 1990s and is now maintained by Microsoft.
-
-To connect OIBus with a database through ODBC technology, a driver must be installed on the OIBus machine. Each
-database has its own driver. This article will explore how to set up an ODBC connection with an MSSQL database.
-
-## Example with MSSQL ODBC
-Microsoft already offers documentation to install its driver on
-[Windows](https://learn.microsoft.com/en-us/sql/connect/odbc/windows/microsoft-odbc-driver-for-sql-server-on-windows?view=sql-server-ver16),
-[Linux](https://learn.microsoft.com/en-us/sql/connect/odbc/linux-mac/installing-the-microsoft-odbc-driver-for-sql-server?view=sql-server-ver16)
-and [MacOS](https://learn.microsoft.com/en-us/sql/connect/odbc/linux-mac/install-microsoft-odbc-driver-sql-server-macos?view=sql-server-ver16)
-
-Once the driver is installed on the OIBus machine, locate the **ODBC Driver Path** on the SQL connector, and specify the
-driver path:
- - For macOS, it can be like `/opt/homebrew/lib/libmsodbcsql.18.dylib`
- - For Windows, only the ODBC Driver Name is needed: `ODBC Driver 18 for SQL Server`. You can retrieve the list of
-installed ODBC drivers in the Drivers tab of the Windows ODBC Data Sources tool.
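To show how such a driver is typically consumed once installed, here is a hedged sketch using the community `odbc` Node.js package; the connection string, database, credentials and query below are placeholders and must be adapted to your own server:

```js
// Illustrative only (not OIBus code): querying MSSQL through the ODBC driver
// installed above, using the community "odbc" npm package.
const odbc = require('odbc');

async function main() {
  const connection = await odbc.connect(
    // Placeholder connection string: adjust server, database and credentials.
    'Driver={ODBC Driver 18 for SQL Server};Server=localhost,1433;' +
      'Database=mydb;Uid=myuser;Pwd=mypassword;TrustServerCertificate=yes;'
  );
  const rows = await connection.query('SELECT TOP 10 * FROM my_table');
  console.log(rows);
  await connection.close();
}

main().catch(console.error);
```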
diff --git a/documentation/versioned_docs/version-v2/guide/engine/_category_.json b/documentation/versioned_docs/version-v2/guide/engine/_category_.json
deleted file mode 100644
index c44865472b..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Engine",
-  "position": 3,
-  "link": {
-    "type": "generated-index",
-    "description": "Learn how to set up OIBus Engine."
-  }
-}
diff --git a/documentation/versioned_docs/version-v2/guide/engine/access.md b/documentation/versioned_docs/version-v2/guide/engine/access.md
deleted file mode 100644
index a7d482d837..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/access.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-sidebar_position: 1
----
-
-# OIBus access
-## OIBus port
-The default port is 2223 and is used to access the OIBus settings from a web interface at `http://localhost:2223`. This
-port can be changed in case of conflict or for security reasons.
-
-## Safe mode
-In case of an OIBus error, the safe mode is activated. Running OIBus in safe mode deactivates all connectors: it is mostly
-used to be able to access OIBus settings even if there are runtime issues with a connector. The safe mode must be
-deactivated for OIBus to receive and send data.
-
-## IP Filters
-Only local access is enabled by default. You can see that from the IP Filter section where localhost is defined in IPv4
-and IPv6 formats.
-
-You can add a remote address to access OIBus from a remote workstation. However, keep in mind that only HTTP is used to
-access OIBus since OIBus is rarely attached to a machine with a domain name and certificate installed. So, if you need
-to access OIBus remotely, please do so through a VPN or another secure channel. See the
-[security section](../advanced/oibus-security.md) to learn more about the security in OIBus.
diff --git a/documentation/versioned_docs/version-v2/guide/engine/cache-and-archive.md b/documentation/versioned_docs/version-v2/guide/engine/cache-and-archive.md
deleted file mode 100644
index c0d367d19d..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/cache-and-archive.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-sidebar_position: 4
----
-
-# Cache and archive
-All files and values are stored in local caches (one for each North connector). In case communication errors prevent
-OIBus from sending information to a North connector, transfers will be retried regularly, even after a machine restart.
-
-When the communication is restored, all files and values in the cache are forwarded to the North connector.
-
-:::tip Cache location
-
-The cache is located in the `cache` folder (`data-stream` and `history-query`) and each connector has its own folder in
-the form `north-id` or `south-id`.
-
-:::
-
-## Managing values
-When a South connector retrieves values, they are sent to each activated North and gathered in batches, directly written
-on disk for persistence in case of a server crash (in the folder `values`).
-
-When getting values, the North cache first creates a `.buffer.tmp` file which contains a JSON with the
-values retrieved from the South. These files allow OIBus to persist values right away.
-
-Every 300ms, the North cache gathers the `.buffer.tmp` files into a single `.queue.tmp` file
-and puts it at the end of the connector queue.
-
-The queue is used at a regular interval (parameter _Send Interval_) to send values to the North target. The values can
-also be sent as soon as a _Group count_ is reached.
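The buffer/queue mechanics described above can be summarized with a toy JavaScript sketch (an editorial illustration with simplified in-memory structures, not actual OIBus code; the interval and group count values are examples):

```js
// Toy model of the buffer -> queue -> send flow: values are buffered as they
// arrive, flushed to the queue every 300 ms, and sent either at the send
// interval or as soon as the group count is reached.
const SEND_INTERVAL = 1000; // ms, illustrative value
const GROUP_COUNT = 1000;   // values, illustrative value

const queue = []; // stands in for the .queue.tmp files
let buffer = [];  // stands in for the .buffer.tmp files

function onValuesFromSouth(values) {
  buffer.push(...values);
}

setInterval(() => {
  if (buffer.length > 0) {
    queue.push(buffer); // gather buffered values into one queue entry
    buffer = [];
  }
  const queued = queue.reduce((n, batch) => n + batch.length, 0);
  if (queued >= GROUP_COUNT) sendToNorth(); // group count reached: send now
}, 300);

setInterval(sendToNorth, SEND_INTERVAL); // otherwise drain at the send interval

function sendToNorth() {
  while (queue.length > 0) {
    const batch = queue.shift();
    console.log(`sending ${batch.length} values to the North target`);
  }
}
```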
-In case of failure (for example a network error), the size of the queue will grow. If _Max group count_ is reached, several
-queue files will be gathered into a single `.compact.tmp` JSON file. These files will be at the top of the queue,
-to be sent once the network comes back online. Increasing the max chunk size (number of values in each chunk)
-will increase the size of these compact files.
-
-## Managing files
-When a South connector retrieves files, it copies each file into the North cache directory (in the folder `files`).
-
-If several North connectors are set and enabled, files will be duplicated in each North folder. In this case, make sure
-to have enough disk space to manage them.
-
-To set up the archive mode, and tune caching settings from the North specific configuration, refer to
-[this page](../north-connectors/common-settings.md).
\ No newline at end of file
diff --git a/documentation/versioned_docs/version-v2/guide/engine/external-sources.md b/documentation/versioned_docs/version-v2/guide/engine/external-sources.md
deleted file mode 100644
index 946d61c21a..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/external-sources.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-sidebar_position: 7
----
-
-# External sources
-Declaring an external source is useful when values and files are sent directly to the OIBus endpoints `/engine/addValues`
-and `/engine/addFile`. These endpoints require the `name` query param to identify the source. When it comes from another
-OIBus with [OIConnect](../north-connectors/oiconnect.md), the name can be for example `MyFirstOIBus:MyOIConnect`.
-
-A North connector can only subscribe to external sources of data, explicitly defined in the **External sources**
-section.
-
-To do so, click on the add button and fill in the ID of the newly created external source with `MyFirstOIBus:MyOIConnect`,
-where MyFirstOIBus is the name of the source OIBus, and MyOIConnect is the name given to the North connector of the
-source OIBus.
diff --git a/documentation/versioned_docs/version-v2/guide/engine/health-signal.md b/documentation/versioned_docs/version-v2/guide/engine/health-signal.md
deleted file mode 100644
index ea9e109d33..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/health-signal.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-sidebar_position: 6
----
-
-# Health signal
-A message can be sent regularly to the logs or to an HTTP endpoint to give information about the OIBus status.
-
-## Log
-When enabled, the health signal is sent to the logs with an `info` criticality, at the desired frequency. It will be
-sent to the appropriate channels (console, file, SQLite, loki...) according to the
-[logging settings](../engine/logging-parameters.md).
-
-## HTTP
-It is also possible to send the OIBus health signal to a remote HTTP endpoint as a JSON payload:
-
-````json
-{
-  "version": "OIBus version",
-  "architecture": "OS architecture",
-  "executable": "path to the OIBus binary",
-  "processId": "Process ID",
-  "hostname": "OS hostname",
-  "osRelease": "OS release",
-  "osType": "OS type",
-  "id": "OIBusName"
-}
-````
-
-To do so, activate the HTTP signal and fill in the following fields:
-- **Host**: the hostname or IP address
-- **Endpoint**: the endpoint that will receive the JSON payload
-- **Frequency**: time interval between HTTP signals (in s)
-- **Proxy**: select a proxy to use if needed
-- **Verbose**: to have more details about the status of OIBus
-
-Also fill in the authentication section according to the authentication method used by the target endpoint.
diff --git a/documentation/versioned_docs/version-v2/guide/engine/logging-parameters.md b/documentation/versioned_docs/version-v2/guide/engine/logging-parameters.md
deleted file mode 100644
index 63084cbe4f..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/logging-parameters.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-sidebar_position: 2
----
-
-# Logging parameters
-OIBus logs have five levels (from the most to the least critical):
-- Error
-- Warning
-- Info
-- Debug
-- Trace
-
-Activating _**Info**_ logs will also activate _**Warning**_ and _**Error**_ logs. Activating _**Error**_ logs will only
-display _**Error**_ logs.
-Obviously, having _**Trace**_ logs activated will result in extremely verbose logs. Use _**Trace**_ and _**Debug**_ for
-troubleshooting purposes.
-
-## Console
-This section displays the logs in the Console, alongside values if a [North Console connector
-](../north-connectors/console) is used.
-
-## File
-To store logs in one or several files. You can choose the maximum file size and the number of files used to roll logs.
-
-## SQLite
-To store logs in a local SQLite database to be displayed in the _Logs tab_ of OIBus.
-
-## Loki
-To send the logs to a remote _loki_ instance. Logs are sent to the specified host, in batches, over a tunable time period
-(default is 60s). You can change this period to have smaller or bigger batches of logs.
-
-Loki can be accessed directly with a _username_ and _password_ using Basic Auth. If a JWT token should be retrieved first,
-fill in the _Token address_ used to retrieve the token, using Basic Auth (with the username and password).
-The token will be used by OIBus to send logs to the remote _loki_ instance. Keep the token address empty if you
-don't use JWT token authentication.
-
-:::caution Loki logs with multiple OIBus
-
-Logs sent to loki are identified by the OIBus engine name. Be sure to update this name appropriately to find your OIBus
-logs in your loki instance.
-
-:::
diff --git a/documentation/versioned_docs/version-v2/guide/engine/proxy.md b/documentation/versioned_docs/version-v2/guide/engine/proxy.md
deleted file mode 100644
index 529fb95aeb..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/proxy.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-sidebar_position: 5
----
-
-# Proxy
-Some connectors need to send information through a proxy. Each proxy must be defined in the Engine settings and can be
-reused by different OIBus connectors.
-
-To define a proxy, the following fields must be specified:
-- **Name**: identifies the proxy. This name is used in OIBus connectors
-- **Protocol**: `http` or `https`
-- **Host**: the hostname or the IP address of the proxy
-- **Port**: the port to connect to the proxy
-- **User**: the username to authenticate the connection with the proxy
-- **Password**: the password associated with the username
diff --git a/documentation/versioned_docs/version-v2/guide/engine/scan-modes.md b/documentation/versioned_docs/version-v2/guide/engine/scan-modes.md
deleted file mode 100644
index 8be5229cdc..0000000000
--- a/documentation/versioned_docs/version-v2/guide/engine/scan-modes.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-sidebar_position: 3
----
-
-# Scan modes
-Scan modes are Cron-defined and used in OIBus to retrieve data at specific dates and intervals from
-[South connectors](../south-connectors/common-settings.md).
-
-Four default scan modes are defined:
-- everySecond
-- every10Seconds
-- every1Minute
-- every10Minutes
-
-You can define your own scan modes by adding one, giving it a name and selecting the _every_ option. Then, select the
-interval and the unit (msec, sec, minute, hour, day, week, month, year).
-
-Under the hood, OIBus transforms these intervals into Cron expressions. So naturally, Cron can be used to tune scan modes.
-To do so, switch the _every_ option to _custom_, and type your Cron expression.
-
-:::danger Specific cron syntax
-
-The Cron in OIBus supports milliseconds cron, but with a reverse order:
-
-` `
-
-:::
diff --git a/documentation/versioned_docs/version-v2/guide/index.md b/documentation/versioned_docs/version-v2/guide/index.md
deleted file mode 100644
index 07885c402e..0000000000
--- a/documentation/versioned_docs/version-v2/guide/index.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-displayed_sidebar: guideSidebar
-sidebar_position: 1
----
-
-# Main concepts
-## Open-source, scalable and flexible
-OIBus is a lightweight and flexible data collection solution that simplifies data recovery in an industrial environment.
-It collects data from a wide variety of sources encountered in the industry and transmits it to target applications,
-whether they are on-premises or in the cloud.
-
-OIBus is an open-source solution which allows great flexibility to answer various use cases. Historically developed to
-power the OIAnalytics® solution, Optimistik has offered and maintained this solution since 2020 under the [EU-PL
-](https://ec.europa.eu/info/european-union-public-licence_en) open-source license.
-
-It is used by other solution providers who choose to join this initiative so that the collection of data in industry is
-no longer an obstacle to its digitization.
-
-## A streaming solution
-OIBus is built in a modular way with a South (data collection from source systems), a North (transmission to target
-systems) and an Engine (mainly in charge of configuration, orchestration and cache).
-
-![Example banner](./oibus-EN.png)
-
-This structure facilitates the scalability of the solution by concentrating most of the complexity in the Engine. Thus,
-the development of North or South modules is made easier.
-
-## Advanced capabilities
-OIBus already supports many industrial data sources and can be enriched thanks to its open-source code.
-
-- **Industrial information systems**: PLCs, supervisions, historians with various protocols (OPCUA-HA, OPCUA-HDA,
-OPC-HDA, TwinCAT ADS, ModBus…)
-- **Business information systems**: Access to business information systems by SQL queries (Oracle, Microsoft SQL Server,
-PostgreSQL, MySQL, MariaDB, SQLite…), file retrieval (xls, csv...)
-- **IoT Sensors**: Subscribe to IoT messaging services (MQTT, API...)
-
-OIBus also supports many application targets which can be extended as needed.
-- **SaaS Applications**: OIAnalytics®, AWS S3, REST API…
-- **IoT platforms**: Subscription to IoT messaging services (MQTT, API...)
-- **Databases**: InfluxDB, TimeScale DB, MongoDB...
-
-In addition, OIBus has been designed to withstand large loads and is used on many industrial sites with data streams
-ranging from 10 to 10,000 points with second-scale precision.
-
-Overall, OIBus can manage:
-- Reliable, secure and optimized communications
-- Store and forward so as not to miss any data
-- Communications secured by HTTPS
-- Data compression
-- Tunneling and proxy management
diff --git a/documentation/versioned_docs/version-v2/guide/installation.mdx b/documentation/versioned_docs/version-v2/guide/installation.mdx
deleted file mode 100644
index 188a9358ed..0000000000
--- a/documentation/versioned_docs/version-v2/guide/installation.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-sidebar_position: 2
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import WindowsInstallation from './installation/_windows.mdx';
-import LinuxInstallation from './installation/_linux.mdx';
-import MacOSInstallation from './installation/_macos.mdx';
-
-# Installation
-
-Let's install OIBus in less than 2 minutes.
-
-## Requirements
-To operate, OIBus must be installed on an environment with the following minimum characteristics:
-- **64-bit architecture**. 32-bit and ARM architectures are not supported.
-- **RAM: 2GB minimum**. OIBus may require more RAM depending on the configuration of South and North connectors.
-- **Disk space: 200 MB**. For the application. It is however recommended to have extra space (several GB, depending on
-your data stream) to store the collected data in the cache if an upstream network failure occurs.
-- **Admin rights** on the environment to install OIBus and register it as a service (for Windows and Linux).
-- **Modern web browser** (IE not supported). Used to access the OIBus configuration interface. The browser can be on the
-targeted computer or on another, provided you have configured the [necessary access](engine/access.md).
-
-## Installation steps
-
-[Installation tabs: Windows / Linux / MacOS]
-
-## Access OIBus interface
-The OIBus configuration interface is available on `http://localhost:2223` (assuming the default port has been kept).
-
-:::caution Default access
-By default, the user is **admin** and the password is **pass**.
-
-We strongly advise changing the password in the Engine settings.
-
-:::
-
-When installing OIBus, remember to choose an appropriate name. The _Engine name_ is important mainly if you use several
-OIBus instances and send the logs to a remote [loki instance](./engine/logging-parameters.md#loki).
diff --git a/documentation/versioned_docs/version-v2/guide/installation/_linux.mdx b/documentation/versioned_docs/version-v2/guide/installation/_linux.mdx
deleted file mode 100644
index 4a31ca3668..0000000000
--- a/documentation/versioned_docs/version-v2/guide/installation/_linux.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
-import DownloadButton from '@site/src/components/DownloadButton';
-
-[Download button: OIBus v2.11, Linux (x64)]
-
-To install or update OIBus, you should run the installation script. To do that, you may either download from the above
-button and unzip the downloaded file, or use the following cURL or Wget command:
-
-```
-curl -o- -L https://github.com/OptimistikSAS/OIBus/releases/download/v2.11.0/oibus-linux.tar.gz | tar -xzv
-```
-
-
-```
-wget -c https://github.com/OptimistikSAS/OIBus/releases/download/v2.11.0/oibus-linux.tar.gz -O - | tar -xzv
-```
-
-The following files have been extracted:
-```
-default-config.json    // default configuration that will be updated during the installation process
-oibus                  // OIBus binary
-oibus-setup.sh         // installation script
-oibus-uninstall.sh     // uninstallation script that will be updated during the installation process
-```
-
-Bash scripts have been tested on Ubuntu. They interact with the user to fill in the first OIBus configuration, and set up OIBus
-as a service. Alternatively, it is possible to [run the OIBus binary only](#run-oibus-in-standalone).
-
-## Installation of OIBus as a Linux service
-The installation script can be run with the following command. Admin rights will be required during the script execution.
-```
-./oibus-setup.sh
-```
-
-The following questions will be asked during the installation (default answers have been kept in this example):
-```
-Administrative permissions are required to proceed. Do you wish to continue? (Y/n)
-Administrative permissions granted.
-Enter the directory in which you want to install the OIBus binary (default: ./OIBus/):
-Enter the directory in which you want to save all your OIBus related data, caches, and logs (default: ./OIBusData/):
-Enter a username for your session. It will be used every time you log into OIBus (default: admin):
-Enter a name for your OIBus. It will help to identify your OIBus, and assist in potential troubleshooting (default: OIBus):
-Enter the port on which you want OIBus to run (default 2223):
-Installing oibus service...
-Service file successfully created. Enabling oibus service startup on system boot...
-Created symlink /etc/systemd/system/default.target.wants/oibus.service → /etc/systemd/system/oibus.service.
-Starting OIBus service...
-Setting oibus-uninstall.sh...
-Installation procedure completed !
-
-Useful commands:
-  Check service status: sudo systemctl status oibus
-  Check service-logs: sudo journalctl -u oibus -f
-
-Access OIBus: http://localhost:2223/
-```
-
-## Update
-To update OIBus, download the latest version, unzip the archive and run the setup script:
-```
-./oibus-setup.sh
-```
-
-During the installation process, fill in the OIBus directory and OIBus data directory with the already existing directories.
-At start, OIBus will automatically update the configuration file (oibus.json) and the cache structure if needed. Here is
-an output example, with the default values:
-
-```
-Administrative permissions are required to proceed. Do you wish to continue? (Y/n)
-Administrative permissions granted.
-Enter the directory in which you want to install the OIBus binary (default: ./OIBus/):
-Enter the directory in which you want to save all your OIBus related data, caches, and logs (default: ./OIBusData/):
-An oibus.json file was found. Do you want to use it for this OIBus? (Y/n)
-Stopping oibus service...
-Removed /etc/systemd/system/default.target.wants/oibus.service.
-The oibus service has been stopped and disabled!
-Installing oibus service...
-Service file successfully created. Enabling oibus service startup on system boot...
-Created symlink /etc/systemd/system/default.target.wants/oibus.service → /etc/systemd/system/oibus.service.
-Starting OIBus service...
-Setting oibus-uninstall.sh...
-Installation procedure completed !
-
-Useful commands:
-  Check service status: sudo systemctl status oibus
-  Check service-logs: sudo journalctl -u oibus -f
-```
-
-## Uninstall OIBus
-An uninstallation script has been created in the OIBus binary folder. Once in this folder, enter the following command:
-```
-./oibus-uninstall.sh
-```
-
-Here is an output example:
-
-```
-Administrative permissions are required to proceed with uninstall. Do you wish to continue ? (Y/n)
-Administrative permissions granted.
-Do you wish to remove all OIBus data (cache, logs...)? All data, credentials and logs about your current OIBus will be permanently erased. (y/N) y
-Removed /etc/systemd/system/default.target.wants/oibus.service.
-OIBus service was successfully removed.
-```
-
-By default, the data are kept if you want to reinstall OIBus later. You can remove them, but you will lose all credentials
-and the whole OIBus configuration.
-
-## Run OIBus in standalone
-If you want to run OIBus without installing it as a service, once the archive has been downloaded and unzipped, you can
-run the following command:
-
-```
-./oibus --config ./OIBusData/oibus.json
-```
-
-Make sure the OIBusData folder already exists. The cache, logs and configuration files are stored in this folder (in the
-example `OIBusData`).
diff --git a/documentation/versioned_docs/version-v2/guide/installation/_macos.mdx b/documentation/versioned_docs/version-v2/guide/installation/_macos.mdx
deleted file mode 100644
index e4f57f8f59..0000000000
--- a/documentation/versioned_docs/version-v2/guide/installation/_macos.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
-import CodeBlock from '@theme/CodeBlock';
-import DownloadButton from '@site/src/components/DownloadButton';
-
-{/* DownloadButton markup stripped: OIBus v2.11, MacOS (Intel chip) */}
-
-:::caution
-
-Macs with an Apple chip (M1) are not yet supported with binaries. However, it is possible to start OIBus from the
-[source code](https://github.com/OptimistikSAS/OIBus)!
-
-:::
-
-The macOS OIBus binary can be run directly from its executable. Once unzipped, start a Terminal and enter the following
-command:
-
-<CodeBlock>
-  {`./oibus --config ./oibus.json`}
-</CodeBlock>
-
-The `./oibus.json` file path must be adapted according to where the OIBus cache and configuration will be stored.
-
-For example, if the binary is stored in `/bin/` and the cache and configuration files are stored in
-`~/test/oibus-data`, the command will be:
-
-<CodeBlock>
-  {`/bin/oibus --config ~/test/oibus-data/oibus.json`}
-</CodeBlock>
-
-:::caution
-
-Be sure to have admin permissions to run the binary.
-
-:::
diff --git a/documentation/versioned_docs/version-v2/guide/installation/_windows.mdx b/documentation/versioned_docs/version-v2/guide/installation/_windows.mdx
deleted file mode 100644
index 9fa6abd140..0000000000
--- a/documentation/versioned_docs/version-v2/guide/installation/_windows.mdx
+++ /dev/null
@@ -1,70 +0,0 @@
-import DownloadButton from '../../../../src/components/DownloadButton';
-
-{/* DownloadButton markup stripped: OIBus v2.11 (installer), Windows (x64) */}
-
-## Installation
-### With the Windows Installer
-1. Run the Windows Installer; you should see the following welcome screen:
-
-![Installer welcome screen](./win_installer/1.png)
-
-2. Accept the EU-PL license and the OPCHDA license.
-
-3. Choose the path where you want to install the binaries.
-
-![Installer binaries path](./win_installer/2.png)
-
-4. Choose the path where you want to store the cache, logs and configuration files.
-
-![Installer data folder path](./win_installer/3.png)
-
-5. The next step lets you tune the following OIBus settings:
-- The OIBus name (default `OIBus`)
-- The admin name (default `admin`)
-- The port to use (default `2223`)
-
-![Installer settings](./win_installer/4.png)
-
-6. Validate the settings and wait for the installer to extract and copy the files into the appropriate folder.
-
-![Installer validation step](./win_installer/5.png)
-
-7. The final screen confirms the installation. You can click on the link at the bottom to directly access OIBus from its
-web interface.
-
-![Installer final step](./win_installer/6.png)
-
-:::caution Browser support
-
-Note that Internet Explorer is not supported.
-
-:::
-
-
-## Update
-### With the Windows Installer
-To update OIBus, you can run the OIBus Windows Installer again and specify the already existing executable and
-configuration paths. You can choose to keep the existing configuration file or overwrite it.
-
-During the update, the OIBus service will be stopped briefly.
-
-The configuration file `oibus.json` will be updated to its latest version during the first startup.
-
-### With binaries
-Alternatively, you can download the [zip file
-](https://github.com/OptimistikSAS/OIBus/releases/latest/download/OIBus-win32x64.zip) containing the OIBus executable
-and other dependencies.
-
-1. Go to the Windows service manager.
-2. Stop the OIBus service.
-3. Copy and paste the content of the zip file into the OIBus executable folder. Overwrite all existing files.
-4. Start the OIBus service.
-
-The configuration file `oibus.json` will be updated to its latest version during the first startup.
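-
-For reference, these manual steps can also be scripted with PowerShell. This is only a sketch: the service name
-`OIBus` and the paths are assumptions to adapt to your own installation.
-
-```powershell
-# Assumed service name and paths - adjust to your installation.
-Stop-Service OIBus
-# Overwrite the executable folder with the content of the downloaded archive.
-Expand-Archive -Path .\OIBus-win32x64.zip -DestinationPath 'C:\OIBus' -Force
-Start-Service OIBus
-```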
diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/1.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/1.png deleted file mode 100644 index 92c9800d2c..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/1.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/2.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/2.png deleted file mode 100644 index cab9bd51b9..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/2.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/3.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/3.png deleted file mode 100644 index 937ab4a092..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/3.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/4.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/4.png deleted file mode 100644 index 0dba68a2de..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/4.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/5.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/5.png deleted file mode 100644 index 843e2b9ae1..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/5.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/installation/win_installer/6.png b/documentation/versioned_docs/version-v2/guide/installation/win_installer/6.png deleted file mode 100644 index f232d51e93..0000000000 Binary files a/documentation/versioned_docs/version-v2/guide/installation/win_installer/6.png and /dev/null differ diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/_category_.json b/documentation/versioned_docs/version-v2/guide/north-connectors/_category_.json deleted file mode 100644 index 39db39272f..0000000000 --- a/documentation/versioned_docs/version-v2/guide/north-connectors/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "North connectors", - "position": 5, - "link": { - "type": "generated-index", - "description": "Learn how to use and set up north connectors." 
- }
-}
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/aws-s3.md b/documentation/versioned_docs/version-v2/guide/north-connectors/aws-s3.md
deleted file mode 100644
index 3dc2ab9724..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/aws-s3.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-sidebar_position: 5
----
-
-# AWS S3
-The AWS S3 connector simply writes files received from South connectors to the specified S3 bucket:
-- **Bucket**: name of the bucket
-- **Region**: region where the bucket is (example: `eu-west-3`)
-- **Folder**: the bucket folder to store the files in
-- **Key**: the authentication key used to connect to the Amazon S3 bucket
-- **Secret**: the secret associated with the key
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/common-settings.md b/documentation/versioned_docs/version-v2/guide/north-connectors/common-settings.md
deleted file mode 100644
index 8d7c6110bc..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/common-settings.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-sidebar_position: 1
----
-
-# Common North settings
-## Add a connector
-To add a North connector, click on the Engine menu, and select _Add North_. Select one of the available North connector
-types, name the connector and validate by clicking on _Add_.
-
-## Main settings
-The window may change depending on the selected type of connector. However, some concepts are the same. The
-navigation sub-menu (the grey bar at the page top) allows you to edit the connector name (top left) or to access the
-status of the connector (top right).
-
-The status window displays several metrics, according to the connector.
-
-The connector can be enabled or disabled from the toggle action, at the top of the **General settings** section.
-
-## Caching
-Refer to [this page](../engine/cache-and-archive.md) to understand how and why OIBus manages caches.
-
-The caching section allows OIBus to better manage network congestion:
-- **Send interval**: time to wait between successive sends of data to a North (in ms).
-- **Retry interval**: time to wait before retrying to send data to a North after a failure (in ms).
-- **Group count**: instead of waiting for _Send interval_, trigger the North connector to send the data as soon as the
-number of data points to send reaches this number.
-- **Max group count**: when the connection is lost for some time, the cache of a North connector can store a large
-amount of data. To avoid sending it all at once, this field can be set to split the data into several smaller chunks,
-sent at _Send interval_ intervals.
-
-It is also possible to enable archive mode, and to set a retention duration. With archive mode enabled, files will be
-kept in the `archive` subfolder. Otherwise, they are deleted once sent to the North application.
-
-:::caution Disk space
-
-If you choose to keep files indefinitely, be careful to manually clear the archive folder from time to time. Otherwise,
-the archive folder may use a lot of disk space.
-
-:::
-
-## Subscription
-By default, a North connector receives data from all activated South connectors. It is possible to subscribe a
-North connector to a specific South connector (or a list of South connectors). In the Subscription section, add a South
-connector. Only data from this South connector will be added to the cache of this North connector; all other South data
-will be discarded, or sent to other North connectors if they are active.
-
-:::info No data for disabled North
-
-If a North connector is disabled, it won't store any data in its cache.
-
-:::
-
-## Network
-If some proxies are defined in the [Engine section](../engine/proxy.md), it is possible to select a proxy for the
-North connector to use when an HTTP query occurs. This setting is only available for HTTP-compatible North connectors
-(OIAnalytics, OIConnect).
-
-
-
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/console.md b/documentation/versioned_docs/version-v2/guide/north-connectors/console.md
deleted file mode 100644
index 3f0dc6ba23..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/console.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-sidebar_position: 6
----
-
-# Console
-The console is mainly used for development or troubleshooting purposes.
-
-It only accepts one option: **verbose**. When verbose is enabled, received data are fully displayed in tables in
-the console. Otherwise, only the number of values received by this North connector is displayed.
-
-## Display Console in production
-In production, especially on Windows or Linux, you may have OIBus running as a service. In this case, to see the
-console output:
-- Stop the service
-- Run OIBus from a terminal with admin access from its installation folder:
-  - On Windows: run the `go.bat` script
-  - On Linux: run the `go.sh` script
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/file-writer.md b/documentation/versioned_docs/version-v2/guide/north-connectors/file-writer.md
deleted file mode 100644
index 3c49240a34..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/file-writer.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-sidebar_position: 4
----
-
-# File Writer
-The File Writer connector simply writes what is received from the South into the specified folder:
-- **Output folder**: the folder to store the files into
-- **Prefix file name**: add a prefix to the file name
-- **Suffix file name**: add a suffix to the file name (before the file extension)
-
-For JSON payloads, the JSON is stored in a JSON file. For example:
-
-````json
-[{"timestamp":"2020-01-01T00:00:00.000Z","data":"{ value: 28 }","pointId":"MyPointId1"}]
-````
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/oianalytics.md b/documentation/versioned_docs/version-v2/guide/north-connectors/oianalytics.md
deleted file mode 100644
index aba1b1f4ef..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/oianalytics.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-sidebar_position: 2
----
-
-# OIAnalytics
-
-The OIAnalytics SaaS application manages both JSON and file payloads. JSON payloads are points retrieved from a
-South point protocol (OPCUA, MQTT) and formatted to follow the OIAnalytics API specification.
-
-Files are sent as they are received by the North (compressed or not). OIAnalytics can manage CSV, TXT or XLSX files.
-There is no need to transform these files: thanks to the OIAnalytics file parsers, the parsing happens directly on the
-SaaS application according to its configuration.
-
-To send data (JSON or files) to OIAnalytics, the following fields must be filled:
-- **Host**: the hostname of the SaaS application (example: `https://optimistik.oianalytics.com`)
-- **Authentication type**: only _Basic_ is supported by OIAnalytics
-- **Username**: the username to connect with
-- **Password**: the password associated with the username
-
-The user used to send data to OIAnalytics must have API access, since the data will be sent through the API. The API
-**does not use standard login and password**. Instead, an API key must be created on OIAnalytics:
-- Go to Configuration -> Users and click on the key icon of the user for which you want to create an API key.
-- Create an API key. Copy and store the key and its password somewhere safe.
-- In OIBus, use the key as the username and its password as the password.
-
-![OIAnalytics API Key gen](@site/static/img/guide/north/oianalytics/oia-api-key-gen.png)
-
-:::danger Password retrieval
-
-The API key generation is the only time you will be able to copy the password. If you lose it, a new API key must be
-generated.
-
-:::
-
-:::tip API user
-
-We suggest creating a dedicated API user in OIAnalytics (with only API access) and giving each OIBus a dedicated API
-key, in case one of the keys must be revoked.
-
-:::
diff --git a/documentation/versioned_docs/version-v2/guide/north-connectors/oiconnect.md b/documentation/versioned_docs/version-v2/guide/north-connectors/oiconnect.md
deleted file mode 100644
index d1934bc586..0000000000
--- a/documentation/versioned_docs/version-v2/guide/north-connectors/oiconnect.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-sidebar_position: 3
----
-
-# OIConnect
-OIConnect is a North connector used to send both files and JSON payloads to a REST API endpoint (one for JSON, one for
-files). The files are not transformed; they are sent as they are received by the North (compressed or not).
-
-
-## Connection
-To send data (JSON or files) to the target application, the following fields must be filled:
-- **Host**: the hostname of the application (example: `https://myapp.mycompany.com`)
-- **Values endpoint**: the endpoint that will receive JSON payloads [(see JSON payload section)](#json-payload)
-- **File endpoint**: the endpoint that will receive files
-- **Authentication type**: Basic, Bearer, Api key (custom)
-- **Username** (for _Basic_): the username to connect with
-- **Password** (for _Basic_): the password associated with the username
-- **Token** (for _Bearer_): the token to use in the HTTP header
-- **Key** (for _API key_): the name of the key field in the HTTP header
-- **Secret** (for _API key_): the value associated with the key field in the HTTP header
-
-## JSON payload
-The target application must be able to manage the payload that OIConnect sends. Here is a payload example:
-````json
-[
-  {
-    "timestamp": "2020-01-01T00:00:00.000Z",
-    "data": "{ value: 28 }",
-    "pointId": "MyPointId1"
-  }
-]
-````
-
-## Query param
-A query param is added to the HTTP query. It is called _name_ and can be used to identify the source of the data.
-Its value has the form _`<OIBus name>:<connector name>`_, as in the example below.
-
-Example of an HTTP query: `http://1.2.3.4:2223/engine/addValues?name=MyOIBus:MyOIConnect`
-
-
-## Connecting two OIBus together
-See [this doc](../advanced/oibus-to-oibus.md) to learn more on how to connect one OIBus to another.
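-
-To test such a values endpoint manually (including the _name_ query param shown above), the query OIConnect performs
-can be approximated with cURL. This is only a sketch: the URL, credentials and payload are placeholders to adapt.
-
-```
-curl -X POST 'http://1.2.3.4:2223/engine/addValues?name=MyOIBus:MyOIConnect' \
-  -u 'username:password' \
-  -H 'Content-Type: application/json' \
-  -d '[{"timestamp":"2020-01-01T00:00:00.000Z","data":"{ value: 28 }","pointId":"MyPointId1"}]'
-```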
diff --git a/documentation/versioned_docs/version-v2/guide/oibus-EN.png b/documentation/versioned_docs/version-v2/guide/oibus-EN.png
deleted file mode 100644
index 9924bf6644..0000000000
Binary files a/documentation/versioned_docs/version-v2/guide/oibus-EN.png and /dev/null differ
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/_category_.json b/documentation/versioned_docs/version-v2/guide/south-connectors/_category_.json
deleted file mode 100644
index 0e9b937dbf..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "South connectors",
-  "position": 4,
-  "link": {
-    "type": "generated-index",
-    "description": "Learn how to use and set up south connectors."
-  }
-}
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/ads.md b/documentation/versioned_docs/version-v2/guide/south-connectors/ads.md
deleted file mode 100644
index dfca8a66c1..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/ads.md
+++ /dev/null
@@ -1,103 +0,0 @@
----
-sidebar_position: 7
----
-
-# ADS - TwinCAT
-The ADS protocol (Automation Device Specification) is a transport layer within TwinCAT systems, developed by
-Beckhoff.
-
-Each data item is referenced by a unique address within the controller and can be accessed by OIBus with the ADS
-connector.
-
-OIBus uses the [ads-client](https://github.com/jisotalo/ads-client) library.
-
-## Connection settings
-The AMS Router is the entity which connects ADS clients (OIBus) to PLCs and the TwinCAT runtime. It is what allows
-OIBus to access PLC data.
-
-Depending on the AMS Router location, several setups are possible.
-
-### With local TwinCAT runtime
-When TwinCAT is installed on the same machine as OIBus, the ADS connector can use the same TwinCAT runtime and directly
-communicate with the PLC, with its Net ID and ADS Port.
-
-The Net ID is an IP-like address with two additional numbers. Usually, the Net ID is the IP address through which the
-PLC is reached on the network, with two additional numbers to address the appropriate PLC (several PLCs can be accessed
-from one AMS Router), for example `127.0.0.1.1.1`.
-
-The port is the one used to contact the PLC from the AMS Router (by default 851).
-
-### With remote ADS server
-
-For a remote ADS server, the Net ID and the ADS Port are still required, and other fields are needed:
-- **Router address**: the IP address (or domain name) of the AMS router
-- **Router TCP port**: the port used by the AMS router
-- **Client AMS Net ID**: a client identifier used to identify a connection with the TwinCAT runtime.
-- **Client ADS port** (optional): the port used by the client to exchange data. If empty, it is assigned randomly by the
-AMS server. If filled, be sure that the port is not used by another client.
-
-The TwinCAT runtime must accept the communication from the ADS connector. To do so, Static Routes must be added in the
-_TwinCAT Static Routes_ tool. The following example accepts two routes whose AmsNetId is to be used on the OIBus
-side. It is important that each AmsNetId is used through the IP address specified.
-
-![TwinCAT Static Routes tool](@site/static/img/guide/south/ads/installation-ads-distant.png)
-
-:::danger Multiple ADS connectors
-
-Only one remote ADS connector can be set for OIBus.
If two ADS connectors are needed to connect two PLCs, use a local
-ADS server (available by default if OIBus is installed on the same machine as
-[the TwinCAT runtime](#with-local-twincat-runtime)).
-
-:::
-
-## Data settings and structures
-### Points list
-The ADS connector retrieves values from specific addresses. These can be added in the Points section (in the upper right
-corner).
-
-In this list, points can be added with:
-- **Point ID**: the address of the data in the targeted PLC (example: `GVL_Test1.TestINT`)
-- **Scan mode**: the request frequency. To define more scan modes, see [Engine settings](../engine/scan-modes.md).
-
-### Data settings
-#### PLC name
-In the case where data from similar PLCs (sharing the same point addresses for example) are retrieved from two ADS
-connectors and sent to the same North, the values will have the same point ID even though they come from two different
-PLCs.
-
-To avoid this ambiguity, the _PLC name_ can be added in front of each point ID once the data is retrieved. In this way,
-the point IDs sent to the North connector will be differentiated.
-
-:::tip Example
-
-If PLC1 has the PLC name _PLC001._ (the dot is included in the name) and the point ID is MyVariable.Value, the
-resulting point ID once the values are retrieved will be _PLC001.MyVariable.Value_. This distinguishes the data from
-PLC2, which will have the resulting point ID _PLC002.MyVariable.Value_.
-
-:::
-
-#### Enumerations and booleans
-An enumeration can be retrieved as an integer or as a character string (the PLC knows both thanks to its programming).
-
-A boolean value can be retrieved as an integer or a string (with 0 = false, 1 = true).
-
-#### Structures
-
-It is also possible to query an entire data structure. For example, if the data **MyVariable** is of type _MyStructure_ and
-has the following fields:
-- MyDate
-- MyNumber
-- Value
-
-If only _MyDate_ and _MyNumber_ must be retrieved, a new structure can be added in the _ADS structures section_ with
-the name _MyStructure_, and only these two fields can be specified in the fields part, separated by commas:
-_MyDate,MyNumber_
-
-This is especially useful when several data items (here **MyVariable**) are of type _MyStructure_, and only a few fields
-of the structure are requested (here _MyDate_ and _MyNumber_). The more fields the structure has, the more useful this
-feature is.
-
-In the end, each field will give a unique resulting point ID. In the previous example, this will give for the single
-point **MyVariable** the following two points:
-- MyVariable.MyDate
-- MyVariable.MyNumber
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/common-settings.md b/documentation/versioned_docs/version-v2/guide/south-connectors/common-settings.md
deleted file mode 100644
index e880db9835..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/common-settings.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-sidebar_position: 1
----
-
-# Common South settings
-## Add a connector
-To add a South connector, click on the Engine menu, and select _Add South_. Select one of the available South
-connector types, name the connector and validate by clicking on _Add_.
-
-## Main settings
-The window may change depending on the type of connector selected. However, some concepts are the same. The
-navigation submenu (the gray bar at the page top) allows you to change the connector name (top left) or to access the
-status of the connector (top right).
-
-The status window displays several metrics, depending on the connector.
-
-The connector can be enabled or disabled from the toggle action at the top of the **General Settings** section.
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/folder-scanner.md b/documentation/versioned_docs/version-v2/guide/south-connectors/folder-scanner.md
deleted file mode 100644
index 7f4c67329c..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/folder-scanner.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-sidebar_position: 3
----
-
-# Folder Scanner
-
-The Folder Scanner connector periodically checks the input folder for new files at an interval specified by the scan
-mode. When a new file is detected, it is sent to any North capable of handling files and configured to accept files
-from this South.
-
-## Settings
-### Input folder
-The folder path must be entered in the _Input folder_ field. The path can be absolute or relative. Be careful:
-**the path is case-sensitive**.
-
-:::tip Relative path
-
-The relative path is computed from the cache folder (mentioned in the About section, _configuration directory_ field).
-
-:::
-
-:::danger User access
-
-The user running OIBus (the logged-in user when OIBus runs from a terminal, the service session when OIBus runs as a
-service) must have read access to the input folder to be able to read the files.
-
-:::
-
-OIBus can also read folders from a remote location. To do so, a remote path can be specified (example:
-`/remote.server/data` or `D:\\Remote disk\\DATA`). Be sure to have access to this folder (network and authentication).
-
-#### Preserve File and modified date
-When _Preserve File?_ is checked, retrieved files are kept in the folder. Otherwise, they are deleted once copied into
-the OIBus cache.
-
-:::info Important
-
-When the file is not preserved, OIBus moves it from the input folder to its cache, which means the file is deleted
-from the input folder. For this reason, OIBus also needs write access. Otherwise, the file will only be copied.
-
-:::
-
-When this field is checked, OIBus keeps track of the modification date of files already retrieved. It will only retrieve
-a file if its modification date has changed.
-
-If _Ignore modified date_ is checked, files will be resent each time the folder is scanned, regardless of when the file
-was modified. This field is not used when _Preserve File?_ is not checked.
-
-### Filtering
-#### RegExp
-A RegExp can be used to retrieve only certain files matching the regular expression.
-- `.*` retrieves all files of the input folder
-- `.*.txt` retrieves all txt files of the input folder
-- `.*.csv` retrieves all csv files of the input folder
-- `.csv||.xlsx` retrieves all csv or xlsx files of the input folder
-
-#### Minimum age
-Writing large files to the input folder can take some time. To avoid retrieving a corrupted file (because it is
-being written), the _Minimum Age_ field can be adjusted. By default, OIBus recovers files that have been written more
-than one second ago.
-
-### Compression
-By default, files are retrieved exactly as they are in the input folder. They can be compressed to reduce their size
-during transfer. If enabled, files stored in the OIBus cache are compressed too.
-
-:::danger Important
-
-When compression is enabled, OIBus writes the compressed file to the input folder. Therefore, OIBus also needs
-write access. Otherwise, the compression will be ignored and the raw file will be copied.
-
-:::
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/modbus.md b/documentation/versioned_docs/version-v2/guide/south-connectors/modbus.md
deleted file mode 100644
index 2ab24ebf82..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/modbus.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-sidebar_position: 6
----
-
-# Modbus
-
-Modbus is a communication protocol used for PLC networks. Historically, it was designed for communication on a serial
-interface (RS232, RS422, RS485) and was extended to support TCP mode.
-
-OIBus uses the [jsmodbus](https://github.com/Cloud-Automation/node-modbus) library **in TCP mode only**.
-
-## Connection settings
-In TCP mode, Modbus sets up a client-server connection: the server provides data referenced by addresses but
-remains passive. It is the Modbus client that fetches the data values. The Modbus connector is a Modbus client. It is
-therefore necessary to indicate to the connector:
-- The **host** (IP address or hostname of the Modbus server machine)
-- The **port** (502 by default)
-- The **slave ID** to identify the Modbus source machine if necessary (1 by default).
-
-
-## PLC settings
-Depending on the PLC, several settings control how the data is accessed. These settings are common to the whole PLC:
-- **Address offset**: For most PLCs, there is no offset (_Modbus_ option). Some PLCs start the address range at 1
-instead of 0 (_JBus_ option).
-- **Endianness**: Indicates the byte order (Big Endian or Little Endian)
-- **Swap Bytes**: Indicates whether the bytes within a group of 16 bits (a word) should be swapped or not
-- **Swap Words**: Indicates whether the words (16-bit groups) should be swapped or not within a 32-bit group.
-
-## Points and Modbus addresses
-The Modbus connector retrieves values from specific addresses. These can be added in the Points section (in the upper right
-corner).
-
-In this list, points can be added with:
-- **Point ID**: the name of the data in the result (example: `My point variable`)
-- **Address**: the address of the data in the machine
-- **Modbus type**: _coil_, _discrete input_, _input register_ or _holding register_ (default)
-- **Data type**: Used in case of _holding registers_ or _input registers_ (ignored otherwise). This parameter indicates
-the type of data retrieved from the register: UInt16 (default), Int16, UInt32, Int32, UInt64, Int64, Float or Double.
-- **Multiplier Coefficient** (default 1)
-- **Scan mode**: the request frequency. To define more scan modes, see [Engine settings](../engine/scan-modes.md).
-
-The address corresponds to the address of the variable in the PLC, **in hexadecimal without the data type digit**
-(see the sketch after the examples below). For example:
-- For the _holding register_ data 0x40001, enter the address 0x0001 (omit the digit `4`) and specify the Modbus type
- _holdingRegister_.
-- For the _coil_ data 0x009C, enter 0x009C and specify the Modbus type _coil_.
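-
-As an illustration only (this helper is not part of OIBus), the conversion can be sketched as follows:
-
-```js
-// Hypothetical helper: split a Modicon address into its Modbus-type digit
-// and the hexadecimal offset expected by the connector.
-function toConnectorAddress(modiconAddress) {
-  const hex = modiconAddress.toString(16).padStart(5, '0');
-  return { typeDigit: hex[0], address: '0x' + hex.slice(1) };
-}
-
-console.log(toConnectorAddress(0x40001)); // { typeDigit: '4', address: '0x0001' }
-console.log(toConnectorAddress(0x009C)); // { typeDigit: '0', address: '0x009c' }
-```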
-
-Modbus data addresses follow the [**Modicon Convention Notation** (MCN)](https://www.modbus.org/docs/PI_MBUS_300.pdf):
-- Coil: `[0x00001 - 0x09999]`; from 1 to 39,321
-- Discrete Input: `[0x10001 - 0x19999]`; from 65,537 to 104,857
-- Input Register: `[0x30001 - 0x39999]`; from 196,609 to 235,929
-- Holding Register: `[0x40001 - 0x49999]`; from 262,145 to 301,465
-
-An extended version of MCN allows the user to specify larger address spaces:
-- Coil: `[0x000001 - 0x065535]`
-- Discrete Input: `[0x100001 - 0x165535]`
-- Input Register: `[0x300001 - 0x365535]`
-- Holding Register: `[0x400001 - 0x465535]`
-
-
-
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/mqtt.md b/documentation/versioned_docs/version-v2/guide/south-connectors/mqtt.md
deleted file mode 100644
index be35b4f2ca..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/mqtt.md
+++ /dev/null
@@ -1,138 +0,0 @@
----
-sidebar_position: 8
----
-
-# MQTT
-MQTT (Message Queuing Telemetry Transport) is a real-time messaging protocol for exchanging data by topic with a
-_publish / subscribe_ approach. A topic is a point (or a set of points) representing a piece of data (or a set of data).
-
-The MQTT protocol is made up of two main types of entities:
-- The MQTT broker: collects and makes available the requested data. It plays the role of the server.
-- The MQTT client: it can publish data to a broker and subscribe to topics to receive data from a broker.
-
-OIBus is an MQTT client and uses the [MQTT.js](https://github.com/mqttjs/MQTT.js) library.
-
-## Connection settings
-### Connection
-To connect to a broker, the MQTT connector requires some information:
-- **URL**: of the form `mqtt://address:1883`. The default MQTT port is 1883 but may differ depending on the broker’s
-configuration.
-- **Quality of service (QoS)**: agreement between the sender of a message and the receiver of a message that defines the
-guarantee of delivery for a specific message. Three modes are available:
-  - QoS 0: at most once. This means that the message is sent once but MQTT does not guarantee that the message will be
-received correctly. Correct reception depends on the quality of the underlying network.
-  - QoS 1: at least once. This means that the message is sent multiple times as duplicates until the client acknowledges
-the correct receipt of at least one of the duplicates. In some cases, the client may receive the same message more than
-once.
-  - QoS 2: exactly once. This means that the message is delivered only once: a new attempt to send the message takes
-place after a certain time until the client confirms correct reception. There is no risk of multiple receptions in this
-case.
-- **Persistence**: Both QoS 1 and QoS 2 allow persistent connections. A persistent connection allows the broker to keep a
-certain number of messages in memory (set on the broker) until the client reconnects in case of connection loss.
-Persistence is ignored for QoS 0.
-
-### Network
-Several options are available to better manage network failure or inactivity:
-- **Keep alive interval**: time between two keep-alive signals sent to the broker (in ms)
-- **Reconnect period**: in case of connection failure, time to wait before reconnecting (in ms)
-- **Connect timeout**: time to wait before aborting connection (in ms)
-
-### Authentication
-#### Basic
-If only the username and password fields are filled, the MQTT connector will connect to the broker using username and
-password only.
The username must be defined on the broker.
-
-#### Certificates
-A certificate file can be used with the `mqtts://` protocol to secure the communication:
-- Cert file: a signed file used to authenticate OIBus to the broker. The certificate must be accepted by the broker.
-- Key file: the key used to sign the certificate (cert file)
-- CA file: the certificate authority used to generate the cert file. If empty, the cert file is considered
-self-signed.
-
-The broker also uses certificates. An option to **reject unauthorized connections** can be used to reject self-signed
-certificates or obsolete certificates from the broker.
-
-#### None
-If no username / password and no certificates are specified, an anonymous connection will be established (if authorized
-on the broker).
-
-## Points and topics
-### Address space
-The MQTT connector retrieves values from specific topics. These can be added in the _Points section_ (in the upper right
-corner). A topic is the address associated with a piece of data in the broker address space.
-
-The broker organizes the data in tree structures. Here is an example:
-
-````
-France
-    | -> Paris
-        | -> temperatureTank1
-        | -> temperatureTank2
-    | -> Chambery
-        | -> temperatureTank1
-        | -> temperatureTank2
-````
-
-
-It is then possible to subscribe to a dataset by entering a parent node, for example `France/#` or `France/Chambery/#`.
-It is also possible to directly enter a complete path, for example `France/Paris/temperatureTank1` to subscribe to only
-one data item.
-
-### Subscriptions
-When an MQTT client subscribes to a data item on the broker, the broker sends the new values as soon as they are
-available to each client subscribed to this data. The scan mode is therefore always **listen**. It is a subscription
-of the MQTT client, which waits for the broker to send it new values.
-
-Once the value is retrieved, it is associated with the point ID (which can be different from the topic). The point ID
-will then be used by the North connectors to which the value is sent.
-
-When subscribing to a set of points as with `France/#`, the list of topics retrieved will be:
-- France/Paris/temperatureTank1
-- France/Paris/temperatureTank2
-- France/Chambery/temperatureTank1
-- France/Chambery/temperatureTank2
-
-## Payload and timestamp
-The payload contained in the messages sent by the broker may differ from broker to broker. The OIBus MQTT client can
-adapt to this payload through the **MQTT payload** section.
-
-For example, if the payload is:
-````
-{
-    "pointId": "point1",
-    "value": "666.666",
-    "timestamp": "2020-02-02 02:02:02",
-    "quality": "true"
-}
-````
-
-Then the following configuration must be applied:
-- **Data array path**: _Ø_
-- **Value path**: _value_
-- **Point ID path**: _Ø_ (default is pointId)
-- **Timestamp path**: _timestamp_
-- **Quality path**: _quality_
-
-Another example, with a payload such as:
-````
-"metrics": [
-    {
-        "customName": "point1",
-        "customValue": "666.666",
-        "customTimestamp": "2020-02-02 02:02:02",
-        "customQuality": "true"
-    }
-]
-````
-Then the following configuration must be applied:
-- **Data array path**: _metrics_. Each element of the _metrics_ array will be parsed using the following fields.
-- **Value path**: _customValue_
-- **Point ID path**: _customName_
-- **Timestamp path**: _customTimestamp_
-- **Quality path**: _customQuality_
-
-Sometimes, the timestamp is not in the payload. In this case, the _OIBus_ option can be selected in the **timestamp
-origin** field.
The resulting timestamp will be the one from the OIBus machine in UTC format.
-
-When the timestamp is retrieved from the payload, it is parsed according to the specified **timestamp format**, and
-converted to UTC from the specified **timestamp timezone**.
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/opchda.md b/documentation/versioned_docs/version-v2/guide/south-connectors/opchda.md
deleted file mode 100644
index 352ff44fd7..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/opchda.md
+++ /dev/null
@@ -1,79 +0,0 @@
----
-sidebar_position: 5
----
-
-# OPCHDA (Windows only)
-OPCDA and OPCHDA are communication protocols used in the industrial world and developed by the
-[OPC Foundation](https://opcfoundation.org/). This technology has been replaced by OPCUA but is still widely used in
-the industry. To use OPCUA in OIBus, see the [OPCUA connector documentation](../south-connectors/opcua.md).
-
-An HDA server allows retrieving the history of data over a given period, while a DA server only allows retrieving
-the most recent value of a tag.
-
-Only OPCHDA is supported by OIBus. OIBus uses an HDA agent, i.e. a module integrated into OIBus but also available
-standalone, to perform OPC history extractions from the command line. See the
-[OPCHDA agent documentation](../advanced/opchda-agent.md) to use the agent in standalone.
-
-Both the OPCHDA connector and the standalone agent are available under Windows only and use Microsoft’s proprietary DCOM
-technology to transfer information over the network. This technology is much more complex to set up than traditional TCP
-communications. A dedicated guide is offered [here](../advanced/opchda-dcom.md) to correctly set up HDA communications
-with COM/DCOM interfaces.
-
-
-## OPCHDA connector
-OIBus uses an HDA agent, compiled for Windows platforms, to interact with COM/DCOM interfaces. The HDA agent can also be
-used [in standalone](../advanced/opchda-agent.md).
-
-### HDA Agent section
-OIBus exchanges commands and data with the HDA agent through a TCP server/client communication. Therefore, several
-fields must be filled to make OIBus communicate with the HDA Agent:
-- **Agent filename**: the file path of the HDA Agent. By default, the HDA agent is in the same folder as the OIBus binary.
-- **TCP port**: the TCP port that the HDA Agent will use to create its own TCP server. If you need two OPCHDA connectors,
-be careful to have two distinct TCP ports to avoid conflicts.
-- **Logging level**: the log level the HDA Agent will use. If the HDA agent log level is lower than the OIBus log level,
-the lowest-level logs will be lost. See the [Engine log section](../engine/logging-parameters.md) to know more about logging parameters.
-
-### Connection and network
-Some information is required to connect to the OPCHDA server:
-- **Host**: the hostname or its IP address
-- **Server name**: the name of the OPCHDA server
-
-Several options are available to better manage network failure or inactivity:
-- **Retry interval**: in case of connection failure, time to wait before reconnecting (in ms)
-- **Max read interval**: split the request interval into smaller chunks (in s; see the example after this list)
-- **Read interval delay**: time to wait (in ms) between two sub-intervals in case a split occurs (ignored otherwise)
-- **Max return values**: max number of values to retrieve **per node**. If 100 nodes are requested, this value is
-multiplied by 100 to have the total number of values retrieved.
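-
-For instance (values chosen purely for illustration), a 24-hour history extraction with _Max read interval_ set to
-3600 s is split into 24 one-hour sub-queries, each separated by the configured _Read interval delay_.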
-
-
-### Accessing data
-#### Scan groups
-OIBus retrieves data by intervals. It is then possible to aggregate these values or to resample them. To do so, a scan
-mode must be selected (to create additional scan modes, see [Engine settings](../engine/scan-modes.md)), with its
-associated aggregate and resampling options.
-
-:::info Creating scan groups
-
-Creating scan groups is mandatory to choose them in the _Points_ section when adding new points to request.
-
-:::
-
-:::danger Compatibility with the OPCHDA server
-
-Not every aggregate and resampling option is supported by OPCHDA servers. _Raw_ aggregate and _None_ resampling are
-preferred to avoid compatibility issues.
-
-:::
-
-
-#### Points and nodes
-The OPCHDA connector retrieves values from specific addresses. Addresses (called node ID, or just node) are organized in
-namespaces, in a tree-like structure. These can be added in the _Points section_ (in the upper right corner).
-
-To request a data item, specify the following fields:
-- Point ID
-- Node ID
-- Scan group
-
-The Node ID matches the path of the data in the appropriate namespace in the OPCHDA server. The point ID will be used
-when the data is sent to North connectors. It can be the same as the Node ID, but it allows friendlier names to be managed.
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/opcua.md b/documentation/versioned_docs/version-v2/guide/south-connectors/opcua.md
deleted file mode 100644
index c6332bd472..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/opcua.md
+++ /dev/null
@@ -1,138 +0,0 @@
----
-sidebar_position: 4
----
-
-# OPCUA
-
-OPCUA technology is a protocol for accessing data in read or write mode. The data are organized in a tree-like address
-space, each referenced by a unique address (called a node ID). OPCUA is a modern standard based on TCP, replacing
-OPC HDA/DA (see [OPCHDA connector](../south-connectors/opchda.md)) technologies, and is often embedded natively in industrial controllers.
-
-OPCUA embeds two variants of the protocol: HA (Historical Access) and DA (Data Access). The first mode allows access to
-a history of values over a time interval for the requested points (data), while the second mode accesses the values at
-each request.
-
-OIBus integrates the two OPCUA modes (HA and DA) in read-only mode. Each mode has its own connector.
-The [node-opcua](https://github.com/node-opcua/node-opcua) library is used.
-
-## Connection settings
-To connect to an OPCUA server, OIBus needs a URL which is composed of several parts:
-- The protocol: `opc.tcp://` (for now, only this one is supported)
-- The host or IP address
-- The port number
-- The endpoint (or server name)
-
-Together, these elements form a URL. Here is an example: `opc.tcp://localhost:53530/OPCUA/MyServer`
-
-### Network
-Several options are available to better manage network failure or inactivity:
-- **Retry interval**: in case of connection failure, time to wait before reconnecting (in ms)
-- **Read timeout** (HA only): time to wait before aborting a read request (in ms). A timeout may happen if the read request
-retrieves too many values at once or if the network has a problem.
-- **Max read interval** (HA only): split the request interval into smaller chunks (in s)
-- **Read interval delay** (HA only): time to wait (in ms) between two sub-intervals in case a split occurs (ignored
-otherwise)
-- **Max return values** (HA only): max number of values to retrieve **per node**.
If 100 nodes are requested, this value
-is multiplied by 100 to have the total number of values retrieved.
-
-## Security settings
-### Communication
-The communications can be secured thanks to the **security mode** and **security policy** fields. Available modes are:
-- None
-- Sign
-- SignAndEncrypt
-
-OIBus supports the following policies:
-- None
-- Basic128
-- Basic192
-- Basic256
-- Basic128Rsa15
-- Basic192Rsa15
-- Basic256Rsa15
-- Basic256Sha256
-- Aes128_Sha256_RsaOaep
-- PubSub_Aes128_CTR
-- PubSub_Aes256_CTR
-
-
-:::info Compatibility with the OPCUA server
-
-Be careful to select a security mode and a security policy supported by the OPCUA server!
-
-:::
-
-If a security mode other than _None_ is used, a certificate will be needed to sign and possibly encrypt the
-communications. A self-signed certificate called **OIBus** (generated by OIBus at startup) is used to secure the
-communication with the OPCUA server. It must be trusted by the OPCUA server to allow communication.
-
-:::info Example on Prosys OPCUA Simulation Server
-
-![Prosys OPCUA Simulation Server Certificates](@site/static/img/guide/south/opcua/prosys-opcua-simulation-server-certificates.png)
-
-If the certificate is not trusted, an error will occur: `Error: The connection may have been rejected by server`
-:::
-
-### Authentication
-If the certificate and key paths are empty, OIBus will try user authentication with username and password. The
-username and password must exist on the OPCUA server.
-
-If the username and password are empty, an anonymous authentication will be used. In this case, Anonymous authentication
-must be accepted by the OPCUA server.
-
-The certificate, used to authenticate the client, must be added to the trusted user certificates of the OPCUA server. It
-is managed differently from the **OIBus** certificate mentioned before, used for [secure communication](#communication).
-
-:::info Example on Prosys OPCUA Simulation Server
-
-For Prosys, the certificate used to authenticate OIBus must be placed in the `.prosysopc\prosys-opc-ua-simulation-server\USERS_PKI\CA\certs`
-folder. Otherwise, an error will occur: `Error: serviceResult = BadIdentityTokenRejected (0x80210000)`.
-
-If a connection has already been tried and rejected, the certificate must be removed from the rejected certificates'
-folder `.prosysopc\prosys-opc-ua-simulation-server\USERS_PKI\CA\rejected` and be placed in the trusted folder
-(`.prosysopc\prosys-opc-ua-simulation-server\USERS_PKI\CA\certs`).
-:::
-
-:::tip Use the same certificate for user authentication and secure communications
-
-The same certificate can be used. To do that, the cert.pem and privateKey.pem file paths must be specified. They are
-located in the cache/certs folder of OIBus.
-
-On the OPCUA server side, the **OIBus** certificate (cert.pem) must be copied into the user certificates' folder.
-
-For example, with Prosys OPCUA Simulation Server: `.prosysopc\prosys-opc-ua-simulation-server\USERS_PKI\CA\certs`.
-
-:::
-
-## Accessing data
-### Scan groups (HA only)
-With HA mode, data are retrieved by intervals. It is then possible to aggregate these values or to resample them. To do
-so, a scan mode must be selected (to create additional scan modes, see [Engine settings](../engine/scan-modes.md)), with
-its associated aggregate and resampling options.
-
-:::info Creating scan groups
-
-Creating scan groups is mandatory to choose them in the _Points_ section when adding new points to request.
-
-:::
-
-:::danger Compatibility with the OPCUA server
-
-Not every aggregate and resampling option is supported by OPCUA servers. _Raw_ aggregate and _None_ resampling are
-preferred to avoid compatibility issues.
-
-:::
-
-
-### Points and nodes
-The OPCUA connector retrieves values from specific addresses. Addresses (called node ID, or just node) are organized in
-namespaces, in a tree-like structure. These can be added in the _Points section_ (in the upper right corner).
-
-To request a data item, specify the following fields:
-- Point ID
-- Node ID
-- Scan Mode (DA only)
-- Scan Group (HA only)
-
-The Node ID matches the path of the data in the appropriate namespace in the OPCUA server. The point ID will be used
-when the data is sent to North connectors. It can be the same as the Node ID, but it allows friendlier names to be managed.
diff --git a/documentation/versioned_docs/version-v2/guide/south-connectors/sql.md b/documentation/versioned_docs/version-v2/guide/south-connectors/sql.md
deleted file mode 100644
index 2bcf85bd47..0000000000
--- a/documentation/versioned_docs/version-v2/guide/south-connectors/sql.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-sidebar_position: 2
----
-
-# SQL
-The SQL connector allows you to run an SQL query at regular intervals and retrieve the results as CSV files.
-
-## Connection to a SQL database
-### Driver
-Several databases are supported by OIBus:
-- MSSQL
-- PostgreSQL
-- Oracle. This driver requires the local installation of an Oracle Instant Client (Basic or Basic Light, minimum supported version 18.5.0.0.0)
-- MySQL
-- SQLite
-- ODBC. This driver requires [the installation of a local module](../advanced/sql-with-odbc.md) that matches the
-database you want to connect to.
-
-### Connection
-Several fields are required to connect to a database.
-
-- **Host**: the address of the SQL server
-- **Port**: SQL server port
-- **Database**: the name of the database to connect to
-- **Username**: the username used for authentication
-- **Password**: the password used for authentication
-- **Domain** (MSSQL only): this field is useful for example when the user wishes to connect to an Active Directory domain
-- **Database path** (SQLite only): Path of the SQLite file
-- **Encryption** (MSSQL only): Encrypt the data between the database and OIBus (can overload the server)
-- **ODBC Driver Path** (ODBC only): The path to the ODBC driver config to be used to connect to your DB (for example libmsodbcsql.18.dylib)
-
-:::tip Database access
-
-Using a read-only user to connect to the database is strongly advised.
-
-:::
-
-## SQL Query
-The query must follow the syntax of the selected driver.
-
-### Query variables
-OIBus manages some variables for the SQL connector. These variables, if used in the query, will be replaced by their
-values, interpreted according to the selected driver.
-
-#### @StartTime
-The @StartTime variable initially takes the date of the first execution of the query. When results are retrieved from
-the database, the @StartTime value is set to the most recent timestamp among those results.
-
-The most recent timestamp is retrieved from the field specified in the _Time column_ field, which must match a
-column in the results.
-
-#### @EndTime
-The @EndTime variable is set to the current time (_now()_) or to the end of the sub-interval if a query is split.
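-
-For example (a hypothetical run): if an execution returns rows whose most recent _Time column_ value is
-2020-01-01T07:45:00, the next execution will run with @StartTime = 2020-01-01T07:45:00 and @EndTime set to the
-then-current time.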
-
-#### Example
-```sql
-SELECT * FROM table WHERE timestamp > @StartTime AND timestamp < @EndTime
-```
-
-
-### Splitting large queries
-In some cases, a query can be quite heavy for the server, especially if a large time interval is requested. If the
-@StartTime and @EndTime query variables are used, the query can be split into several sub-queries with smaller intervals
-using the _Max read interval_ field. It gives the length of the smaller intervals (in seconds) the query will be split
-into. If this field is set to 0, queries won't be split.
-
-Additionally, to not overload the server, a delay between sub-queries can be set (in milliseconds) in the _Read interval
-delay_ field.
-
-Each sub-query will result in its own file, so file name variables can be used to keep the files distinct.
-
-### Result exportation
-The result of each query (or sub-query) will be stored in a CSV file that can be compressed with gzip (.gz extension).
-By default, a comma is used to delimit the CSV columns. However, another delimiter can be chosen.
-
-The _Time column_ field is used to specify which field of the query results contains the timestamp (if needed). If
-identified in the query results, this field will be parsed and associated with the specified _Timezone_ (example:
-Europe/Paris) using the appropriate _Date format_ (example: yyyy-MM-dd HH:mm:ss.SSS).
-
-:::tip Time column
-
-The _Time column_ must match a column returned by the query, not a column in the database table. For example,
-`SELECT prod_date AS timestamp` will return a **timestamp** column, so _Time column_ should be set to **timestamp**.
-
-:::
-
-The name of the file where the results are stored can include variables to make it unique, so it is not overwritten by
-another query.
-
-#### @CurrentDate
-The date at file creation. The date format is yyyy_MM_dd_HH_mm_ss_SSS.
-
-#### @ConnectorName
-The name of the current south connector.
-
-#### @QueryPart
-The index of the sub-query when a large query has been split by OIBus. If the query is not split, this value will
-always be 0.
-
-#### Example
-Assuming the SQL connector name is _sql_, and that the file will be created on the first of January 2020 at midnight, as
-a result of one query (not split), `@ConnectorName-@CurrentDate-@QueryPart.csv` will yield the following name:
-sql-2020_01_01_00_00_00-0.csv.
diff --git a/documentation/versioned_sidebars/version-v2-sidebars.json b/documentation/versioned_sidebars/version-v2-sidebars.json
deleted file mode 100644
index 5be7b03151..0000000000
--- a/documentation/versioned_sidebars/version-v2-sidebars.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "guideSidebar": [
-    {
-      "type": "autogenerated",
-      "dirName": "guide"
-    }
-  ],
-  "developerSidebar": [
-    {
-      "type": "autogenerated",
-      "dirName": "developer"
-    }
-  ]
-}
diff --git a/documentation/versions.json b/documentation/versions.json
index 97b1ae88cf..fe51488c70 100644
--- a/documentation/versions.json
+++ b/documentation/versions.json
@@ -1,3 +1 @@
-[
-  "v2"
-]
+[]