diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index d487325a191..5c675045e1b 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -9,6 +9,10 @@ env: CARGO_TERM_COLOR: always RUST_BACKTRACE: full +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: release: strategy: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8f5a5f5732a..7d0fe668a25 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,10 @@ env: RUST_BACKTRACE: full RUSTFLAGS: "--deny warnings" +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: # Quick tests on each commit/PR sanity: @@ -56,6 +60,10 @@ jobs: with: shared-key: "check" save-if: ${{ github.ref_name == 'testnet_21' }} + - uses: arduino/setup-protoc@v1 + with: + version: '3.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions-rs/cargo@v1 with: command: check @@ -79,6 +87,10 @@ jobs: with: shared-key: "clippy" save-if: ${{ github.ref_name == 'testnet_21' }} + - uses: arduino/setup-protoc@v1 + with: + version: '3.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} @@ -109,6 +121,10 @@ jobs: with: shared-key: "massa" save-if: ${{ github.ref_name == 'testnet_21' }} + - uses: arduino/setup-protoc@v1 + with: + version: '3.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions-rs/cargo@v1 with: command: install @@ -181,10 +197,14 @@ jobs: with: shared-key: "doc" save-if: ${{ github.ref_name == 'testnet_21' }} + - uses: arduino/setup-protoc@v1 + with: + version: '3.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions-rs/cargo@v1 with: command: doc - args: --no-deps + args: --no-deps --all-features --document-private-items - uses: JamesIves/github-pages-deploy-action@4.1.7 with: branch: gh-pages diff --git a/.github/workflows/security-audit.yml b/.github/workflows/security-audit.yml index 783f5d59b87..420c0f6054c 100644 --- a/.github/workflows/security-audit.yml +++ b/.github/workflows/security-audit.yml @@ -5,6 +5,11 @@ on: paths: - '**/Cargo.toml' - '**/Cargo.lock' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: security_audit: runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 24afa353f3c..925e18f58b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -185,6 +185,28 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-stream" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +dependencies = [ + "proc-macro2 1.0.54", + "quote 1.0.26", + "syn 1.0.109", +] + [[package]] name = "async-trait" version = "0.1.68" @@ -213,6 +235,51 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f8ccfd9221ee7d1f3d4b33e1f8319b3a81ed8f61f2ea40b37b859794b4491" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + 
"bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.67" @@ -1799,6 +1866,18 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "iana-time-zone" version = "0.1.54" @@ -2299,6 +2378,7 @@ dependencies = [ "massa_factory_exports", "massa_factory_worker", "massa_final_state", + "massa_grpc", "massa_ledger_exports", "massa_ledger_worker", "massa_logging", @@ -2381,7 +2461,7 @@ dependencies = [ "tokio", "tokio-stream", "tower", - "tower-http", + "tower-http 0.3.5", "tracing", ] @@ -2666,6 +2746,39 @@ dependencies = [ "tracing", ] +[[package]] +name = "massa_grpc" +version = "0.1.0" +dependencies = [ + "crossbeam", + "displaydoc", + "futures-util", + "h2", + "itertools", + "massa_consensus_exports", + "massa_execution_exports", + "massa_hash 0.1.0", + "massa_models", + "massa_network_exports", + "massa_pool_exports", + "massa_pos_exports", + "massa_proto", + "massa_protocol_exports", + "massa_serialization 0.1.0", + "massa_storage", + "massa_time", + "massa_wallet", + "serde", + "thiserror", + "tokio", + "tokio-stream", + "tonic", + "tonic-health", + "tonic-reflection", + "tonic-web", + "tracing", +] + [[package]] name = "massa_hash" version = "0.1.0" @@ -2749,6 +2862,7 @@ dependencies = [ "displaydoc", "lazy_static", "massa_hash 0.1.0", + "massa_proto", "massa_serialization 0.1.0", "massa_signature", "massa_time", @@ -2879,6 +2993,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "massa_proto" +version = "0.1.0" +dependencies = [ + "glob", + "prost", + "prost-build", + "prost-types", + "tonic", + "tonic-build", +] + [[package]] name = "massa_protocol_exports" version = "0.1.0" @@ -3035,6 +3161,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "memchr" version = "2.5.0" @@ -3135,6 +3267,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "nibble_vec" version = "0.1.0" @@ -3601,6 +3739,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2 1.0.54", + "syn 1.0.109", +] + 
[[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3662,6 +3810,60 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" +dependencies = [ + "bytes", + "heck 0.4.1", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 1.0.109", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2 1.0.54", + "quote 1.0.26", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" +dependencies = [ + "prost", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -4596,6 +4798,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tap" version = "1.0.1" @@ -4753,6 +4961,16 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.0.0" @@ -4828,6 +5046,89 @@ dependencies = [ "winnow", ] +[[package]] +name = "tonic" +version = "0.8.3" +source = "git+https://github.com/hyperium/tonic?rev=ff33119#ff331199e45c8b53e93f1bd51ccd74dafc2146ac" +dependencies = [ + "async-trait", + "axum", + "base64 0.21.0", + "bytes", + "flate2", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.8.4" +source = "git+https://github.com/hyperium/tonic?rev=ff33119#ff331199e45c8b53e93f1bd51ccd74dafc2146ac" +dependencies = [ + "prettyplease", + "proc-macro2 1.0.54", + "prost-build", + "quote 1.0.26", + "syn 1.0.109", +] + +[[package]] +name = "tonic-health" +version = "0.8.0" +source = "git+https://github.com/hyperium/tonic?rev=ff33119#ff331199e45c8b53e93f1bd51ccd74dafc2146ac" +dependencies = [ + "async-stream", + "prost", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "tonic-reflection" +version = "0.6.0" +source = "git+https://github.com/hyperium/tonic?rev=ff33119#ff331199e45c8b53e93f1bd51ccd74dafc2146ac" +dependencies = [ + "prost", + "prost-types", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "tonic-web" +version = "0.5.0" +source = "git+https://github.com/hyperium/tonic?rev=ff33119#ff331199e45c8b53e93f1bd51ccd74dafc2146ac" +dependencies = [ + "base64 0.21.0", + 
"bytes", + "futures-core", + "http", + "http-body", + "hyper", + "pin-project", + "tonic", + "tower-http 0.4.0", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.4.13" @@ -4879,6 +5180,24 @@ dependencies = [ "uuid", ] +[[package]] +name = "tower-http" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -5412,6 +5731,17 @@ dependencies = [ "webpki", ] +[[package]] +name = "which" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +dependencies = [ + "either", + "libc", + "once_cell", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 1f8e9c50e6d..13e35cbf105 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,8 @@ members = [ "massa-pos-worker", "massa-versioning-worker", "massa-versioning-exports", + "massa-grpc", + "massa-proto", ] resolver = "2" diff --git a/README.md b/README.md index 23a3870f361..9d8242efd9e 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,9 @@ mass adoption. [![Docs](https://img.shields.io/static/v1?label=docs&message=massa&color=&style=flat)](https://massalabs.github.io/massa/massa_node/) [![Open in Gitpod](https://shields.io/badge/Gitpod-contribute-brightgreen?logo=gitpod&style=flat)](https://gitpod.io/#https://github.com/massalabs/massa) -[![OpenRPC Playground](https://img.shields.io/static/v1?label=interactive-api-specification&message=massa&style=for-the-badge&color=blue)](https://playground.open-rpc.org/?schemaUrl=https://test.massa.net/api/v2&uiSchema\[appBar\]\[ui:input\]=false&uiSchema\[appBar\]\[ui:inputPlaceholder\]=Enter+Massa+JSON-RPC+server+URL&uiSchema\[appBar\]\[ui:logoUrl\]=https://massa.net/favicons/favicon.ico&uiSchema\[appBar\]\[ui:splitView\]=false&uiSchema\[appBar\]\[ui:darkMode\]=false&uiSchema\[appBar\]\[ui:title\]=Massa&uiSchema\[appBar\]\[ui:examplesDropdown\]=false&uiSchema\[methods\]\[ui:defaultExpanded\]=false&uiSchema\[methods\]\[ui:methodPlugins\]=true&uiSchema\[params\]\[ui:defaultExpanded\]=false) +[![OpenRPC Playground](https://img.shields.io/static/v1?label=INTERACTIVE-JSONRPC-API-SPECIFICATION&message=massa&style=for-the-badge&color=blue)](https://playground.open-rpc.org/?schemaUrl=https://test.massa.net/api/v2&uiSchema\[appBar\]\[ui:input\]=false&uiSchema\[appBar\]\[ui:inputPlaceholder\]=Enter+Massa+JSON-RPC+server+URL&uiSchema\[appBar\]\[ui:logoUrl\]=https://massa.net/favicons/favicon.ico&uiSchema\[appBar\]\[ui:splitView\]=false&uiSchema\[appBar\]\[ui:darkMode\]=false&uiSchema\[appBar\]\[ui:title\]=Massa&uiSchema\[appBar\]\[ui:examplesDropdown\]=false&uiSchema\[methods\]\[ui:defaultExpanded\]=false&uiSchema\[methods\]\[ui:methodPlugins\]=true&uiSchema\[params\]\[ui:defaultExpanded\]=false) +[![gRPC documentation](https://img.shields.io/static/v1?label=GRPC-API-DOCUMENTATION&message=massa&style=for-the-badge&color=yellow)](./massa-proto/doc/index.html) +[![Postman workspace](https://img.shields.io/static/v1?logo=postman&label=POSTMAN-WORKSPACE&message=massa&style=for-the-badge&color=orange)](https://www.postman.com/massalabs) ## Introduction @@ -27,7 +29,12 @@ with its explorer 
available at . ## Massa API -You can interact with Massa JSON-RPC API via the [interactive API specification](https://playground.open-rpc.org/?schemaUrl=https://test.massa.net/api/v2&uiSchema\[appBar\]\[ui:input\]=false&uiSchema\[appBar\]\[ui:inputPlaceholder\]=Enter+Massa+JSON-RPC+server+URL&uiSchema\[appBar\]\[ui:logoUrl\]=https://massa.net/favicons/favicon.ico&uiSchema\[appBar\]\[ui:splitView\]=false&uiSchema\[appBar\]\[ui:darkMode\]=false&uiSchema\[appBar\]\[ui:title\]=Massa&uiSchema\[appBar\]\[ui:examplesDropdown\]=false&uiSchema\[methods\]\[ui:defaultExpanded\]=false&uiSchema\[methods\]\[ui:methodPlugins\]=true&uiSchema\[params\]\[ui:defaultExpanded\]=false) which is the equivalent of SwaggerUI for OpenRPC specifications. +You can interact with Massa JSON-RPC API via the [interactive API specification](https://playground.open-rpc.org/?schemaUrl=https://test.massa.net/api/v2&uiSchema\[appBar\]\[ui:input\]=false&uiSchema\[appBar\]\[ui:inputPlaceholder\]=Enter+Massa+JSON-RPC+server+URL&uiSchema\[appBar\]\[ui:logoUrl\]=https://massa.net/favicons/favicon.ico&uiSchema\[appBar\]\[ui:splitView\]=false&uiSchema\[appBar\]\[ui:darkMode\]=false&uiSchema\[appBar\]\[ui:title\]=Massa&uiSchema\[appBar\]\[ui:examplesDropdown\]=false&uiSchema\[methods\]\[ui:defaultExpanded\]=false&uiSchema\[methods\]\[ui:methodPlugins\]=true&uiSchema\[params\]\[ui:defaultExpanded\]=false) which is the equivalent of SwaggerUI for OpenRPC specifications. + +You can use the [gRPC API](./massa-proto/proto/massa/api/v1/)(experimental) to communicate with Massa node. This allows you to incorporate Massa's functionality into your own applications or services using gRPC's high-performance, language-agnostic RPC framework. + +You can easily import APIs collections from [Massa's Postman workspace](https://www.postman.com/massalabs) and start testing and exploring the provided functionalities by Massa API's. + ## Testnet Incentives diff --git a/massa-api-exports/src/page.rs b/massa-api-exports/src/page.rs index f1503dd6f5c..5a04b5a6637 100644 --- a/massa-api-exports/src/page.rs +++ b/massa-api-exports/src/page.rs @@ -4,7 +4,7 @@ use paginate::Pages; use serde::{Deserialize, Serialize, Serializer}; /// Represents a Vec that can be split across Pages -/// Cf. https://docs.rs/paginate/latest/paginate/ +/// Cf. pub struct PagedVec { res: Vec, _total_count: usize, diff --git a/massa-api/src/api_trait.rs b/massa-api/src/api_trait.rs index b285921723d..12a4108571e 100644 --- a/massa-api/src/api_trait.rs +++ b/massa-api/src/api_trait.rs @@ -38,7 +38,7 @@ pub trait MassaApi { #[subscription( name = "subscribe_new_blocks_headers" => "new_blocks_headers", unsubscribe = "unsubscribe_new_blocks_headers", - item = BlockHeader + item = SecureShare )] fn subscribe_new_blocks_headers(&self); diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index e1a409b5b69..213050fb45c 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -10,7 +10,7 @@ //! # Updater //! //! Runs on a dedicated thread. Signal sent my manager stop method terminates the thread. -//! Shares an Arc> guarded list of white and blacklists with the main worker. +//! Shares an `Arc>` guarded list of white and blacklists with the main worker. //! Periodically does a read-only check to see if list needs updating. //! Creates an updated list then swaps it out with write-locked list //! 
Assuming no errors in code, this is the only write occurance, and is only a pointer-swap diff --git a/massa-consensus-exports/src/channels.rs b/massa-consensus-exports/src/channels.rs index 2ce15cf6f69..5398b5edad0 100644 --- a/massa-consensus-exports/src/channels.rs +++ b/massa-consensus-exports/src/channels.rs @@ -1,7 +1,9 @@ use massa_execution_exports::ExecutionController; -use massa_models::block::{Block, FilledBlock}; +use massa_models::block::{FilledBlock, SecureShareBlock}; use massa_models::block_header::BlockHeader; +use massa_models::block_id::BlockId; use massa_models::denunciation::DenunciationPrecursor; +use massa_models::secure_share::SecureShare; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; use massa_protocol_exports::ProtocolCommandSender; @@ -22,9 +24,9 @@ pub struct ConsensusChannels { /// Channel to send commands to the Protocol module pub protocol_command_sender: ProtocolCommandSender, /// Channel used for Websocket broadcast (if enabled) of new blocks being integrated in the graph - pub block_sender: tokio::sync::broadcast::Sender, + pub block_sender: tokio::sync::broadcast::Sender, /// Channel used for Websocket broadcast (if enabled) of new block headers being integrated in the graph - pub block_header_sender: tokio::sync::broadcast::Sender, + pub block_header_sender: tokio::sync::broadcast::Sender>, /// Channel use by Websocket (if they are enable) to broadcast a new block integrated pub filled_block_sender: tokio::sync::broadcast::Sender, /// Channel use for Denunciation factory to create denunciations diff --git a/massa-consensus-exports/src/controller_trait.rs b/massa-consensus-exports/src/controller_trait.rs index c6f334bc62d..2eac77859fa 100644 --- a/massa-consensus-exports/src/controller_trait.rs +++ b/massa-consensus-exports/src/controller_trait.rs @@ -134,7 +134,7 @@ impl Clone for Box { pub trait ConsensusManager { /// Stop the consensus thread /// Note that we do not take self by value to consume it - /// because it is not allowed to move out of Box + /// because it is not allowed to move out of `Box` /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. 
fn stop(&mut self); } diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs index 9e727a7a607..cad3dddce14 100644 --- a/massa-consensus-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -19,7 +19,7 @@ use massa_models::{ use massa_storage::Storage; use parking_lot::RwLock; use std::sync::{mpsc::SyncSender, Arc}; -use tracing::log::warn; +use tracing::log::{debug, warn}; use crate::{commands::ConsensusCommand, state::ConsensusState}; @@ -245,18 +245,25 @@ impl ConsensusController for ConsensusControllerImpl { }) .collect(); - let _block_receivers_count = self - .channels - .block_sender - .send(verifiable_block.content.clone()); - let _filled_block_receivers_count = - self.channels.filled_block_sender.send(FilledBlock { - header: verifiable_block.content.header.clone(), - operations, - }); + if let Err(err) = self.channels.block_sender.send(verifiable_block.clone()) { + debug!( + "error trying to broadcast block with id {} due to: {}", + block_id, err + ); + } + + if let Err(err) = self.channels.filled_block_sender.send(FilledBlock { + header: verifiable_block.content.header.clone(), + operations, + }) { + debug!( + "error trying to broadcast filled block with id {} due to: {}", + block_id, err + ); + } } else { - warn!( - "error no ws event sent, block with id {} not found", + debug!( + "error, no broadcast event sent, block with id {} not found", block_id ); }; @@ -277,10 +284,12 @@ impl ConsensusController for ConsensusControllerImpl { fn register_block_header(&self, block_id: BlockId, header: SecureShare) { if self.broadcast_enabled { - let _ = self - .channels - .block_header_sender - .send(header.clone().content); + if let Err(err) = self.channels.block_header_sender.send(header.clone()) { + debug!( + "error trying to broadcast block header with block id {}: {}", + block_id, err + ); + } } if let Err(e) = self diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 0ae76e31d5e..3fe55666e44 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -108,6 +108,9 @@ pub use worker::start_execution_worker; ))] pub use interface_impl::InterfaceImpl; +#[cfg(any(feature = "benchmarking"))] +use criterion as _; + #[cfg(any( test, feature = "gas_calibration", diff --git a/massa-factory-exports/src/controller_traits.rs b/massa-factory-exports/src/controller_traits.rs index 7b6ab888475..c33b859ae0d 100644 --- a/massa-factory-exports/src/controller_traits.rs +++ b/massa-factory-exports/src/controller_traits.rs @@ -7,7 +7,7 @@ pub trait FactoryManager { /// Stop the factory thread /// Note that we do not take self by value to consume it - /// because it is not allowed to move out of Box + /// because it is not allowed to move out of `Box` /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. 
fn stop(&mut self); } diff --git a/massa-grpc/Cargo.toml b/massa-grpc/Cargo.toml new file mode 100644 index 00000000000..d6da7be5b4d --- /dev/null +++ b/massa-grpc/Cargo.toml @@ -0,0 +1,44 @@ +# Copyright (c) 2023 MASSA LABS + +[package] +name = "massa_grpc" +version = "0.1.0" +edition = "2021" +description = "GRPC API for Massa Blockchain" +repository = "https://github.com/massalabs/massa/" +homepage = "https://massa.net" +documentation = "https://docs.massa.net/" + +[dependencies] +displaydoc = "0.2" +thiserror = "1.0" +#TODO add stable version when released +tonic = { git = "https://github.com/hyperium/tonic", rev = "ff33119", features = ["gzip"] } +tonic-web = { git = "https://github.com/hyperium/tonic", rev = "ff33119" } +tonic-reflection = { git = "https://github.com/hyperium/tonic", rev = "ff33119" } +tonic-health = { git = "https://github.com/hyperium/tonic", rev = "ff33119" } +futures-util = "0.3.25" +serde = { version = "1.0", features = ["derive"] } +tokio = { version = "1.23", features = ["rt-multi-thread", "macros"] } +tokio-stream = "0.1.12" +itertools = "0.10" +h2 = "0.3.15" +tracing = "0.1" +# custom modules +massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_hash = { path = "../massa-hash" } +massa_models = { path = "../massa-models" } +massa_network_exports = { path = "../massa-network-exports" } +massa_pos_exports = { path = "../massa-pos-exports" } +massa_pool_exports = { path = "../massa-pool-exports" } +massa_protocol_exports = { path = "../massa-protocol-exports" } +massa_execution_exports = { path = "../massa-execution-exports" } +massa_storage = { path = "../massa-storage" } +massa_time = { path = "../massa-time" } +massa_wallet = { path = "../massa-wallet" } +massa_serialization = { path = "../massa-serialization" } +massa_proto = { path = "../massa-proto" } + +[dev-dependencies] +crossbeam = "0.8.2" +massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } diff --git a/massa-grpc/README.md b/massa-grpc/README.md new file mode 100644 index 00000000000..f08af359548 --- /dev/null +++ b/massa-grpc/README.md @@ -0,0 +1,103 @@ + + +### Massa gRPC + +In order to compile proto files, you must have the `protoc` compiler installed on your system. `protoc` is a protocol buffer compiler that can generate code in a variety of programming languages. + +To check if you have `protoc` installed on your system, you can run the following command in your terminal: + +``` +protoc --version +``` + +If you see a version number printed out, then you have `protoc` installed. If not, you will need to download and install it. + +Installing Protoc +----------------- + +### macOS + +To install `protoc` on macOS using Homebrew, run the following command: + +``` +brew install protobuf +``` + +### Linux + +To install `protoc` on Linux, you can download the binary file for your architecture from the [official Protobuf releases page](https://github.com/protocolbuffers/protobuf/releases). Once downloaded, extract the contents of the archive and move the `protoc` binary to a location on your system PATH. + +Alternatively, you can use your distribution's package manager to install `protoc`. On Ubuntu, for example, you can run: + +``` +sudo apt-get install protobuf-compiler +``` + +### Windows + +To install `protoc` on Windows, you can download the binary file for your architecture from the [official Protobuf releases page](https://github.com/protocolbuffers/protobuf/releases). 
Once downloaded, extract the contents of the archive and move the `protoc` binary to a location on your system PATH. + +After installing `protoc`, you should be able to compile proto files using the appropriate language-specific plugin (e.g. `protoc --go_out=./ path/to/my_proto_file.proto`). + + +After installing `protoc`, please verify that the `protoc` command is accessible by running `protoc --version` again. + + +To keep the documentation synchronised with our proto files, you must install `protoc-gen-doc`. You can use your package manager or download the binary from the official [GitHub repository releases](https://github.com/pseudomuto/protoc-gen-doc/releases) and add it to your system's `PATH` + + +Project build and run +--------------------- + +The project is set up to automatically compile proto files during the build process using +[massa-proto/build.rs](../massa-proto/build.rs). + +When the project is built, `build.rs` is executed and it uses the `tonic-build` crate to generate Rust code from the proto files. The generated Rust code could be found in [massa-proto/src/](../massa-proto/src/). + + +Before launching your Massa node, please add this following configuration to your `config.toml` file: + +```toml + +[api] + # whether to broadcast for blocks, endorsement and operations + enable_broadcast = true +[grpc] + # whether to enable gRPC + enabled = true +``` + +You can easily import APIs collections from [Massa's Postman workspace](https://www.postman.com/massalabs) and start testing and exploring the provided functionalities by Massa API's. + + +VSCode integration +------------------ + +1- Install [vscode-proto3](https://marketplace.visualstudio.com/items?itemName=zxh404.vscode-proto3) extension. + +2- The following settings contain a `protoc` configuration block: + +```json +{ + "rust-analyzer.procMacro.enable": true, // Enables Rust macro support for the Rust Analyzer extension. + "rust-analyzer.cargo.buildScripts.enable": true, // Enables cargo build scripts for the Rust Analyzer extension. + "protoc": { // Specifies the configuration for the protoc plugin. + "path": "/path/to/protoc", // Sets the path to the protoc binary that will be used to compile the protobuf files. + "compile_on_save": true, // Enables automatic compilation of protobuf files when they are saved. + "options": [ // Specifies the command line options that will be passed to protoc. + "{workspaceRoot}/massa-proto/proto/**/*.proto", // Specifies the path to the protobuf files that should be compiled. + "--proto_path=${workspaceRoot}/massa-proto/proto/massa/api/v1", // Specifies the directory to search for imported protobuf files. + "--proto_path=${workspaceRoot}/massa-proto/proto/third-party", // Specifies the directory to search for imported third-party protobuf files. + // "--java_out=${workspaceRoot}/target/", // Generates Java code from the protobuf files. + "--doc_out=${workspaceRoot}/massa-proto/doc/", // Generates documentation in HTML/markdown format from the protobuf files. + "--doc_opt=html,index.html", // Specifies the options for generating the HTML documentation. + // "--doc_opt=markdown,docs.md", // Specifies the options for generating the markdown documentation. + "--descriptor_set_out=${workspaceRoot}/massa-proto/src/api.bin" // Generates a binary descriptor set for the protobuf files which is used for server reflection. + ] + } +} + +``` + +3- Add the snippet above to `.vscode/settings.json`. 
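For reference, the README above explains that `massa-proto/build.rs` uses the `tonic-build` crate to generate Rust code from the proto files. Below is a minimal sketch of such a build script; the proto file names, paths and options are illustrative assumptions, not a copy of the actual [massa-proto/build.rs](../massa-proto/build.rs) (which also produces the `api.bin` descriptor set used for server reflection):

```rust
// Illustrative build.rs sketch using tonic-build; proto paths and file names are
// placeholders and do not necessarily match the real massa-proto/build.rs.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        // Write a file descriptor set so the server can expose gRPC reflection.
        .file_descriptor_set_path("src/api.bin")
        .build_server(true)
        .out_dir("src")
        .compile(
            // Proto files to compile and the include paths they import from.
            &["proto/massa/api/v1/api.proto"],
            &["proto/massa/api/v1", "proto/third-party"],
        )?;
    Ok(())
}
```

Re-running `cargo build` after editing a `.proto` file regenerates the Rust bindings in `massa-proto/src/`.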
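Once the node is running with gRPC enabled, any gRPC client can talk to it. The snippet below is a hypothetical Rust/`tonic` client sketch for the `get_version` unary call added by this patch; the endpoint address is a placeholder, and the generated client module and request message names (`massa_service_client`, `GetVersionRequest`) are assumptions based on tonic's default code generation rather than something shown in this diff:

```rust
// Hypothetical client example. The address below is a placeholder: use the `bind`
// address configured in the node's [grpc] section.
use massa_proto::massa::api::v1 as grpc;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to the node's gRPC endpoint (client module name assumed from tonic codegen).
    let mut client =
        grpc::massa_service_client::MassaServiceClient::connect("http://127.0.0.1:33037").await?;

    // `id` is a caller-chosen correlation id that the server echoes back in the response.
    let request = tonic::Request::new(grpc::GetVersionRequest { id: "1".into() });

    let response = client.get_version(request).await?.into_inner();
    println!("node version: {}", response.version);
    Ok(())
}
```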
+ diff --git a/massa-grpc/src/api.rs b/massa-grpc/src/api.rs new file mode 100644 index 00000000000..536c0f971a1 --- /dev/null +++ b/massa-grpc/src/api.rs @@ -0,0 +1,252 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::GrpcError; +use crate::server::MassaGrpc; +use itertools::izip; +use massa_models::address::Address; +use massa_models::slot::Slot; +use massa_models::timeslots; +use massa_proto::massa::api::v1 as grpc; +use std::str::FromStr; +use tracing::log::warn; + +/// get blocks by slots +pub(crate) fn get_blocks_by_slots( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + let inner_req = request.into_inner(); + let storage = grpc.storage.clone_without_refs(); + + let mut blocks = Vec::new(); + + for slot in inner_req.slots.into_iter() { + let Some(block_id) = grpc.consensus_controller.get_blockclique_block_at_slot(Slot { + period: slot.period, + thread: slot.thread as u8, + }) else { + continue; + }; + + let res = storage.read_blocks().get(&block_id).map(|b| { + // TODO rework ? + let header = b.clone().content.header; + // transform to grpc struct + let parents = header + .content + .parents + .into_iter() + .map(|p| p.to_string()) + .collect(); + + let endorsements = header + .content + .endorsements + .into_iter() + .map(|endorsement| endorsement.into()) + .collect(); + + let block_header = grpc::BlockHeader { + slot: Some(grpc::Slot { + period: header.content.slot.period, + thread: header.content.slot.thread as u32, + }), + parents, + operation_merkle_root: header.content.operation_merkle_root.to_string(), + endorsements, + }; + + let operations: Vec = b + .content + .operations + .iter() + .map(|ope| ope.to_string()) + .collect(); + + ( + grpc::SignedBlockHeader { + content: Some(block_header), + signature: header.signature.to_string(), + content_creator_pub_key: header.content_creator_pub_key.to_string(), + content_creator_address: header.content_creator_address.to_string(), + id: header.id.to_string(), + }, + operations, + ) + }); + + if let Some(block) = res { + blocks.push(grpc::Block { + header: Some(block.0), + operations: block.1, + }); + } + } + + Ok(grpc::GetBlocksBySlotsResponse { + id: inner_req.id, + blocks, + }) +} + +/// get multiple datastore entries +pub(crate) fn get_datastore_entries( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + let inner_req = request.into_inner(); + let id = inner_req.id; + + let filters = inner_req + .queries + .into_iter() + .map(|query| match query.filter { + Some(filter) => Address::from_str(filter.address.as_str()) + .map(|address| (address, filter.key)) + .map_err(|e| e.into()), + None => Err(GrpcError::InvalidArgument("filter is missing".to_string())), + }) + .collect::, _>>()?; + + let entries = grpc + .execution_controller + .get_final_and_active_data_entry(filters) + .into_iter() + .map(|output| grpc::DatastoreEntry { + final_value: output.0.unwrap_or_default(), + candidate_value: output.1.unwrap_or_default(), + }) + .collect(); + + Ok(grpc::GetDatastoreEntriesResponse { id, entries }) +} + +/// get next block best parents +pub(crate) fn get_next_block_best_parents( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + let inner_req = request.into_inner(); + let parents = grpc + .consensus_controller + .get_best_parents() + .into_iter() + .map(|p| grpc::BlockParent { + block_id: p.0.to_string(), + period: p.1, + }) + .collect(); + Ok(grpc::GetNextBlockBestParentsResponse { + id: inner_req.id, + parents, + }) +} + +pub(crate) fn get_selector_draws( + grpc: &MassaGrpc, + request: 
tonic::Request, +) -> Result { + let inner_req = request.into_inner(); + let id = inner_req.id; + + let addresses = inner_req + .queries + .into_iter() + .map(|query| match query.filter { + Some(filter) => Address::from_str(filter.address.as_str()).map_err(|e| e.into()), + None => Err(GrpcError::InvalidArgument("filter is missing".to_string())), + }) + .collect::, _>>()?; + + // get future draws from selector + let selection_draws = { + let cur_slot = match timeslots::get_current_latest_block_slot( + grpc.grpc_config.thread_count, + grpc.grpc_config.t0, + grpc.grpc_config.genesis_timestamp, + ) { + Ok(slot) => slot.unwrap_or_else(Slot::min), + Err(e) => { + warn!("failed to get current slot with error: {}", e); + Slot::min() + } + }; + + let slot_end = Slot::new( + cur_slot + .period + .saturating_add(grpc.grpc_config.draw_lookahead_period_count), + cur_slot.thread, + ); + addresses + .iter() + .map(|addr| { + let (nt_block_draws, nt_endorsement_draws) = grpc + .selector_controller + .get_address_selections(addr, cur_slot, slot_end) + .unwrap_or_default(); + + let mut proto_nt_block_draws = Vec::with_capacity(addresses.len()); + let mut proto_nt_endorsement_draws = Vec::with_capacity(addresses.len()); + let iterator = izip!(nt_block_draws.into_iter(), nt_endorsement_draws.into_iter()); + for (next_block_draw, next_endorsement_draw) in iterator { + proto_nt_block_draws.push(next_block_draw.into()); + proto_nt_endorsement_draws.push(next_endorsement_draw.into()); + } + + (proto_nt_block_draws, proto_nt_endorsement_draws) + }) + .collect::>() + }; + + // compile results + let mut res = Vec::with_capacity(addresses.len()); + let iterator = izip!(addresses.into_iter(), selection_draws.into_iter()); + for (address, (next_block_draws, next_endorsement_draws)) in iterator { + res.push(grpc::SelectorDraws { + address: address.to_string(), + next_block_draws, + next_endorsement_draws, + }); + } + + Ok(grpc::GetSelectorDrawsResponse { + id, + selector_draws: res, + }) +} + +/// get transactions throughput +pub(crate) fn get_transactions_throughput( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + let stats = grpc.execution_controller.get_stats(); + let nb_sec_range = stats + .time_window_end + .saturating_sub(stats.time_window_start) + .to_duration() + .as_secs(); + + // checked_div + let throughput = stats + .final_executed_operations_count + .checked_div(nb_sec_range as usize) + .unwrap_or_default() as u32; + + Ok(grpc::GetTransactionsThroughputResponse { + id: request.into_inner().id, + throughput, + }) +} + +// get node version +pub(crate) fn get_version( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + Ok(grpc::GetVersionResponse { + id: request.into_inner().id, + version: grpc.version.to_string(), + }) +} diff --git a/massa-grpc/src/config.rs b/massa-grpc/src/config.rs new file mode 100644 index 00000000000..fdf530e85db --- /dev/null +++ b/massa-grpc/src/config.rs @@ -0,0 +1,79 @@ +// Copyright (c) 2023 MASSA LABS + +use massa_time::MassaTime; +use serde::Deserialize; +use std::{net::SocketAddr, time::Duration}; + +/// gRPC configuration. +/// the gRPC configuration +#[derive(Debug, Deserialize, Clone)] +pub struct GrpcConfig { + /// whether to enable gRPC + pub enabled: bool, + /// whether to accept HTTP/1.1 requests. 
+ pub accept_http1: bool, + /// whether to enable gRPC reflection + pub enable_reflection: bool, + /// bind for the Massa gRPC API + pub bind: SocketAddr, + /// which compression encodings the server accepts for requests + pub accept_compressed: Option, + /// which compression encodings the server may use for responses + pub send_compressed: Option, + /// limits the maximum size of a decoded message. Defaults to 4MB + pub max_decoding_message_size: usize, + /// limits the maximum size of an encoded message. Defaults to 4MB + pub max_encoding_message_size: usize, + /// set the concurrency limit applied to inbound requests per connection. Defaults to 32 + pub concurrency_limit_per_connection: usize, + /// set a timeout for all request handlers + pub timeout: Duration, + /// sets the SETTINGS_INITIAL_WINDOW_SIZE spec option for HTTP2 stream-level flow control. Default is 65,535 + pub initial_stream_window_size: Option, + /// sets the max connection-level flow control for HTTP2. Default is 65,535 + pub initial_connection_window_size: Option, + /// sets the SETTINGS_MAX_CONCURRENT_STREAMS spec option for HTTP2 connections. Default is no limit (`None`) + pub max_concurrent_streams: Option, + /// set whether TCP keepalive messages are enabled on accepted connections + pub tcp_keepalive: Option, + /// set the value of the `TCP_NODELAY` option for accepted connections. Enabled by default + pub tcp_nodelay: bool, + /// set whether HTTP2 Ping frames are enabled on accepted connections. Default is no HTTP2 keepalive (`None`) + pub http2_keepalive_interval: Option, + /// sets a timeout for receiving an acknowledgement of the keepalive ping. Default is 20 seconds + pub http2_keepalive_timeout: Option, + /// sets whether to use adaptive flow control. Defaults to false + pub http2_adaptive_window: Option, + /// sets the maximum frame size to use for HTTP2. If not set, it will default to the underlying transport's value + pub max_frame_size: Option, + /// thread count + pub thread_count: u8, + /// max operations per block + pub max_operations_per_block: u32, + /// endorsement count + pub endorsement_count: u32, + /// max endorsements per message + pub max_endorsements_per_message: u32, + /// max datastore value length + pub max_datastore_value_length: u64, + /// max op datastore entry count + pub max_op_datastore_entry_count: u64, + /// max op datastore key length + pub max_op_datastore_key_length: u8, + /// max op datastore value length + pub max_op_datastore_value_length: u64, + /// max function name length + pub max_function_name_length: u16, + /// max parameter size + pub max_parameter_size: u32, + /// max operations per message in the network, to avoid sending too large data packets + pub max_operations_per_message: u32, + /// `genesis_timestamp` + pub genesis_timestamp: MassaTime, + /// t0 + pub t0: MassaTime, + /// limits the maximum size of the streaming channel + pub max_channel_size: usize, + /// when looking for the next draws, look ahead at most `draw_lookahead_period_count` periods + pub draw_lookahead_period_count: u64, +} diff --git a/massa-grpc/src/error.rs b/massa-grpc/src/error.rs new file mode 100644 index 00000000000..c2a5ee83080 --- /dev/null +++ b/massa-grpc/src/error.rs @@ -0,0 +1,84 @@ +// Copyright (c) 2023 MASSA LABS + +use std::error::Error; + +use displaydoc::Display; + +use massa_consensus_exports::error::ConsensusError; +use massa_execution_exports::ExecutionError; +use massa_hash::MassaHashError; +use massa_models::error::ModelsError; +use massa_network_exports::NetworkError; +use massa_protocol_exports::ProtocolError; +use massa_time::TimeError; +use massa_wallet::WalletError; + +/// Errors of the gRPC component.
+#[non_exhaustive] +#[derive(Display, thiserror::Error, Debug)] +pub enum GrpcError { + /// `massa_hash` error: {0} + MassaHashError(#[from] MassaHashError), + /// consensus error: {0} + ConsensusError(#[from] ConsensusError), + /// execution error: {0} + ExecutionError(#[from] ExecutionError), + /// Network error: {0} + NetworkError(#[from] NetworkError), + /// Protocol error: {0} + ProtocolError(#[from] ProtocolError), + /// Reflection error : {0} + ReflectionError(#[from] tonic_reflection::server::Error), + /// Models error: {0} + ModelsError(#[from] ModelsError), + /// Time error: {0} + TimeError(#[from] TimeError), + /// Wallet error: {0} + WalletError(#[from] WalletError), + /// Internal server error: {0} + InternalServerError(String), + /// Invalid argument error: {0} + InvalidArgument(String), +} + +impl From for tonic::Status { + fn from(error: GrpcError) -> Self { + match error { + GrpcError::MassaHashError(e) => tonic::Status::internal(e.to_string()), + GrpcError::ConsensusError(e) => tonic::Status::internal(e.to_string()), + GrpcError::ExecutionError(e) => tonic::Status::internal(e.to_string()), + GrpcError::NetworkError(e) => tonic::Status::internal(e.to_string()), + GrpcError::ProtocolError(e) => tonic::Status::internal(e.to_string()), + GrpcError::ModelsError(e) => tonic::Status::internal(e.to_string()), + GrpcError::TimeError(e) => tonic::Status::internal(e.to_string()), + GrpcError::WalletError(e) => tonic::Status::internal(e.to_string()), + GrpcError::InternalServerError(e) => tonic::Status::internal(e), + GrpcError::ReflectionError(e) => tonic::Status::internal(e.to_string()), + GrpcError::InvalidArgument(e) => tonic::Status::invalid_argument(e), + } + } +} + +/// returns the first IO error found +pub fn match_for_io_error(err_status: &tonic::Status) -> Option<&std::io::Error> { + let mut err: &(dyn Error + 'static) = err_status; + + loop { + if let Some(io_err) = err.downcast_ref::() { + return Some(io_err); + } + + // h2::Error do not expose std::io::Error with `source()` + // https://github.com/hyperium/h2/pull/462 + if let Some(h2_err) = err.downcast_ref::() { + if let Some(io_err) = h2_err.get_io() { + return Some(io_err); + } + } + + err = match err.source() { + Some(err) => err, + None => return None, + }; + } +} diff --git a/massa-grpc/src/handler.rs b/massa-grpc/src/handler.rs new file mode 100644 index 00000000000..bc4665e7263 --- /dev/null +++ b/massa-grpc/src/handler.rs @@ -0,0 +1,192 @@ +// Copyright (c) 2023 MASSA LABS + +use massa_proto::massa::api::v1 as grpc; + +use crate::api::{ + get_blocks_by_slots, get_datastore_entries, get_next_block_best_parents, get_selector_draws, + get_transactions_throughput, get_version, +}; +use crate::server::MassaGrpc; +use crate::stream::new_blocks::{new_blocks, NewBlocksStream}; +use crate::stream::new_blocks_headers::{new_blocks_headers, NewBlocksHeadersStream}; +use crate::stream::new_filled_blocks::{new_filled_blocks, NewFilledBlocksStream}; +use crate::stream::new_operations::{new_operations, NewOperationsStream}; +use crate::stream::tx_throughput::{transactions_throughput, TransactionsThroughputStream}; +use crate::stream::{ + send_blocks::{send_blocks, SendBlocksStream}, + send_endorsements::{send_endorsements, SendEndorsementsStream}, + send_operations::{send_operations, SendOperationsStream}, +}; + +#[tonic::async_trait] +impl grpc::massa_service_server::MassaService for MassaGrpc { + /// handler for get multiple datastore entries. 
+ async fn get_datastore_entries( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_datastore_entries(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + /// handler for get selector draws + async fn get_selector_draws( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_selector_draws(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + async fn get_transactions_throughput( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_transactions_throughput(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + /// handler for get_next_block_best_parents + async fn get_next_block_best_parents( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_next_block_best_parents(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + async fn get_blocks_by_slots( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_blocks_by_slots(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + /// handler for get version + async fn get_version( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + match get_version(self, request) { + Ok(response) => Ok(tonic::Response::new(response)), + Err(e) => Err(e.into()), + } + } + + // ███████╗████████╗██████╗ ███████╗ █████╗ ███╗ ███╗ + // ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██╔══██╗████╗ ████║ + // ███████╗ ██║ ██████╔╝█████╗ ███████║██╔████╔██║ + // ╚════██║ ██║ ██╔══██╗██╔══╝ ██╔══██║██║╚██╔╝██║ + // ███████║ ██║ ██║ ██║███████╗██║ ██║██║ ╚═╝ ██║ + + type SendBlocksStream = SendBlocksStream; + + /// handler for send_blocks_stream + async fn send_blocks( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match send_blocks(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + type SendEndorsementsStream = SendEndorsementsStream; + /// handler for send_endorsements + async fn send_endorsements( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match send_endorsements(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + type SendOperationsStream = SendOperationsStream; + /// handler for send_operations + async fn send_operations( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match send_operations(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + + type TransactionsThroughputStream = TransactionsThroughputStream; + + /// handler for transactions throughput + async fn transactions_throughput( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match transactions_throughput(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + + type NewOperationsStream = NewOperationsStream; + + /// handler for subscribe new operations stream + async fn new_operations( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match new_operations(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + + type NewBlocksStream = NewBlocksStream; + + /// handler for subscribe new blocks + async fn new_blocks( + &self, + request: 
tonic::Request>, + ) -> Result, tonic::Status> { + match new_blocks(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + + type NewBlocksHeadersStream = NewBlocksHeadersStream; + + /// handler for subscribe new blocks headers + async fn new_blocks_headers( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match new_blocks_headers(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } + + type NewFilledBlocksStream = NewFilledBlocksStream; + + /// handler for subscribe new blocks with operations content + async fn new_filled_blocks( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status> { + match new_filled_blocks(self, request).await { + Ok(res) => Ok(tonic::Response::new(res)), + Err(e) => Err(e.into()), + } + } +} diff --git a/massa-grpc/src/lib.rs b/massa-grpc/src/lib.rs new file mode 100644 index 00000000000..278ec499fb0 --- /dev/null +++ b/massa-grpc/src/lib.rs @@ -0,0 +1,37 @@ +// Copyright (c) 2023 MASSA LABS +// +//! ## **Overview** +//! +//! This Rust module is a gRPC API for providing services for the Massa blockchain. +//! It implements gRPC services defined in the [massa_proto] module. +//! +//! ## **Structure** +//! +//! * `api.rs`: implements gRPC service methods without streams. +//! * `handler.rs`: defines the logic for handling incoming gRPC requests. +//! * `server`: initializes the gRPC service and serve It. +//! * `stream/`: contains the gRPC streaming methods implementations files. + +#![feature(async_closure)] +#![warn(missing_docs)] +#![warn(unused_crate_dependencies)] + +use tonic_health as _; +use tonic_reflection as _; +use tonic_web as _; + +/// business code for non stream methods +pub mod api; +/// gRPC configuration +pub mod config; +/// models error +pub mod error; +/// gRPC API implementation +pub mod handler; +/// gRPC service initialization and serve +pub mod server; +/// business code for stream methods +pub mod stream; + +#[cfg(test)] +mod tests; diff --git a/massa-grpc/src/server.rs b/massa-grpc/src/server.rs new file mode 100644 index 00000000000..e1c46c29c1a --- /dev/null +++ b/massa-grpc/src/server.rs @@ -0,0 +1,128 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::config::GrpcConfig; +use crate::error::GrpcError; +use futures_util::FutureExt; +use massa_consensus_exports::{ConsensusChannels, ConsensusController}; +use massa_execution_exports::ExecutionController; +use massa_pool_exports::{PoolChannels, PoolController}; +use massa_pos_exports::SelectorController; +use massa_proto::massa::api::v1::massa_service_server::MassaServiceServer; +use massa_proto::massa::api::v1::FILE_DESCRIPTOR_SET; +use massa_protocol_exports::ProtocolCommandSender; +use massa_storage::Storage; +use tokio::sync::oneshot; +use tonic::codec::CompressionEncoding; +use tonic_web::GrpcWebLayer; +use tracing::log::{info, warn}; + +/// gRPC API content +pub struct MassaGrpc { + /// link(channels) to the consensus component + pub consensus_controller: Box, + /// link(channels) to the consensus component + pub consensus_channels: ConsensusChannels, + /// link to the execution component + pub execution_controller: Box, + /// link(channels) to the pool component + pub pool_channels: PoolChannels, + /// link to the pool component + pub pool_command_sender: Box, + /// link(channels) to the protocol component + pub protocol_command_sender: ProtocolCommandSender, + /// link to the selector component + pub selector_controller: Box, + /// link to the storage 
component + pub storage: Storage, + /// gRPC configuration + pub grpc_config: GrpcConfig, + /// node version + pub version: massa_models::version::Version, +} + +impl MassaGrpc { + /// Start the gRPC API + pub async fn serve(self, config: &GrpcConfig) -> Result { + let mut svc = MassaServiceServer::new(self) + .max_decoding_message_size(config.max_decoding_message_size) + .max_encoding_message_size(config.max_encoding_message_size); + + if let Some(encoding) = &config.accept_compressed { + if encoding.eq_ignore_ascii_case("Gzip") { + svc = svc.accept_compressed(CompressionEncoding::Gzip); + }; + } + + if let Some(encoding) = &config.send_compressed { + if encoding.eq_ignore_ascii_case("Gzip") { + svc = svc.send_compressed(CompressionEncoding::Gzip); + }; + } + + let (shutdown_send, shutdown_recv) = oneshot::channel::<()>(); + + let mut server_builder = tonic::transport::Server::builder() + .concurrency_limit_per_connection(config.concurrency_limit_per_connection) + .timeout(config.timeout) + .initial_stream_window_size(config.initial_stream_window_size) + .initial_connection_window_size(config.initial_connection_window_size) + .max_concurrent_streams(config.max_concurrent_streams) + .tcp_keepalive(config.tcp_keepalive) + .tcp_nodelay(config.tcp_nodelay) + .http2_keepalive_interval(config.http2_keepalive_interval) + .http2_keepalive_timeout(config.http2_keepalive_timeout) + .http2_adaptive_window(config.http2_adaptive_window) + .max_frame_size(config.max_frame_size); + + if config.accept_http1 { + let mut router_with_http1 = server_builder + .accept_http1(true) + .layer(GrpcWebLayer::new()) + .add_service(svc); + + if config.enable_reflection { + let reflection_service = tonic_reflection::server::Builder::configure() + .register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET) + .build()?; + + router_with_http1 = router_with_http1.add_service(reflection_service); + } + + tokio::spawn( + router_with_http1.serve_with_shutdown(config.bind, shutdown_recv.map(drop)), + ); + } else { + let mut router = server_builder.add_service(svc); + + if config.enable_reflection { + let reflection_service = tonic_reflection::server::Builder::configure() + .register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET) + .build()?; + + router = router.add_service(reflection_service); + } + + tokio::spawn(router.serve_with_shutdown(config.bind, shutdown_recv.map(drop))); + } + + Ok(StopHandle { + stop_cmd_sender: shutdown_send, + }) + } +} + +/// Used to be able to stop the gRPC API +pub struct StopHandle { + stop_cmd_sender: oneshot::Sender<()>, +} + +impl StopHandle { + /// stop the gRPC API gracefully + pub fn stop(self) { + if let Err(e) = self.stop_cmd_sender.send(()) { + warn!("gRPC API thread panicked: {:?}", e); + } else { + info!("gRPC API finished cleanly"); + } + } +} diff --git a/massa-grpc/src/stream/mod.rs b/massa-grpc/src/stream/mod.rs new file mode 100644 index 00000000000..5a9b1d24fde --- /dev/null +++ b/massa-grpc/src/stream/mod.rs @@ -0,0 +1,18 @@ +// Copyright (c) 2023 MASSA LABS + +/// stream new blocks +pub mod new_blocks; +/// stream new blocks with operations content +pub mod new_blocks_headers; +/// stream new blocks headers +pub mod new_filled_blocks; +/// subscribe new operations +pub mod new_operations; +/// send_blocks streaming +pub mod send_blocks; +/// send endorsements +pub mod send_endorsements; +/// send operations +pub mod send_operations; +/// subscribe tx througput +pub mod tx_throughput; diff --git a/massa-grpc/src/stream/new_blocks.rs b/massa-grpc/src/stream/new_blocks.rs new 
file mode 100644 index 00000000000..d80c472e5cc --- /dev/null +++ b/massa-grpc/src/stream/new_blocks.rs @@ -0,0 +1,98 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_proto::massa::api::v1 as grpc; +use std::io::ErrorKind; +use std::pin::Pin; +use tokio::select; +use tonic::codegen::futures_core; +use tonic::{Request, Streaming}; +use tracing::log::{error, warn}; + +/// Type declaration for NewBlocks +pub type NewBlocksStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// Creates a new stream of new produced and received blocks +pub(crate) async fn new_blocks( + grpc: &MassaGrpc, + request: Request>, +) -> Result { + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(grpc.grpc_config.max_channel_size); + // Get the inner stream from the request + let mut in_stream = request.into_inner(); + // Subscribe to the new blocks channel + let mut subscriber = grpc.consensus_channels.block_sender.subscribe(); + + tokio::spawn(async move { + // Initialize the request_id string + let mut request_id = String::new(); + loop { + select! { + // Receive a new block from the subscriber + event = subscriber.recv() => { + match event { + Ok(massa_block) => { + // Send the new block through the channel + if let Err(e) = tx.send(Ok(grpc::NewBlocksResponse { + id: request_id.clone(), + block: Some(massa_block.into()) + })).await { + error!("failed to send new block : {}", e); + break; + } + }, + Err(e) => {error!("error on receive new block : {}", e)} + } + }, + // Receive a new message from the in_stream + res = in_stream.next() => { + match res { + Some(res) => { + match res { + // Get the request_id from the received data + Ok(data) => { + request_id = data.id + }, + // Handle any errors that may occur during receiving the data + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + } + error!("{}", err); + // Send the error response back to the client + if let Err(e) = tx.send(Err(err)).await { + error!("failed to send back new_blocks error response: {}", e); + break; + } + } + } + }, + None => { + // The client has disconnected + break; + }, + } + } + } + } + }); + + // Create a new stream from the received channel. + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + + // Return the new stream of blocks. 
+ Ok(Box::pin(out_stream) as NewBlocksStream) +} diff --git a/massa-grpc/src/stream/new_blocks_headers.rs b/massa-grpc/src/stream/new_blocks_headers.rs new file mode 100644 index 00000000000..49868a13848 --- /dev/null +++ b/massa-grpc/src/stream/new_blocks_headers.rs @@ -0,0 +1,95 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_proto::massa::api::v1 as grpc; +use std::io::ErrorKind; +use std::pin::Pin; +use tokio::select; +use tonic::codegen::futures_core; +use tonic::{Request, Streaming}; +use tracing::log::{error, warn}; + +/// Type declaration for NewBlocksHeaders +pub type NewBlocksHeadersStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// Creates a new stream of new produced and received blocks headers +pub(crate) async fn new_blocks_headers( + grpc: &MassaGrpc, + request: Request>, +) -> Result { + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(grpc.grpc_config.max_channel_size); + // Get the inner stream from the request + let mut in_stream = request.into_inner(); + // Subscribe to the new blocks headers channel + let mut subscriber = grpc.consensus_channels.block_header_sender.subscribe(); + + tokio::spawn(async move { + // Initialize the request_id string + let mut request_id = String::new(); + loop { + select! { + // Receive a new block header from the subscriber + event = subscriber.recv() => { + match event { + Ok(massa_block_header) => { + // Send the new block header through the channel + if let Err(e) = tx.send(Ok(grpc::NewBlocksHeadersResponse { + id: request_id.clone(), + block_header: Some(massa_block_header.into()) + })).await { + error!("failed to send new block header : {}", e); + break; + } + }, + Err(e) => {error!("error on receive new block header : {}", e)} + } + }, + // Receive a new message from the in_stream + res = in_stream.next() => { + match res { + Some(res) => { + match res { + // Get the request_id from the received data + Ok(data) => { + request_id = data.id + }, + // Handle any errors that may occur during receiving the data + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + } + error!("{}", err); + // Send the error response back to the client + if let Err(e) = tx.send(Err(err)).await { + error!("failed to send back new_blocks_headers error response: {}", e); + break; + } + } + } + }, + None => { + // The client has disconnected + break; + }, + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as NewBlocksHeadersStream) +} diff --git a/massa-grpc/src/stream/new_filled_blocks.rs b/massa-grpc/src/stream/new_filled_blocks.rs new file mode 100644 index 00000000000..ec2c8f05ca8 --- /dev/null +++ b/massa-grpc/src/stream/new_filled_blocks.rs @@ -0,0 +1,95 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_proto::massa::api::v1 as grpc; +use std::io::ErrorKind; +use std::pin::Pin; +use tokio::select; +use tonic::codegen::futures_core; +use tonic::{Request, Streaming}; +use tracing::log::{error, warn}; + +/// Type declaration for NewFilledBlocks +pub type NewFilledBlocksStream = Pin< + Box< + dyn 
futures_core::Stream> + + Send + + 'static, + >, +>; + +/// Creates a new stream of new produced and received filled blocks +pub(crate) async fn new_filled_blocks( + grpc: &MassaGrpc, + request: Request>, +) -> Result { + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(grpc.grpc_config.max_channel_size); + // Get the inner stream from the request + let mut in_stream = request.into_inner(); + // Subscribe to the new filled blocks channel + let mut subscriber = grpc.consensus_channels.filled_block_sender.subscribe(); + + tokio::spawn(async move { + // Initialize the request_id string + let mut request_id = String::new(); + loop { + select! { + // Receive a new filled block from the subscriber + event = subscriber.recv() => { + match event { + Ok(massa_filled_block) => { + // Send the new filled block through the channel + if let Err(e) = tx.send(Ok(grpc::NewFilledBlocksResponse { + id: request_id.clone(), + filled_block: Some(massa_filled_block.into()) + })).await { + error!("failed to send new block : {}", e); + break; + } + }, + Err(e) => {error!("error on receive new block : {}", e)} + } + }, + // Receive a new message from the in_stream + res = in_stream.next() => { + match res { + Some(res) => { + match res { + // Get the request_id from the received data + Ok(data) => { + request_id = data.id + }, + // Handle any errors that may occur during receiving the data + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + } + error!("{}", err); + // Send the error response back to the client + if let Err(e) = tx.send(Err(err)).await { + error!("failed to send back new_filled_blocks error response: {}", e); + break; + } + } + } + }, + None => { + // The client has disconnected + break; + }, + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as NewFilledBlocksStream) +} diff --git a/massa-grpc/src/stream/new_operations.rs b/massa-grpc/src/stream/new_operations.rs new file mode 100644 index 00000000000..ba4c51e6bdb --- /dev/null +++ b/massa-grpc/src/stream/new_operations.rs @@ -0,0 +1,118 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::GrpcError; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_proto::massa::api::v1 as grpc; +use std::pin::Pin; +use tokio::select; +use tonic::codegen::futures_core; +use tonic::{Request, Streaming}; +use tracing::log::error; + +/// Type declaration for StreamTransactionsThroughput +pub type NewOperationsStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// Creates a new stream of new produced and received operations +pub(crate) async fn new_operations( + grpc: &MassaGrpc, + request: Request>, +) -> Result { + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(grpc.grpc_config.max_channel_size); + // Get the inner stream from the request + let mut in_stream = request.into_inner(); + // Subscribe to the new operations channel + let mut subscriber = grpc.pool_channels.operation_sender.subscribe(); + + tokio::spawn(async move { + if let Some(Ok(request)) = in_stream.next().await { + let mut request_id = request.id; + let mut filter = request.query.and_then(|q| q.filter); + + // Spawn a new task for sending new blocks + loop { + select! 
{ + // Receive a new operation from the subscriber + event = subscriber.recv() => { + match event { + Ok(operation) => { + // Check if the operation should be sent + if !should_send(&filter, operation.clone().content.op.into()) { + continue; + } + + // Convert the operation to a gRPC operation + let ret = grpc::SignedOperation { + content: Some(operation.content.into()), + signature: operation.signature.to_string(), + content_creator_pub_key: operation.content_creator_pub_key.to_string(), + content_creator_address: operation.content_creator_address.to_string(), + id: operation.id.to_string() + }; + // Send the new operation through the channel + if let Err(e) = tx.send(Ok(grpc::NewOperationsResponse { + id: request_id.clone(), + operation: Some(ret) + })).await { + error!("failed to send operation : {}", e); + break; + } + }, + Err(e) => error!("{}", e) + } + }, + // Receive a new message from the in_stream + res = in_stream.next() => { + match res { + Some(res) => { + match res { + Ok(data) => { + // Update current filter && request id + filter = data.query + .and_then(|q| q.filter); + request_id = data.id; + }, + Err(e) => { + error!("{}", e); + break; + } + } + }, + None => { + // Client disconnected + break; + }, + } + } + } + } + } else { + error!("empty request"); + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as NewOperationsStream) +} + +/// Return if the type of operation should be send to client +fn should_send(filter_opt: &Option, ope_type: grpc::OpType) -> bool { + if let Some(filter) = filter_opt { + let filtered_ope_ids = &filter.types; + if filtered_ope_ids.is_empty() { + return true; + } + let id: i32 = ope_type as i32; + filtered_ope_ids.contains(&id) + } else { + // if user has no filter = All operations type is send + true + } +} diff --git a/massa-grpc/src/stream/send_blocks.rs b/massa-grpc/src/stream/send_blocks.rs new file mode 100644 index 00000000000..4faee22ab7d --- /dev/null +++ b/massa-grpc/src/stream/send_blocks.rs @@ -0,0 +1,222 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_models::block::{BlockDeserializer, BlockDeserializerArgs, SecureShareBlock}; +use massa_models::error::ModelsError; +use massa_models::secure_share::SecureShareDeserializer; +use massa_proto::google::rpc::Status; +use massa_proto::massa::api::v1 as grpc; +use massa_serialization::{DeserializeError, Deserializer}; +use std::io::ErrorKind; +use std::pin::Pin; +use tokio::sync::mpsc::Sender; +use tonic::codegen::futures_core; +use tonic::Request; +use tracing::log::{error, warn}; + +/// Type declaration for SendBlockStream +pub type SendBlocksStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// This function takes a streaming request of block messages, +/// verifies, saves and propagates the block received in each message, and sends back a stream of +/// block id messages +pub(crate) async fn send_blocks( + grpc: &MassaGrpc, + request: Request>, +) -> Result { + let consensus_controller = grpc.consensus_controller.clone(); + let mut protocol_command_sender = grpc.protocol_command_sender.clone(); + let storage = grpc.storage.clone_without_refs(); + let config = grpc.grpc_config.clone(); + + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(config.max_channel_size); + // Extract the incoming stream of block messages + let mut in_stream = 
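+    // `into_inner` unwraps the tonic `Request` and yields the client-side message stream that drives this bidirectional call.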
request.into_inner(); + + // Spawn a task that reads incoming messages and processes the block in each message + tokio::spawn(async move { + while let Some(result) = in_stream.next().await { + match result { + Ok(req_content) => { + let Some(proto_block) = req_content.block else { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "the request payload is empty".to_owned(), + ).await; + continue; + }; + + let pub_key_b = proto_block.content_creator_pub_key.as_bytes(); + // Concatenate signature, public key, and data into a single byte vector + let mut blk_serialized = Vec::with_capacity( + proto_block.signature.len() + + pub_key_b.len() + + proto_block.serialized_data.len(), + ); + + blk_serialized.extend_from_slice(proto_block.signature.as_bytes()); + blk_serialized.extend_from_slice(pub_key_b); + blk_serialized.extend_from_slice(&proto_block.serialized_data); + // Create a block deserializer arguments + let args = BlockDeserializerArgs { + thread_count: config.thread_count, + max_operations_per_block: config.max_operations_per_block, + endorsement_count: config.endorsement_count, + }; + // Deserialize and verify received block in the incoming message + match SecureShareDeserializer::new(BlockDeserializer::new(args)) + .deserialize::(&blk_serialized) + { + Ok(tuple) => { + let (rest, res_block): (&[u8], SecureShareBlock) = tuple; + if !rest.is_empty() { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "the request payload is too large".to_owned(), + ) + .await; + continue; + } + if let Err(e) = res_block + .verify_signature() + .and_then(|_| res_block.content.header.verify_signature()) + .map(|_| { + res_block + .content + .header + .content + .endorsements + .iter() + .map(|endorsement| endorsement.verify_signature()) + .collect::>>() + }) + { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + format!("wrong signature: {}", e), + ) + .await; + continue; + } + + let block_id = res_block.id; + let slot = res_block.content.header.content.slot; + let mut block_storage = storage.clone_without_refs(); + + // Add the received block to the graph + block_storage.store_block(res_block.clone()); + consensus_controller.register_block( + block_id, + slot, + block_storage.clone(), + false, + ); + + // Propagate the block(header) to the network + if let Err(e) = + protocol_command_sender.integrated_block(block_id, block_storage) + { + // If propagation failed, send an error message back to the client + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::Internal, + format!("failed to propagate block: {}", e), + ) + .await; + continue; + }; + + // Build the response message + let result = grpc::BlockResult { + block_id: res_block.id.to_string(), + }; + // Send the response message back to the client + if let Err(e) = tx + .send(Ok(grpc::SendBlocksResponse { + id: req_content.id.clone(), + message: Some(grpc::send_blocks_response::Message::Result( + result, + )), + })) + .await + { + error!("failed to send back block response: {}", e); + }; + } + // If the verification failed, send an error message back to the client + Err(e) => { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + format!("failed to deserialize block: {}", e), + ) + .await; + continue; + } + }; + } + // Handle any errors that may occur during receiving the data + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = 
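+    // An underlying `std::io::Error` of kind `BrokenPipe` means the client went away; it is treated as a normal disconnection rather than a server-side failure.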
match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + } + error!("{}", err); + // Send the error response back to the client + if let Err(e) = tx.send(Err(err)).await { + error!("failed to send back send_blocks error response: {}", e); + break; + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as SendBlocksStream) +} + +/// This function reports an error to the sender by sending a gRPC response message to the client +async fn report_error( + id: String, + sender: Sender>, + code: tonic::Code, + error: String, +) { + error!("{}", error); + // Attempt to send the error response message to the sender + if let Err(e) = sender + .send(Ok(grpc::SendBlocksResponse { + id, + message: Some(grpc::send_blocks_response::Message::Error(Status { + code: code.into(), + message: error, + details: Vec::new(), + })), + })) + .await + { + // If sending the message fails, log the error message + error!("failed to send back send_blocks error response: {}", e); + } +} diff --git a/massa-grpc/src/stream/send_endorsements.rs b/massa-grpc/src/stream/send_endorsements.rs new file mode 100644 index 00000000000..55f565cb848 --- /dev/null +++ b/massa-grpc/src/stream/send_endorsements.rs @@ -0,0 +1,228 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_models::endorsement::{EndorsementDeserializer, SecureShareEndorsement}; +use massa_models::secure_share::SecureShareDeserializer; +use massa_proto::massa::api::v1 as grpc; +use massa_serialization::{DeserializeError, Deserializer}; +use std::collections::HashMap; +use std::io::ErrorKind; +use std::pin::Pin; +use tonic::codegen::futures_core; +use tracing::log::{error, warn}; + +/// Type declaration for SendEndorsements +pub type SendEndorsementsStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// This function takes a streaming request of endorsements messages, +/// verifies, saves and propagates the endorsements received in each message, and sends back a stream of +/// endorsements ids messages +pub(crate) async fn send_endorsements( + grpc: &MassaGrpc, + request: tonic::Request>, +) -> Result { + let mut pool_command_sender = grpc.pool_command_sender.clone(); + let mut protocol_command_sender = grpc.protocol_command_sender.clone(); + let config = grpc.grpc_config.clone(); + let storage = grpc.storage.clone_without_refs(); + + // Create a channel to handle communication with the client + let (tx, rx) = tokio::sync::mpsc::channel(config.max_channel_size); + // Extract the incoming stream of endorsements messages + let mut in_stream = request.into_inner(); + + // Spawn a task that reads incoming messages and processes the endorsements in each message + tokio::spawn(async move { + while let Some(result) = in_stream.next().await { + match result { + Ok(req_content) => { + // If the incoming message has no endorsements, send an error message back to the client + if req_content.endorsements.is_empty() { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "the request payload is empty".to_owned(), + ) + .await; + } else { + // If there are too many endorsements in the incoming message, send an error message back to the client + let proto_endorsement = req_content.endorsements; + if proto_endorsement.len() as u32 > 
config.max_endorsements_per_message { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "too many endorsements per message".to_owned(), + ) + .await; + } else { + // Deserialize and verify each endorsement in the incoming message + let endorsement_deserializer = + SecureShareDeserializer::new(EndorsementDeserializer::new( + config.thread_count, + config.endorsement_count, + )); + let verified_eds_res: Result, GrpcError> = proto_endorsement + .into_iter() + .map(|proto_endorsement| { + + let pub_key_b = proto_endorsement.content_creator_pub_key.as_bytes(); + // Concatenate signature, public key, and data into a single byte vector + let mut ed_serialized = Vec::with_capacity( + proto_endorsement.signature.len() + + pub_key_b.len() + + proto_endorsement.serialized_data.len(), + ); + ed_serialized.extend_from_slice(proto_endorsement.signature.as_bytes()); + ed_serialized.extend_from_slice(pub_key_b); + ed_serialized.extend_from_slice(&proto_endorsement.serialized_data); + + let verified_op = match endorsement_deserializer.deserialize::(&ed_serialized) { + Ok(tuple) => { + // Deserialize the endorsement and verify its signature + let (rest, res_endorsement): (&[u8], SecureShareEndorsement) = tuple; + if rest.is_empty() { + res_endorsement.verify_signature() + .map(|_| (res_endorsement.id.to_string(), res_endorsement)) + .map_err(|e| e.into()) + } else { + Err(GrpcError::InternalServerError( + "there is data left after endorsement deserialization".to_owned() + )) + } + } + Err(e) => { + Err(GrpcError::InternalServerError(format!("failed to deserialize endorsement: {}", e) + )) + } + }; + verified_op + }) + .collect(); + + match verified_eds_res { + // If all endorsements in the incoming message are valid, store and propagate them + Ok(verified_eds) => { + let mut endorsement_storage = storage.clone_without_refs(); + endorsement_storage.store_endorsements( + verified_eds.values().cloned().collect(), + ); + // Add the received endorsements to the endorsements pool + pool_command_sender + .add_endorsements(endorsement_storage.clone()); + + // Propagate the endorsements to the network + if let Err(e) = protocol_command_sender + .propagate_endorsements(endorsement_storage) + { + // If propagation failed, send an error message back to the client + let error = + format!("failed to propagate endorsement: {}", e); + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::Internal, + error.to_owned(), + ) + .await; + }; + + // Build the response message + let result = grpc::EndorsementResult { + endorsements_ids: verified_eds.keys().cloned().collect(), + }; + // Send the response message back to the client + if let Err(e) = tx + .send(Ok(grpc::SendEndorsementsResponse { + id: req_content.id.clone(), + message: Some( + grpc::send_endorsements_response::Message::Result( + result, + ), + ), + })) + .await + { + error!("failed to send back endorsement response: {}", e) + }; + } + // If the verification failed, send an error message back to the client + Err(e) => { + let error = format!("invalid endorsement(s): {}", e); + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + error.to_owned(), + ) + .await; + } + } + } + } + } + // Handles errors that occur while sending a response back to a client + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + 
} + error!("{}", err); + // Send the error response back to the client + if let Err(e) = tx.send(Err(err)).await { + error!( + "failed to send back send_endorsements error response: {}", + e + ); + break; + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as SendEndorsementsStream) +} + +/// This function reports an error to the sender by sending a gRPC response message to the client +async fn report_error( + id: String, + sender: tokio::sync::mpsc::Sender>, + code: tonic::Code, + error: String, +) { + error!("{}", error); + // Attempt to send the error response message to the sender + if let Err(e) = sender + .send(Ok(grpc::SendEndorsementsResponse { + id, + message: Some(grpc::send_endorsements_response::Message::Error( + massa_proto::google::rpc::Status { + code: code.into(), + message: error, + details: Vec::new(), + }, + )), + })) + .await + { + // If sending the message fails, log the error message + error!( + "failed to send back send_endorsements error response: {}", + e + ); + } +} diff --git a/massa-grpc/src/stream/send_operations.rs b/massa-grpc/src/stream/send_operations.rs new file mode 100644 index 00000000000..966cccc1680 --- /dev/null +++ b/massa-grpc/src/stream/send_operations.rs @@ -0,0 +1,221 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::error::{match_for_io_error, GrpcError}; +use crate::server::MassaGrpc; +use futures_util::StreamExt; +use massa_models::operation::{OperationDeserializer, SecureShareOperation}; +use massa_models::secure_share::SecureShareDeserializer; +use massa_proto::massa::api::v1 as grpc; +use massa_serialization::{DeserializeError, Deserializer}; +use std::collections::HashMap; +use std::io::ErrorKind; +use std::pin::Pin; +use tonic::codegen::futures_core; +use tracing::log::{error, warn}; + +/// Type declaration for SendOperations +pub type SendOperationsStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// This function takes a streaming request of operations messages, +/// verifies, saves and propagates the operations received in each message, and sends back a stream of +/// operations ids messages +pub(crate) async fn send_operations( + grpc: &MassaGrpc, + request: tonic::Request>, +) -> Result { + let mut pool_command_sender = grpc.pool_command_sender.clone(); + let mut protocol_command_sender = grpc.protocol_command_sender.clone(); + let config = grpc.grpc_config.clone(); + let storage = grpc.storage.clone_without_refs(); + + // Create a channel for sending responses to the client + let (tx, rx) = tokio::sync::mpsc::channel(config.max_channel_size); + // Extract the incoming stream of operations messages + let mut in_stream = request.into_inner(); + + // Spawn a task that reads incoming messages and processes the operations in each message + tokio::spawn(async move { + while let Some(result) = in_stream.next().await { + match result { + Ok(req_content) => { + // If the incoming message has no operations, send an error message back to the client + if req_content.operations.is_empty() { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "the request payload is empty".to_owned(), + ) + .await; + } else { + // If there are too many operations in the incoming message, send an error message back to the client + if req_content.operations.len() as u32 > config.max_operations_per_message { + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + "too many operations per 
message".to_owned(), + ) + .await; + } else { + // Deserialize and verify each operation in the incoming message + let operation_deserializer = + SecureShareDeserializer::new(OperationDeserializer::new( + config.max_datastore_value_length, + config.max_function_name_length, + config.max_parameter_size, + config.max_op_datastore_entry_count, + config.max_op_datastore_key_length, + config.max_op_datastore_value_length, + )); + let verified_ops_res: Result, GrpcError> = req_content.operations + .into_iter() + .map(|proto_operation| { + let pub_key_b = proto_operation.content_creator_pub_key.as_bytes(); + // Concatenate signature, public key, and data into a single byte vector + let mut op_serialized = Vec::with_capacity( + proto_operation.signature.len() + + pub_key_b.len() + + proto_operation.serialized_data.len(), + ); + op_serialized.extend_from_slice(proto_operation.signature.as_bytes()); + op_serialized.extend_from_slice(pub_key_b); + op_serialized.extend_from_slice(&proto_operation.serialized_data); + + // Deserialize the operation and verify its signature + let verified_op_res = match operation_deserializer.deserialize::(&op_serialized) { + Ok(tuple) => { + let (rest, res_operation): (&[u8], SecureShareOperation) = tuple; + if rest.is_empty() { + res_operation.verify_signature() + .map(|_| (res_operation.id.to_string(), res_operation)) + .map_err(|e| e.into()) + } else { + Err(GrpcError::InternalServerError( + "there is data left after operation deserialization".to_owned() + )) + } + } + Err(e) => { + Err(GrpcError::InternalServerError(format!("failed to deserialize operation: {}", e))) + } + }; + verified_op_res + }) + .collect(); + + match verified_ops_res { + // If all operations in the incoming message are valid, store and propagate them + Ok(verified_ops) => { + let mut operation_storage = storage.clone_without_refs(); + operation_storage + .store_operations(verified_ops.values().cloned().collect()); + // Add the received operations to the operations pool + pool_command_sender.add_operations(operation_storage.clone()); + + // Propagate the operations to the network + if let Err(e) = protocol_command_sender + .propagate_operations(operation_storage) + { + // If propagation failed, send an error message back to the client + let error = + format!("failed to propagate operations: {}", e); + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::Internal, + error.to_owned(), + ) + .await; + }; + + // Build the response message + let result = grpc::OperationResult { + operations_ids: verified_ops.keys().cloned().collect(), + }; + // Send the response message back to the client + if let Err(e) = tx + .send(Ok(grpc::SendOperationsResponse { + id: req_content.id.clone(), + message: Some( + grpc::send_operations_response::Message::Result( + result, + ), + ), + })) + .await + { + error!("failed to send back operations response: {}", e); + }; + } + // If the verification failed, send an error message back to the client + Err(e) => { + let error = format!("invalid operation(s): {}", e); + report_error( + req_content.id.clone(), + tx.clone(), + tonic::Code::InvalidArgument, + error.to_owned(), + ) + .await; + } + } + } + } + } + // Handle any errors that may occur during receiving the data + Err(err) => { + // Check if the error matches any IO errors + if let Some(io_err) = match_for_io_error(&err) { + if io_err.kind() == ErrorKind::BrokenPipe { + warn!("client disconnected, broken pipe: {}", io_err); + break; + } + } + error!("{}", err); + // Send the error response back to the 
client + if let Err(e) = tx.send(Err(err)).await { + error!("failed to send back send_operations error response: {}", e); + break; + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as SendOperationsStream) +} + +/// This function reports an error to the sender by sending a gRPC response message to the client +async fn report_error( + id: String, + sender: tokio::sync::mpsc::Sender>, + code: tonic::Code, + error: String, +) { + error!("{}", error); + // Attempt to send the error response message to the sender + if let Err(e) = sender + .send(Ok(grpc::SendOperationsResponse { + id, + message: Some(grpc::send_operations_response::Message::Error( + massa_proto::google::rpc::Status { + code: code.into(), + message: error, + details: Vec::new(), + }, + )), + })) + .await + { + // If sending the message fails, log the error message + error!("failed to send back send_operations error response: {}", e); + } +} diff --git a/massa-grpc/src/stream/tx_throughput.rs b/massa-grpc/src/stream/tx_throughput.rs new file mode 100644 index 00000000000..83340681fed --- /dev/null +++ b/massa-grpc/src/stream/tx_throughput.rs @@ -0,0 +1,97 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::{error::GrpcError, server::MassaGrpc}; +use futures_util::StreamExt; +use massa_proto::massa::api::v1 as grpc; +use std::pin::Pin; +use std::time::Duration; +use tokio::{select, time}; +use tonic::codegen::futures_core; +use tracing::log::error; + +/// default throughput interval in seconds +/// +/// set 'high' value to avoid spamming the client with updates who doesn't need +/// +/// end user can override this value by sending a request with a custom interval +const DEFAULT_THROUGHPUT_INTERVAL: u64 = 10; + +/// Type declaration for TransactionsThroughput +pub type TransactionsThroughputStream = Pin< + Box< + dyn futures_core::Stream> + + Send + + 'static, + >, +>; + +/// The function returns a stream of transaction throughput statistics +pub(crate) async fn transactions_throughput( + grpc: &MassaGrpc, + request: tonic::Request>, +) -> Result { + let execution_controller = grpc.execution_controller.clone(); + + // Create a channel for sending responses to the client + let (tx, rx) = tokio::sync::mpsc::channel(grpc.grpc_config.max_channel_size); + // Extract the incoming stream of operations messages + let mut in_stream = request.into_inner(); + + // Spawn a new Tokio task to handle the stream processing + tokio::spawn(async move { + let mut request_id = "".to_string(); + let mut interval = time::interval(Duration::from_secs(DEFAULT_THROUGHPUT_INTERVAL)); + + // Continuously loop until the stream ends or an error occurs + loop { + select! 
{ + // Receive a new message from the in_stream + res = in_stream.next() => { + match res { + Some(Ok(req)) => { + // Update the request ID + request_id = req.id; + // Update the interval timer based on the request (or use the default) + let new_timer = req.interval.unwrap_or(DEFAULT_THROUGHPUT_INTERVAL); + interval = time::interval(Duration::from_secs(new_timer)); + interval.reset(); + }, + _ => { + // Client disconnected + break; + } + } + }, + // Execute the code block whenever the timer ticks + _ = interval.tick() => { + let stats = execution_controller.get_stats(); + // Calculate the throughput over the time window + let nb_sec_range = stats + .time_window_end + .saturating_sub(stats.time_window_start) + .to_duration() + .as_secs(); + let throughput = stats + .final_executed_operations_count + .checked_div(nb_sec_range as usize) + .unwrap_or_default() as u32; + // Send the throughput response back to the client + if let Err(e) = tx + .send(Ok(grpc::TransactionsThroughputResponse { + id: request_id.clone(), + throughput, + })) + .await + { + // Log an error if sending the response fails + error!("failed to send back transactions_throughput response: {}", e); + break; + } + } + } + } + }); + + let out_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Box::pin(out_stream) as TransactionsThroughputStream) +} diff --git a/massa-grpc/src/tests/mod.rs b/massa-grpc/src/tests/mod.rs new file mode 100644 index 00000000000..15d1061b69f --- /dev/null +++ b/massa-grpc/src/tests/mod.rs @@ -0,0 +1,3 @@ +// Copyright (c) 2023 MASSA LABS + +mod test; diff --git a/massa-grpc/src/tests/test.rs b/massa-grpc/src/tests/test.rs new file mode 100644 index 00000000000..f14776e3871 --- /dev/null +++ b/massa-grpc/src/tests/test.rs @@ -0,0 +1,113 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::config::GrpcConfig; +use crate::server::MassaGrpc; +use massa_consensus_exports::test_exports::MockConsensusController; +use massa_consensus_exports::ConsensusChannels; +use massa_execution_exports::test_exports::MockExecutionController; +use massa_models::config::{ + ENDORSEMENT_COUNT, GENESIS_TIMESTAMP, MAX_DATASTORE_VALUE_LENGTH, MAX_ENDORSEMENTS_PER_MESSAGE, + MAX_FUNCTION_NAME_LENGTH, MAX_OPERATIONS_PER_BLOCK, MAX_OPERATIONS_PER_MESSAGE, + MAX_OPERATION_DATASTORE_ENTRY_COUNT, MAX_OPERATION_DATASTORE_KEY_LENGTH, + MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, PROTOCOL_CONTROLLER_CHANNEL_SIZE, + T0, THREAD_COUNT, VERSION, +}; +use massa_pool_exports::test_exports::MockPoolController; +use massa_pool_exports::PoolChannels; +use massa_pos_exports::test_exports::MockSelectorController; +use massa_proto::massa::api::v1::massa_service_client::MassaServiceClient; +use massa_protocol_exports::{ProtocolCommand, ProtocolCommandSender}; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use tokio::sync::mpsc; + +#[tokio::test] +async fn test_start_grpc_server() { + let consensus_controller = MockConsensusController::new_with_receiver(); + let execution_ctrl = MockExecutionController::new_with_receiver(); + let shared_storage: massa_storage::Storage = massa_storage::Storage::create_root(); + let selector_ctrl = MockSelectorController::new_with_receiver(); + let pool_ctrl = MockPoolController::new_with_receiver(); + + let (consensus_event_sender, _consensus_event_receiver) = crossbeam::channel::bounded(1024); + let (denunciation_factory_sender, _denunciation_factory_receiver) = + crossbeam::channel::bounded(1024); + + let (protocol_command_sender, _protocol_command_receiver) = + 
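+    // The receiver half is deliberately kept in scope (the `_` prefix only silences the unused warning) so that sends on the protocol channel cannot fail during the test.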
mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); + + let consensus_channels = ConsensusChannels { + execution_controller: execution_ctrl.0.clone(), + selector_controller: selector_ctrl.0.clone(), + pool_command_sender: pool_ctrl.0.clone(), + controller_event_tx: consensus_event_sender, + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + block_sender: tokio::sync::broadcast::channel(100).0, + block_header_sender: tokio::sync::broadcast::channel(100).0, + filled_block_sender: tokio::sync::broadcast::channel(100).0, + denunciation_factory_sender: denunciation_factory_sender, + }; + + let operation_sender = tokio::sync::broadcast::channel(5000).0; + + let grpc_config = GrpcConfig { + enabled: true, + accept_http1: true, + enable_reflection: true, + bind: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8888), + accept_compressed: None, + send_compressed: None, + max_decoding_message_size: 4194304, + max_encoding_message_size: 4194304, + concurrency_limit_per_connection: 0, + timeout: Default::default(), + initial_stream_window_size: None, + initial_connection_window_size: None, + max_concurrent_streams: None, + tcp_keepalive: None, + tcp_nodelay: false, + http2_keepalive_interval: None, + http2_keepalive_timeout: None, + http2_adaptive_window: None, + max_frame_size: None, + thread_count: THREAD_COUNT, + max_operations_per_block: MAX_OPERATIONS_PER_BLOCK, + endorsement_count: ENDORSEMENT_COUNT, + max_endorsements_per_message: MAX_ENDORSEMENTS_PER_MESSAGE, + max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, + max_op_datastore_entry_count: MAX_OPERATION_DATASTORE_ENTRY_COUNT, + max_op_datastore_key_length: MAX_OPERATION_DATASTORE_KEY_LENGTH, + max_op_datastore_value_length: MAX_OPERATION_DATASTORE_VALUE_LENGTH, + max_function_name_length: MAX_FUNCTION_NAME_LENGTH, + max_parameter_size: MAX_PARAMETERS_SIZE, + max_operations_per_message: MAX_OPERATIONS_PER_MESSAGE, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + max_channel_size: 128, + draw_lookahead_period_count: 10, + }; + + let service = MassaGrpc { + consensus_controller: consensus_controller.0, + consensus_channels, + execution_controller: execution_ctrl.0, + pool_channels: PoolChannels { operation_sender }, + pool_command_sender: pool_ctrl.0, + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + selector_controller: selector_ctrl.0, + storage: shared_storage, + grpc_config: grpc_config.clone(), + version: *VERSION, + }; + + let stop_handle = service.serve(&grpc_config).await.unwrap(); + // std::thread::sleep(Duration::from_millis(100)); + + // start grpc client and connect to the server + let channel = tonic::transport::Channel::from_static("grpc://localhost:8888") + .connect() + .await + .unwrap(); + + let _res = MassaServiceClient::new(channel); + stop_handle.stop(); +} diff --git a/massa-models/Cargo.toml b/massa-models/Cargo.toml index a10f9f5a136..5e134549632 100644 --- a/massa-models/Cargo.toml +++ b/massa-models/Cargo.toml @@ -24,6 +24,7 @@ massa_hash = { path = "../massa-hash" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } +massa_proto = { path = "../massa-proto" } const_format = "0.2.30" [dev-dependencies] diff --git a/massa-models/src/lib.rs b/massa-models/src/lib.rs index 60b2987ec93..0f8008c24dc 100644 --- a/massa-models/src/lib.rs +++ b/massa-models/src/lib.rs @@ -42,6 +42,8 @@ pub mod error; pub mod execution; /// ledger related structures 
pub mod ledger; +/// mapping grpc +pub mod mapping_grpc; /// node related structure pub mod node; /// operations @@ -52,7 +54,7 @@ pub mod output_event; pub mod prehash; /// rolls pub mod rolls; -/// trait for [Signature] secured data-structs +/// trait for [massa_signature::Signature] secured data-structs pub mod secure_share; /// serialization pub mod serialization; diff --git a/massa-models/src/mapping_grpc.rs b/massa-models/src/mapping_grpc.rs new file mode 100644 index 00000000000..e575b326925 --- /dev/null +++ b/massa-models/src/mapping_grpc.rs @@ -0,0 +1,211 @@ +// Copyright (c) 2023 MASSA LABS + +use crate::block::{Block, FilledBlock, SecureShareBlock}; +use crate::block_header::{BlockHeader, SecuredHeader}; +use crate::endorsement::{Endorsement, SecureShareEndorsement}; +use crate::operation::{Operation, OperationType, SecureShareOperation}; +use crate::slot::{IndexedSlot, Slot}; +use massa_proto::massa::api::v1 as grpc; + +impl From for grpc::Block { + fn from(value: Block) -> Self { + grpc::Block { + header: Some(value.header.into()), + operations: value + .operations + .into_iter() + .map(|operation| operation.to_string()) + .collect(), + } + } +} + +impl From for grpc::BlockHeader { + fn from(value: BlockHeader) -> Self { + let res = value.endorsements.into_iter().map(|e| e.into()).collect(); + + grpc::BlockHeader { + slot: Some(value.slot.into()), + parents: value + .parents + .into_iter() + .map(|parent| parent.to_string()) + .collect(), + operation_merkle_root: value.operation_merkle_root.to_string(), + endorsements: res, + } + } +} + +impl From for grpc::FilledBlock { + fn from(value: FilledBlock) -> Self { + grpc::FilledBlock { + header: Some(value.header.into()), + operations: value + .operations + .into_iter() + .map(|tuple| grpc::FilledOperationTuple { + operation_id: tuple.0.to_string(), + operation: tuple.1.map(|op| op.into()), + }) + .collect(), + } + } +} + +impl From for grpc::SignedBlock { + fn from(value: SecureShareBlock) -> Self { + grpc::SignedBlock { + content: Some(value.content.into()), + signature: value.signature.to_bs58_check(), + content_creator_pub_key: value.content_creator_pub_key.to_string(), + content_creator_address: value.content_creator_address.to_string(), + id: value.id.to_string(), + } + } +} + +impl From for grpc::SignedBlockHeader { + fn from(value: SecuredHeader) -> Self { + grpc::SignedBlockHeader { + content: Some(value.content.into()), + signature: value.signature.to_bs58_check(), + content_creator_pub_key: value.content_creator_pub_key.to_string(), + content_creator_address: value.content_creator_address.to_string(), + id: value.id.to_string(), + } + } +} + +impl From for grpc::Endorsement { + fn from(value: Endorsement) -> Self { + grpc::Endorsement { + slot: Some(value.slot.into()), + index: value.index, + endorsed_block: value.endorsed_block.to_string(), + } + } +} + +impl From for grpc::SignedEndorsement { + fn from(value: SecureShareEndorsement) -> Self { + grpc::SignedEndorsement { + content: Some(value.content.into()), + signature: value.signature.to_bs58_check(), + content_creator_pub_key: value.content_creator_pub_key.to_string(), + content_creator_address: value.content_creator_address.to_string(), + id: value.id.to_string(), + } + } +} + +impl From for grpc::OperationType { + fn from(operation_type: OperationType) -> grpc::OperationType { + let mut grpc_operation_type = grpc::OperationType::default(); + match operation_type { + OperationType::Transaction { + recipient_address, + amount, + } => { + let transaction = 
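+    // Addresses are rendered as strings and the amount is exported as its raw u64 representation.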
grpc::Transaction { + recipient_address: recipient_address.to_string(), + amount: amount.to_raw(), + }; + grpc_operation_type.transaction = Some(transaction); + } + OperationType::RollBuy { roll_count } => { + let roll_buy = grpc::RollBuy { roll_count }; + grpc_operation_type.roll_buy = Some(roll_buy); + } + OperationType::RollSell { roll_count } => { + let roll_sell = grpc::RollSell { roll_count }; + grpc_operation_type.roll_sell = Some(roll_sell); + } + OperationType::ExecuteSC { + data, + max_gas, + datastore, + } => { + let execute_sc = grpc::ExecuteSc { + data, + max_gas, + datastore: datastore + .into_iter() + .map(|(key, value)| grpc::BytesMapFieldEntry { key, value }) + .collect(), + }; + grpc_operation_type.execut_sc = Some(execute_sc); + } + OperationType::CallSC { + target_addr, + target_func, + param, + max_gas, + coins, + } => { + let call_sc = grpc::CallSc { + target_addr: target_addr.to_string(), + target_func, + param, + max_gas, + coins: coins.to_raw(), + }; + grpc_operation_type.call_sc = Some(call_sc); + } + } + + grpc_operation_type + } +} + +impl From for grpc::Operation { + fn from(op: Operation) -> Self { + grpc::Operation { + fee: op.fee.to_raw(), + expire_period: op.expire_period, + op: Some(op.op.into()), + } + } +} + +impl From for grpc::OpType { + fn from(value: OperationType) -> Self { + match value { + OperationType::Transaction { .. } => grpc::OpType::Transaction, + OperationType::RollBuy { .. } => grpc::OpType::RollBuy, + OperationType::RollSell { .. } => grpc::OpType::RollSell, + OperationType::ExecuteSC { .. } => grpc::OpType::ExecuteSc, + OperationType::CallSC { .. } => grpc::OpType::CallSc, + } + } +} + +impl From for grpc::SignedOperation { + fn from(value: SecureShareOperation) -> Self { + grpc::SignedOperation { + content: Some(value.content.into()), + signature: value.signature.to_bs58_check(), + content_creator_pub_key: value.content_creator_pub_key.to_string(), + content_creator_address: value.content_creator_address.to_string(), + id: value.id.to_string(), + } + } +} + +impl From for grpc::IndexedSlot { + fn from(s: IndexedSlot) -> Self { + grpc::IndexedSlot { + index: s.index as u64, + slot: Some(s.slot.into()), + } + } +} + +impl From for grpc::Slot { + fn from(s: Slot) -> Self { + grpc::Slot { + period: s.period, + thread: s.thread as u32, + } + } +} diff --git a/massa-network-worker/src/network_cmd_impl.rs b/massa-network-worker/src/network_cmd_impl.rs index 2c937f0a4ad..ce0468d7ef7 100644 --- a/massa-network-worker/src/network_cmd_impl.rs +++ b/massa-network-worker/src/network_cmd_impl.rs @@ -6,7 +6,7 @@ //! All following functions are necessary internals (not public) or called by //! the `manage_network_command` in the worker. //! -//! ```ignore +//! ```text //! async fn manage_network_command(&mut self, cmd: NetworkCommand) -> Result<(), NetworkError> { //! use crate::network_cmd_impl::*; //! 
match cmd { diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 32cee5845a5..5b67034c31e 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -51,6 +51,7 @@ massa_time = { path = "../massa-time" } massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } +massa_grpc = { path = "../massa-grpc" } massa_versioning_worker = { path = "../massa-versioning-worker" } # for more information on what are the following features used for, see the cargo.toml at workspace level diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 0c65b38e725..a26b1ee272d 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -1,5 +1,3 @@ - - [logging] # Logging level. High log levels might impact performance. 0: ERROR, 1: WARN, 2: INFO, 3: DEBUG, 4: TRACE level = 2 @@ -7,11 +5,11 @@ [api] # max number of future periods considered during requests draw_lookahead_period_count = 10 - # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed. Bind to "[::1]:port" if you want to access the node from IPv6. + # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed. Bind to "[::1]:port" for IPv6 bind_private = "127.0.0.1:33034" - # port on which the node API listens for public requests. Can be exposed to the Internet. Bind to "[::]:port" if you want to access the node from IPv6. + # port on which the node API listens for public requests. Can be exposed to the Internet. Bind to "[::]:port" for IPv6 bind_public = "0.0.0.0:33035" - # port on which the node API(V2) listens for HTTP requests and WebSockets subscriptions. Can be exposed to the Internet. Bind to "[::]:port" if you want to access the node from IPv6. + # port on which the node API(V2) listens for HTTP requests and WebSockets subscriptions. Can be exposed to the Internet. Bind to "[::]:port" for IPv6 bind_api = "0.0.0.0:33036" # max number of arguments per RPC call max_arguments = 128 @@ -37,7 +35,40 @@ enable_http = true # whether to enable WS. enable_ws = false + # whether to broadcast for blocks, endorsement and operations + enable_broadcast = false +[grpc] + # whether to enable gRPC + enabled = false + # whether to add HTTP 1 layer + accept_http1 = false + # whether to enable gRPC reflection(introspection) + enable_reflection = true + # bind for the Massa gRPC API + bind = "0.0.0.0:33037" + # which compression encodings does the server accept for requests + accept_compressed = "Gzip" + # which compression encodings might the server use for responses + send_compressed = "Gzip" + # limits the maximum size of a decoded message. Defaults to 50MB + max_decoding_message_size = 52428800 + # limits the maximum size of an encoded message. Defaults to 50MB + max_encoding_message_size = 52428800 + # limits the maximum size of streaming channel + max_channel_size = 128 + # set a timeout on for all request handlers in seconds. Defaults to 60s + timeout = 60 + # sets the maximum frame size to use for HTTP2(must be within 16,384 and 16,777,215). Defaults to 16MB + max_frame_size = 16777215 + # set the concurrency limit applied to on requests inbound per connection. 
Defaults to 32 + concurrency_limit_per_connection = 32 + # sets the SETTINGS_MAX_CONCURRENT_STREAMS spec option for HTTP2 connections + max_concurrent_streams = 1024 + # set the value of `TCP_NODELAY` option for accepted connections. Enabled by default + tcp_nodelay = true + # max number of future periods considered during requests + draw_lookahead_period_count = 10 [execution] # max number of generated events kept in RAM max_final_events = 10000 diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index dc2899b3607..e704159f8dd 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -4,6 +4,7 @@ #![warn(missing_docs)] #![warn(unused_crate_dependencies)] extern crate massa_logging; + use crate::settings::SETTINGS; use crossbeam_channel::{Receiver, TryRecvError}; @@ -21,6 +22,8 @@ use massa_execution_worker::start_execution_worker; use massa_factory_exports::{FactoryChannels, FactoryConfig, FactoryManager}; use massa_factory_worker::start_factory; use massa_final_state::{FinalState, FinalStateConfig}; +use massa_grpc::config::GrpcConfig; +use massa_grpc::server::MassaGrpc; use massa_ledger_exports::LedgerConfig; use massa_ledger_worker::FinalLedger; use massa_logging::massa_trace; @@ -49,6 +52,7 @@ use massa_models::config::constants::{ }; use massa_models::config::{ CONSENSUS_BOOTSTRAP_PART_SIZE, DENUNCIATION_EXPIRE_PERIODS, DENUNCIATION_ITEMS_MAX_CYCLE_DELTA, + MAX_OPERATIONS_PER_MESSAGE, }; use massa_models::denunciation::DenunciationPrecursor; use massa_network_exports::{Establisher, NetworkConfig, NetworkManager}; @@ -97,6 +101,7 @@ async fn launch( StopHandle, StopHandle, StopHandle, + Option, ) { info!("Node version : {}", *VERSION); if let Some(end) = *END_TIMESTAMP { @@ -381,7 +386,7 @@ async fn launch( max_operation_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, max_endorsements_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, channels_size: POOL_CONTROLLER_CHANNEL_SIZE, - broadcast_enabled: SETTINGS.api.enable_ws, + broadcast_enabled: SETTINGS.api.enable_broadcast, broadcast_operations_capacity: SETTINGS.pool.broadcast_operations_capacity, genesis_timestamp: *GENESIS_TIMESTAMP, t0: T0, @@ -428,7 +433,7 @@ async fn launch( max_gas_per_block: MAX_GAS_PER_BLOCK, channel_size: CHANNEL_SIZE, bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, - broadcast_enabled: SETTINGS.api.enable_ws, + broadcast_enabled: SETTINGS.api.enable_broadcast, broadcast_blocks_headers_capacity: SETTINGS.consensus.broadcast_blocks_headers_capacity, broadcast_blocks_capacity: SETTINGS.consensus.broadcast_blocks_capacity, broadcast_filled_blocks_capacity: SETTINGS.consensus.broadcast_filled_blocks_capacity, @@ -587,9 +592,9 @@ async fn launch( // spawn Massa API let api = API::::new( consensus_controller.clone(), - consensus_channels, + consensus_channels.clone(), execution_controller.clone(), - pool_channels, + pool_channels.clone(), api_config.clone(), *VERSION, ); @@ -598,10 +603,95 @@ async fn launch( .await .expect("failed to start MASSA API"); + info!( + "API | EXPERIMENTAL JsonRPC | listening on: {}", + &SETTINGS.api.bind_api + ); + // Disable WebSockets for Private and Public API's let mut api_config = api_config.clone(); api_config.enable_ws = false; + // Whether to spawn gRPC API + let grpc_handle = if SETTINGS.grpc.enabled { + let grpc_config = GrpcConfig { + enabled: SETTINGS.grpc.enabled, + accept_http1: SETTINGS.grpc.accept_http1, + enable_reflection: SETTINGS.grpc.enable_reflection, + bind: SETTINGS.grpc.bind, + accept_compressed: 
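+    // Only the `"Gzip"` encoding is recognised by `serve`; any other value leaves compression disabled.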
SETTINGS.grpc.accept_compressed.clone(), + send_compressed: SETTINGS.grpc.send_compressed.clone(), + max_decoding_message_size: SETTINGS.grpc.max_decoding_message_size, + max_encoding_message_size: SETTINGS.grpc.max_encoding_message_size, + concurrency_limit_per_connection: SETTINGS.grpc.concurrency_limit_per_connection, + timeout: SETTINGS.grpc.timeout.to_duration(), + initial_stream_window_size: SETTINGS.grpc.initial_stream_window_size, + initial_connection_window_size: SETTINGS.grpc.initial_connection_window_size, + max_concurrent_streams: SETTINGS.grpc.max_concurrent_streams, + tcp_keepalive: SETTINGS.grpc.tcp_keepalive.map(|t| t.to_duration()), + tcp_nodelay: SETTINGS.grpc.tcp_nodelay, + http2_keepalive_interval: SETTINGS + .grpc + .http2_keepalive_interval + .map(|t| t.to_duration()), + http2_keepalive_timeout: SETTINGS + .grpc + .http2_keepalive_timeout + .map(|t| t.to_duration()), + http2_adaptive_window: SETTINGS.grpc.http2_adaptive_window, + max_frame_size: SETTINGS.grpc.max_frame_size, + thread_count: THREAD_COUNT, + max_operations_per_block: MAX_OPERATIONS_PER_BLOCK, + endorsement_count: ENDORSEMENT_COUNT, + max_endorsements_per_message: MAX_ENDORSEMENTS_PER_MESSAGE, + max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, + max_op_datastore_entry_count: MAX_OPERATION_DATASTORE_ENTRY_COUNT, + max_op_datastore_key_length: MAX_OPERATION_DATASTORE_KEY_LENGTH, + max_op_datastore_value_length: MAX_OPERATION_DATASTORE_VALUE_LENGTH, + max_function_name_length: MAX_FUNCTION_NAME_LENGTH, + max_parameter_size: MAX_PARAMETERS_SIZE, + max_operations_per_message: MAX_OPERATIONS_PER_MESSAGE, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + max_channel_size: SETTINGS.grpc.max_channel_size, + draw_lookahead_period_count: SETTINGS.grpc.draw_lookahead_period_count, + }; + + let grpc_api = MassaGrpc { + consensus_controller: consensus_controller.clone(), + consensus_channels: consensus_channels.clone(), + execution_controller: execution_controller.clone(), + pool_channels, + pool_command_sender: pool_controller.clone(), + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + selector_controller: selector_controller.clone(), + storage: shared_storage.clone(), + grpc_config: grpc_config.clone(), + version: *VERSION, + }; + + // HACK maybe should remove timeout later + if let Ok(result) = + tokio::time::timeout(Duration::from_secs(3), grpc_api.serve(&grpc_config)).await + { + match result { + Ok(stop) => { + info!("API | gRPC | listening on: {}", grpc_config.bind); + Some(stop) + } + Err(e) => { + error!("{}", e); + None + } + } + } else { + error!("Timeout on start grpc API"); + None + } + } else { + None + }; + // spawn private API let (api_private, api_private_stop_rx) = API::::new( network_command_sender.clone(), @@ -613,6 +703,10 @@ async fn launch( .serve(&SETTINGS.api.bind_private, &api_config) .await .expect("failed to start PRIVATE API"); + info!( + "API | PRIVATE JsonRPC | listening on: {}", + api_config.bind_private + ); // spawn public API let api_public = API::::new( @@ -632,6 +726,10 @@ async fn launch( .serve(&SETTINGS.api.bind_public, &api_config) .await .expect("failed to start PUBLIC API"); + info!( + "API | PUBLIC JsonRPC | listening on: {}", + api_config.bind_public + ); #[cfg(feature = "deadlock_detection")] { @@ -676,6 +774,7 @@ async fn launch( api_private_handle, api_public_handle, api_handle, + grpc_handle, ) } @@ -705,6 +804,7 @@ async fn stop( api_private_handle: StopHandle, api_public_handle: StopHandle, api_handle: StopHandle, + 
grpc_handle: Option, ) { // stop bootstrap if let Some(bootstrap_manager) = bootstrap_manager { @@ -723,6 +823,11 @@ async fn stop( // stop Massa API api_handle.stop(); + // stop Massa gRPC API + if let Some(handle) = grpc_handle { + handle.stop(); + } + // stop factory factory_manager.stop(); @@ -864,6 +969,7 @@ async fn run(args: Args) -> anyhow::Result<()> { api_private_handle, api_public_handle, api_handle, + grpc_handle, ) = launch(&args, node_wallet.clone()).await; // interrupt signal listener @@ -932,6 +1038,7 @@ async fn run(args: Args) -> anyhow::Result<()> { api_private_handle, api_public_handle, api_handle, + grpc_handle, ) .await; diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index d714ee0687f..479f72db28b 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -138,6 +138,8 @@ pub struct APISettings { pub ping_interval: MassaTime, pub enable_http: bool, pub enable_ws: bool, + // whether to broadcast for blocks, endorsement and operations + pub enable_broadcast: bool, } #[derive(Debug, Deserialize, Clone)] @@ -153,6 +155,7 @@ pub struct Settings { pub ledger: LedgerSettings, pub selector: SelectionSettings, pub factory: FactorySettings, + pub grpc: GrpcSettings, } /// Consensus configuration @@ -228,6 +231,54 @@ pub struct ProtocolSettings { pub max_endorsements_propagation_time: MassaTime, } +/// gRPC settings +/// the gRPC settings +#[derive(Debug, Deserialize, Clone)] +pub struct GrpcSettings { + /// whether to enable gRPC + pub enabled: bool, + /// whether to accept HTTP/1.1 requests + pub accept_http1: bool, + /// whether to enable gRPC reflection + pub enable_reflection: bool, + /// bind for the Massa gRPC API + pub bind: SocketAddr, + /// which compression encodings does the server accept for requests + pub accept_compressed: Option, + /// which compression encodings might the server use for responses + pub send_compressed: Option, + /// limits the maximum size of a decoded message. Defaults to 4MB + pub max_decoding_message_size: usize, + /// limits the maximum size of an encoded message. Defaults to 4MB + pub max_encoding_message_size: usize, + /// limits the maximum size of streaming channel + pub max_channel_size: usize, + /// set the concurrency limit applied to on requests inbound per connection. Defaults to 32 + pub concurrency_limit_per_connection: usize, + /// set a timeout on for all request handlers + pub timeout: MassaTime, + /// sets the SETTINGS_INITIAL_WINDOW_SIZE spec option for HTTP2 stream-level flow control. Default is 65,535 + pub initial_stream_window_size: Option, + /// sets the max connection-level flow control for HTTP2. Default is 65,535 + pub initial_connection_window_size: Option, + /// sets the SETTINGS_MAX_CONCURRENT_STREAMS spec option for HTTP2 connections. Default is no limit (`None`) + pub max_concurrent_streams: Option, + /// set whether TCP keepalive messages are enabled on accepted connections + pub tcp_keepalive: Option, + /// set the value of `TCP_NODELAY` option for accepted connections. Enabled by default + pub tcp_nodelay: bool, + /// set whether HTTP2 Ping frames are enabled on accepted connections. Default is no HTTP2 keepalive (`None`) + pub http2_keepalive_interval: Option, + /// sets a timeout for receiving an acknowledgement of the keepalive ping. Default is 20 seconds + pub http2_keepalive_timeout: Option, + /// sets whether to use an adaptive flow control. 
Defaults to false + pub http2_adaptive_window: Option, + /// sets the maximum frame size to use for HTTP2(must be within 16,384 and 16,777,215). If not set, will default from underlying transport + pub max_frame_size: Option, + /// when looking for next draw we want to look at max `draw_lookahead_period_count` + pub draw_lookahead_period_count: u64, +} + #[cfg(test)] #[test] fn test_load_node_config() { diff --git a/massa-pool-exports/src/channels.rs b/massa-pool-exports/src/channels.rs index b44ec4da659..46faae01549 100644 --- a/massa-pool-exports/src/channels.rs +++ b/massa-pool-exports/src/channels.rs @@ -1,8 +1,8 @@ -use massa_models::operation::Operation; +use massa_models::operation::SecureShareOperation; /// channels used by the pool worker #[derive(Clone)] pub struct PoolChannels { /// Broadcast sender(channel) for new operations - pub operation_sender: tokio::sync::broadcast::Sender, + pub operation_sender: tokio::sync::broadcast::Sender, } diff --git a/massa-pool-exports/src/config.rs b/massa-pool-exports/src/config.rs index 66be14d19ff..881f1e1c680 100644 --- a/massa-pool-exports/src/config.rs +++ b/massa-pool-exports/src/config.rs @@ -27,7 +27,7 @@ pub struct PoolConfig { pub max_block_endorsement_count: u32, /// operations and endorsements communication channels size pub channels_size: usize, - /// Whether WebSockets are enabled + /// whether operations broadcast is enabled pub broadcast_enabled: bool, /// operations sender(channel) capacity pub broadcast_operations_capacity: usize, diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs index 6f20d0c9f10..bb13de18157 100644 --- a/massa-pool-worker/src/operation_pool.rs +++ b/massa-pool-worker/src/operation_pool.rs @@ -126,8 +126,15 @@ impl OperationPool { .expect("attempting to add operation to pool, but it is absent from storage"); // Broadcast operation to active sender(channel) subscribers. if self.config.broadcast_enabled { - let _ = self.channels.operation_sender.send(op.content.clone()); + if let Err(err) = self.channels.operation_sender.send(op.clone()) { + debug!( + "error trying to broadcast operation with id {} due to: {}", + op.id.clone(), + err + ); + } } + let op_info = OperationInfo::from_op( op, self.config.operation_validity_periods, diff --git a/massa-pos-exports/src/controller_traits.rs b/massa-pos-exports/src/controller_traits.rs index 8b7af8a8b2b..1860a775d1f 100644 --- a/massa-pos-exports/src/controller_traits.rs +++ b/massa-pos-exports/src/controller_traits.rs @@ -89,7 +89,7 @@ impl Clone for Box { pub trait SelectorManager { /// Stop the selector thread /// Note that we do not take self by value to consume it - /// because it is not allowed to move out of Box + /// because it is not allowed to move out of `Box` /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. 
fn stop(&mut self); } diff --git a/massa-proto/Cargo.toml b/massa-proto/Cargo.toml new file mode 100644 index 00000000000..ff0794d1a5b --- /dev/null +++ b/massa-proto/Cargo.toml @@ -0,0 +1,22 @@ +# Copyright (c) 2023 MASSA LABS + +[package] +name = "massa_proto" +version = "0.1.0" +edition = "2021" +description = "Protobuf definitions for the Massa blockchain" +repository = "https://github.com/massalabs/massa/" +homepage = "https://massa.net" +documentation = "https://docs.massa.net/" + +[dependencies] +prost = "0.11.6" +prost-types = "0.11.6" +#TODO add stable version when released +tonic = { git = "https://github.com/hyperium/tonic", rev = "ff33119" } + +[build-dependencies] +glob = "0.3.1" +prost-build = "0.11.8" +#TODO add stable version when released +tonic-build = { git = "https://github.com/hyperium/tonic", rev = "ff33119" } diff --git a/massa-proto/build.rs b/massa-proto/build.rs new file mode 100644 index 00000000000..331e5664df7 --- /dev/null +++ b/massa-proto/build.rs @@ -0,0 +1,37 @@ +// Copyright (c) 2023 MASSA LABS + +use glob::glob; +use std::path::PathBuf; + +fn main() -> Result<(), Box> { + //TODO add download external protos files instead of doing it manually + let protos = find_protos("proto/massa/"); + + tonic_build::configure() + .build_server(true) + .build_transport(true) + .build_client(true) + .type_attribute( + ".google.api.HttpRule", + "#[cfg(not(doctest))]\n\ + #[allow(dead_code)]\n\ + pub struct HttpRuleComment{}\n\ + /// HACK: see docs in [`HttpRuleComment`] ignored in doctest pass", + ) + .file_descriptor_set_path("src/api.bin") + .include_file("_includes.rs") + .out_dir("src/") + .compile( + &protos, + &["proto/massa/api/v1/", "proto/third-party"], // specify the root location to search proto dependencies + )?; + + Ok(()) +} + +fn find_protos(dir_path: &str) -> Vec { + glob(&format!("{dir_path}/**/*.proto")) + .unwrap() + .flatten() + .collect() +} diff --git a/massa-proto/doc/index.html b/massa-proto/doc/index.html new file mode 100644 index 00000000000..a5dd1b5e9ca --- /dev/null +++ b/massa-proto/doc/index.html @@ -0,0 +1,1971 @@ + + + + + Protocol Documentation + + + + + + + + + + +

[massa-proto/doc/index.html: generated protoc-gen-doc output (1,971 lines of HTML; markup not reproduced here).
It documents the massa.api.v1 package: each request/response message and its fields, the OpType enum
(OP_TYPE_UNSPECIFIED, OP_TYPE_TRANSACTION, OP_TYPE_ROLL_BUY, OP_TYPE_ROLL_SELL, OP_TYPE_EXECUTE_SC, OP_TYPE_CALL_SC),
the MassaService RPCs (GetBlocksBySlots, GetDatastoreEntries, GetNextBlockBestParents, GetSelectorDraws,
GetTransactionsThroughput, GetVersion, plus the streaming NewBlocks, NewBlocksHeaders, NewFilledBlocks,
NewOperations, SendBlocks, SendEndorsements, SendOperations and TransactionsThroughput),
the HTTP bindings (GET /v1/get_blocks_by_slots, POST /v1/get_datastore_entries, GET /v1/get_next_block_best_parents,
POST /v1/get_selector_draws, GET /v1/transactions_throughput, GET /v1/version),
and the standard protobuf scalar value type table. The same definitions and comments appear in the .proto sources added below.]
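For orientation, here is a minimal, hypothetical tonic client for the MassaService documented above; it is not part of this diff. It assumes a separate binary crate that depends on `massa_proto`, `tokio`, and the same `tonic` revision, and that the generated `massa.api.v1` module is exposed at the crate root as laid out in `_includes.rs` below. The bind address is a placeholder for whatever `SETTINGS.grpc.bind` is configured to.

```rust
// Hypothetical example, not part of this diff: query GetVersion on a node whose
// gRPC API is enabled. The bind address is a placeholder for SETTINGS.grpc.bind.
use massa_proto::massa::api::v1::massa_service_client::MassaServiceClient;
use massa_proto::massa::api::v1::GetVersionRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect over plain HTTP/2 to the node's gRPC endpoint.
    let mut client = MassaServiceClient::connect("http://127.0.0.1:33037").await?;

    // Unary call: send a request id and read back the node version.
    let response = client
        .get_version(GetVersionRequest { id: "1".into() })
        .await?
        .into_inner();

    println!("request {} -> node version {}", response.id, response.version);
    Ok(())
}
```

The streaming RPCs (e.g. NewOperations or TransactionsThroughput) follow the same pattern, except that the generated client methods accept a request stream and return a `tonic::Streaming` of responses. Note also that tonic does not implement the google.api.http REST bindings itself; those annotations target a transcoding front end such as grpc-gateway or Envoy, as described in the http.proto comments further down.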
+ + + diff --git a/massa-proto/proto/massa/api/v1/api.proto b/massa-proto/proto/massa/api/v1/api.proto new file mode 100644 index 00000000000..34be02760ba --- /dev/null +++ b/massa-proto/proto/massa/api/v1/api.proto @@ -0,0 +1,408 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +import "block.proto"; +import "common.proto"; +import "google/api/annotations.proto"; +import "google/rpc/status.proto"; +import "operation.proto"; +import "slot.proto"; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// Massa gRPC service +service MassaService { + // Get blocks by slots + rpc GetBlocksBySlots(GetBlocksBySlotsRequest) returns (GetBlocksBySlotsResponse) { + option (google.api.http) = {get: "/v1/get_blocks_by_slots"}; + } + + // Get datastore entries + rpc GetDatastoreEntries(GetDatastoreEntriesRequest) returns (GetDatastoreEntriesResponse) { + option (google.api.http) = { + post: "/v1/get_datastore_entries" + body: "*" + }; + } + + // Get next block best parents + rpc GetNextBlockBestParents(GetNextBlockBestParentsRequest) returns (GetNextBlockBestParentsResponse) { + option (google.api.http) = {get: "/v1/get_next_block_best_parents"}; + } + + // Get selector draws + rpc GetSelectorDraws(GetSelectorDrawsRequest) returns (GetSelectorDrawsResponse) { + option (google.api.http) = { + post: "/v1/get_selector_draws" + body: "*" + }; + } + + // Get transactions throughput + rpc GetTransactionsThroughput(GetTransactionsThroughputRequest) returns (GetTransactionsThroughputResponse) { + option (google.api.http) = {get: "/v1/transactions_throughput"}; + } + + // Get node version + rpc GetVersion(GetVersionRequest) returns (GetVersionResponse) { + option (google.api.http) = {get: "/v1/version"}; + } + + // ███████╗████████╗██████╗ ███████╗ █████╗ ███╗ ███╗ + // ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██╔══██╗████╗ ████║ + // ███████╗ ██║ ██████╔╝█████╗ ███████║██╔████╔██║ + // ╚════██║ ██║ ██╔══██╗██╔══╝ ██╔══██║██║╚██╔╝██║ + // ███████║ ██║ ██║ ██║███████╗██║ ██║██║ ╚═╝ ██║ + + // New received and produced blocks + rpc NewBlocks(stream NewBlocksRequest) returns (stream NewBlocksResponse) {} + + // New received and produced blocks headers + rpc NewBlocksHeaders(stream NewBlocksHeadersRequest) returns (stream NewBlocksHeadersResponse) {} + + // New received and produced blocks with operations + rpc NewFilledBlocks(stream NewFilledBlocksRequest) returns (stream NewFilledBlocksResponse) {} + + // New received and produced perations + rpc NewOperations(stream NewOperationsRequest) returns (stream NewOperationsResponse) {} + + // Send blocks + rpc SendBlocks(stream SendBlocksRequest) returns (stream SendBlocksResponse) {} + + // Send endorsements + rpc SendEndorsements(stream SendEndorsementsRequest) returns (stream SendEndorsementsResponse) {} + + // Send operations + rpc SendOperations(stream SendOperationsRequest) returns (stream SendOperationsResponse) {} + + // Transactions throughput per second + rpc TransactionsThroughput(stream TransactionsThroughputRequest) returns (stream TransactionsThroughputResponse) {} +} + +// GetBlocksBySlotsRequest holds request for GetBlocksBySlots +message GetBlocksBySlotsRequest { + // Request id + string id = 1; + // Slots + repeated Slot slots = 2; 
+} + +// GetBlocksBySlotsResponse holds response from GetBlocksBySlots +message GetBlocksBySlotsResponse { + // Request id + string id = 1; + // Blocks + repeated Block blocks = 2; +} + +// GetDatastoreEntriesRequest holds request from GetDatastoreEntries +message GetDatastoreEntriesRequest { + // Request id + string id = 1; + // Queries + repeated DatastoreEntriesQuery queries = 2; +} + +// DatastoreEntries Query +message DatastoreEntriesQuery { + // Filter + DatastoreEntryFilter filter = 1; +} + +message DatastoreEntryFilter { + /// Associated address of the entry + string address = 1; + // Datastore key + bytes key = 2; +} + +// GetDatastoreEntriesResponse holds response from GetDatastoreEntries +message GetDatastoreEntriesResponse { + // Request id + string id = 1; + // Datastore entries + repeated DatastoreEntry entries = 2; +} + +// DatastoreEntry +message DatastoreEntry { + // final datastore entry value + bytes final_value = 1; + // candidate_value datastore entry value + bytes candidate_value = 2; +} + +// GetNextBlockBestParentsRequest holds request for GetNextBlockBestParents +message GetNextBlockBestParentsRequest { + // Request id + string id = 1; +} + +// GetNextBlockBestParentsResponse holds response from GetNextBlockBestParents +message GetNextBlockBestParentsResponse { + // Request id + string id = 1; + // Best parents + repeated BlockParent parents = 2; +} + +// Block parent tuple +message BlockParent { + // Block id + string block_id = 1; + // Period + fixed64 period = 2; +} + +// GetSelectorDrawsRequest holds request from GetSelectorDraws +message GetSelectorDrawsRequest { + // Request id + string id = 1; + // Queries + repeated SelectorDrawsQuery queries = 2; +} + +// SelectorDraws Query +message SelectorDrawsQuery { + // Filter + SelectorDrawsFilter filter = 1; +} + +// SelectorDraws Filter +message SelectorDrawsFilter { + // Address + string address = 1; +} + +// GetSelectorDrawsResponse holds response from GetSelectorDraws +message GetSelectorDrawsResponse { + // Request id + string id = 1; + // Selector draws + repeated SelectorDraws selector_draws = 2; +} + +// Selector draws +message SelectorDraws { + // Address + string address = 1; + // Next block draws + repeated Slot next_block_draws = 2; + // Next endorsements draws + repeated IndexedSlot next_endorsement_draws = 3; +} + +// GetTransactionsThroughputRequest holds request for GetTransactionsThroughput +message GetTransactionsThroughputRequest { + // Request id + string id = 1; +} + +// GetTransactionsThroughputResponse holds response from GetTransactionsThroughput +message GetTransactionsThroughputResponse { + // Request id + string id = 1; + // Transactions throughput + fixed32 throughput = 2; +} + +// GetVersionRequest holds request from GetVersion +message GetVersionRequest { + // Request id + string id = 1; +} + +// GetVersionResponse holds response from GetVersion +message GetVersionResponse { + // Request id + string id = 1; + // Version + string version = 2; +} + +// NewBlocksRequest holds request for NewBlocks +message NewBlocksRequest { + // Request id + string id = 1; +} + +// NewBlocksResponse holds response from NewBlocks +message NewBlocksResponse { + // Request id + string id = 1; + // Signed block + SignedBlock block = 2; +} + +// NewBlocksHeadersRequest holds request for NewBlocksHeaders +message NewBlocksHeadersRequest { + // Request id + string id = 1; +} + +// NewBlocksHeadersResponse holds response from NewBlocksHeaders +message NewBlocksHeadersResponse { + // Request id + string id = 1; + 
// Signed block header + SignedBlockHeader block_header = 2; +} + +// NewFilledBlocksRequest holds request for NewFilledBlocks +message NewFilledBlocksRequest { + // Request id + string id = 1; +} + +// NewFilledBlocksResponse holds response from NewFilledBlocks +message NewFilledBlocksResponse { + // Request id + string id = 1; + // Block with operations content + FilledBlock filled_block = 2; +} + +// NewOperationsRequest holds request for NewOperations +message NewOperationsRequest { + // Request id + string id = 1; + // Query + NewOperationsQuery query = 2; +} + +// NewOperations Query +message NewOperationsQuery { + // Filter + NewOperationsFilter filter = 1; +} + +// NewOperations Filter +message NewOperationsFilter { + // Operation type enum + repeated OpType types = 1; +} + +// Operation type enum +enum OpType { + OP_TYPE_UNSPECIFIED = 0; // Defaut enum value + OP_TYPE_TRANSACTION = 1; // Transaction + OP_TYPE_ROLL_BUY = 2; // Roll buy + OP_TYPE_ROLL_SELL = 3; // Roll sell + OP_TYPE_EXECUTE_SC = 4; // Execute smart contract + OP_TYPE_CALL_SC = 5; // Call smart contract +} + +// NewOperationsResponse holds response from NewOperations +message NewOperationsResponse { + // Request id + string id = 1; + // Signed operation + SignedOperation operation = 2; +} + +// SendBlocksRequest holds parameters to SendBlocks +message SendBlocksRequest { + // Request id + string id = 1; + // Secure shared block + SecureShare block = 2; +} + +// SendBlocksResponse holds response from SendBlocks +message SendBlocksResponse { + // Request id + string id = 1; + // Block result or a gRPC status + oneof message { + // Block result + BlockResult result = 2; + // gRPC error(status) + google.rpc.Status error = 3; + } +} + +// Holds Block response +message BlockResult { + // Block id + string block_id = 1; +} + +// SendEndorsementsRequest holds parameters to SendEndorsements +message SendEndorsementsRequest { + // Request id + string id = 1; + // Secure shared endorsements + repeated SecureShare endorsements = 2; +} + +// SendEndorsementsResponse holds response from SendEndorsements +message SendEndorsementsResponse { + // Request id + string id = 1; + // Endorsement result or gRPC status + oneof message { + // Endorsement result + EndorsementResult result = 2; + // gRPC error(status) + google.rpc.Status error = 3; + } +} + +// Holds Endorsement response +message EndorsementResult { + // Endorsements ids + repeated string endorsements_ids = 1; +} + +// SendOperationsRequest holds parameters to SendOperations +message SendOperationsRequest { + // Request id + string id = 1; + // Secured shared operations + repeated SecureShare operations = 2; +} + +// SendOperationsResponse holds response from SendOperations +message SendOperationsResponse { + // Request id + string id = 1; + // Operation result or gRPC status + oneof message { + // Operation result + OperationResult result = 2; + // gRPC error(status) + google.rpc.Status error = 3; + } +} + +// Holds Operation response +message OperationResult { + // Operation(s) id(s) + repeated string operations_ids = 1; +} + +// TransactionsThroughputRequest holds request for TransactionsThroughput +message TransactionsThroughputRequest { + // Request id + string id = 1; + // Optional timer interval in sec. 
Defaults to 10s + optional fixed64 interval = 2; +} + +// TransactionsThroughputResponse holds response from TransactionsThroughput +message TransactionsThroughputResponse { + // Request id + string id = 1; + // Transactions throughput + fixed32 throughput = 2; +} \ No newline at end of file diff --git a/massa-proto/proto/massa/api/v1/block.proto b/massa-proto/proto/massa/api/v1/block.proto new file mode 100644 index 00000000000..e9d0d3411a1 --- /dev/null +++ b/massa-proto/proto/massa/api/v1/block.proto @@ -0,0 +1,82 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +import "endorsement.proto"; +import "operation.proto"; +import "slot.proto"; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// Block +message Block { + // Signed header + SignedBlockHeader header = 1; + // Operations ids + repeated string operations = 2; +} + +// Filled block +message FilledBlock { + // Signed header + SignedBlockHeader header = 1; + // Operations + repeated FilledOperationTuple operations = 2; +} + +// Block header +message BlockHeader { + // Slot + Slot slot = 1; + // parents + repeated string parents = 2; + // All operations hash + string operation_merkle_root = 3; + // Signed endorsements + repeated SignedEndorsement endorsements = 4; +} + +// Filled Operation Tuple +message FilledOperationTuple { + // Operation id + string operation_id = 1; + // Signed operation + SignedOperation operation = 2; +} + +// Signed block +message SignedBlock { + // Block + Block content = 1; + // A cryptographically generated value using `serialized_data` and a public key. + string signature = 2; + // The public-key component used in the generation of the signature + string content_creator_pub_key = 3; + // Derived from the same public key used to generate the signature + string content_creator_address = 4; + // A secure hash of the data. See also [massa_hash::Hash] + string id = 5; +} + +// Signed block header +message SignedBlockHeader { + // BlockHeader + BlockHeader content = 1; + // A cryptographically generated value using `serialized_data` and a public key. + string signature = 2; + // The public-key component used in the generation of the signature + string content_creator_pub_key = 3; + // Derived from the same public key used to generate the signature + string content_creator_address = 4; + // A secure hash of the data. 
See also [massa_hash::Hash] + string id = 5; +} diff --git a/massa-proto/proto/massa/api/v1/common.proto b/massa-proto/proto/massa/api/v1/common.proto new file mode 100644 index 00000000000..5fb3492d57c --- /dev/null +++ b/massa-proto/proto/massa/api/v1/common.proto @@ -0,0 +1,36 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// BytesMapFieldEntry +message BytesMapFieldEntry { + // bytes key + bytes key = 1; + // bytes key + bytes value = 2; +} + +// Packages a type such that it can be securely sent and received in a trust-free network +message SecureShare { + // Content in sharable, deserializable form. Is used in the secure verification protocols + bytes serialized_data = 1; + // A cryptographically generated value using `serialized_data` and a public key. + string signature = 2; + // The public-key component used in the generation of the signature + string content_creator_pub_key = 3; + // Derived from the same public key used to generate the signature + string content_creator_address = 4; + // A secure hash of the data. See also [massa_hash::Hash] + string id = 5; +} diff --git a/massa-proto/proto/massa/api/v1/endorsement.proto b/massa-proto/proto/massa/api/v1/endorsement.proto new file mode 100644 index 00000000000..2b9794e534e --- /dev/null +++ b/massa-proto/proto/massa/api/v1/endorsement.proto @@ -0,0 +1,41 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +import "slot.proto"; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// An endorsement, as sent in the network +message Endorsement { + // Slot in which the endorsement can be included + Slot slot = 1; + // Endorsement index inside the including block + fixed32 index = 2; + // Hash of endorsed block + // This is the parent in thread `self.slot.thread` of the block in which the endorsement is included + string endorsed_block = 3; +} + +// Signed endorsement +message SignedEndorsement { + // Endorsement + Endorsement content = 1; + // A cryptographically generated value using `serialized_data` and a public key. + string signature = 2; + // The public-key component used in the generation of the signature + string content_creator_pub_key = 3; + // Derived from the same public key used to generate the signature + string content_creator_address = 4; + // A secure hash of the data. 
See also [massa_hash::Hash] + string id = 5; +} diff --git a/massa-proto/proto/massa/api/v1/operation.proto b/massa-proto/proto/massa/api/v1/operation.proto new file mode 100644 index 00000000000..79d7f5e153b --- /dev/null +++ b/massa-proto/proto/massa/api/v1/operation.proto @@ -0,0 +1,98 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +import "common.proto"; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// The operation as sent in the network +message Operation { + // The fee they have decided for this operation + fixed64 fee = 1; + // After `expire_period` slot the operation won't be included in a block + fixed64 expire_period = 2; + // The type specific operation part + OperationType op = 3; +} + +// Type specific operation content +message OperationType { + // Transfer coins from sender to recipient + Transaction transaction = 1; + // The sender buys `roll_count` rolls. Roll price is defined in configuration + RollBuy roll_buy = 2; + // The sender sells `roll_count` rolls. Roll price is defined in configuration + RollSell roll_sell = 3; + // Execute a smart contract + ExecuteSC execut_sc = 4; + // Calls an exported function from a stored smart contract + CallSC call_sc = 5; +} + +// Transfer coins from sender to recipient +message Transaction { + // Recipient address + string recipient_address = 1; + // Amount + fixed64 amount = 2; +} + +// The sender buys `roll_count` rolls. Roll price is defined in configuration +message RollBuy { + // Roll count + fixed64 roll_count = 1; +} + +// The sender sells `roll_count` rolls. Roll price is defined in configuration +message RollSell { + // Roll count + fixed64 roll_count = 1; +} + +// Execute a smart contract +message ExecuteSC { + // Smart contract bytecode. + bytes data = 1; + // The maximum amount of gas that the execution of the contract is allowed to cost + fixed64 max_gas = 2; + // A key-value store associating a hash to arbitrary bytes + repeated BytesMapFieldEntry datastore = 3; +} + +// Calls an exported function from a stored smart contract +message CallSC { + // Target smart contract address + string target_addr = 1; + // Target function name. No function is called if empty + string target_func = 2; + // Parameter to pass to the target function + bytes param = 3; + // The maximum amount of gas that the execution of the contract is allowed to cost + fixed64 max_gas = 4; + // Extra coins that are spent from the caller's balance and transferred to the target + fixed64 coins = 5; +} + +// Signed operation +message SignedOperation { + // Operation + Operation content = 1; + // A cryptographically generated value using `serialized_data` and a public key. + string signature = 2; + // The public-key component used in the generation of the signature + string content_creator_pub_key = 3; + // Derived from the same public key used to generate the signature + string content_creator_address = 4; + // A secure hash of the data. 
See also [massa_hash::Hash] + string id = 5; +} diff --git a/massa-proto/proto/massa/api/v1/slot.proto b/massa-proto/proto/massa/api/v1/slot.proto new file mode 100644 index 00000000000..ceb6b42054d --- /dev/null +++ b/massa-proto/proto/massa/api/v1/slot.proto @@ -0,0 +1,30 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// When an address is drawn to create an endorsement it is selected for a specific index +message IndexedSlot { + // Slot + Slot slot = 1; + // Endorsement index in the slot + fixed64 index = 2; +} + +// A point in time where a block is expected +message Slot { + // Period + fixed64 period = 1; + // Thread + fixed32 thread = 2; +} diff --git a/massa-proto/proto/third-party/google/api/annotations.proto b/massa-proto/proto/third-party/google/api/annotations.proto new file mode 100644 index 00000000000..8ff42098404 --- /dev/null +++ b/massa-proto/proto/third-party/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} \ No newline at end of file diff --git a/massa-proto/proto/third-party/google/api/http.proto b/massa-proto/proto/third-party/google/api/http.proto new file mode 100644 index 00000000000..31d867a27d5 --- /dev/null +++ b/massa-proto/proto/third-party/google/api/http.proto @@ -0,0 +1,379 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. 
+// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. 
+// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. 
Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/massa-proto/proto/third-party/google/rpc/status.proto b/massa-proto/proto/third-party/google/rpc/status.proto new file mode 100644 index 00000000000..923e169381a --- /dev/null +++ b/massa-proto/proto/third-party/google/rpc/status.proto @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/massa-proto/src/_includes.rs b/massa-proto/src/_includes.rs new file mode 100644 index 00000000000..f8dcdf7bb84 --- /dev/null +++ b/massa-proto/src/_includes.rs @@ -0,0 +1,15 @@ +pub mod google { + pub mod api { + include!("google.api.rs"); + } + pub mod rpc { + include!("google.rpc.rs"); + } +} +pub mod massa { + pub mod api { + pub mod v1 { + include!("massa.api.v1.rs"); + } + } +} diff --git a/massa-proto/src/api.bin b/massa-proto/src/api.bin new file mode 100644 index 00000000000..1151d958de8 Binary files /dev/null and b/massa-proto/src/api.bin differ diff --git a/massa-proto/src/google.api.rs b/massa-proto/src/google.api.rs new file mode 100644 index 00000000000..e9f0a88b18e --- /dev/null +++ b/massa-proto/src/google.api.rs @@ -0,0 +1,379 @@ +/// Defines the HTTP configuration for an API service. 
It contains a list of +/// \[HttpRule][google.api.HttpRule\], each specifying the mapping of an RPC method +/// to one or more HTTP REST API methods. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. + #[prost(bool, tag = "2")] + pub fully_decode_reserved_expansion: bool, +} +/// # gRPC Transcoding +/// +/// gRPC Transcoding is a feature for mapping between a gRPC method and one or +/// more HTTP REST endpoints. It allows developers to build a single API service +/// that supports both gRPC APIs and REST APIs. Many systems, including [Google +/// APIs](), +/// [Cloud Endpoints](), [gRPC +/// Gateway](), +/// and \[Envoy\]() proxy support this feature +/// and use it for large scale production services. +/// +/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +/// how different portions of the gRPC request message are mapped to the URL +/// path, URL query parameters, and HTTP request body. It also controls how the +/// gRPC response message is mapped to the HTTP response body. `HttpRule` is +/// typically specified as an `google.api.http` annotation on the gRPC method. +/// +/// Each mapping specifies a URL path template and an HTTP method. The path +/// template may refer to one or more fields in the gRPC request message, as long +/// as each field is a non-repeated field with a primitive (non-message) type. +/// The path template controls how fields of the request message are mapped to +/// the URL path. +/// +/// Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/{name=messages/*}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string name = 1; // Mapped to URL path. +/// } +/// message Message { +/// string text = 1; // The resource content. +/// } +/// +/// This enables an HTTP REST to gRPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// +/// Any fields in the request message which are not bound by the path template +/// automatically become HTTP query parameters if there is no HTTP request body. +/// For example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get:"/v1/messages/{message_id}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// message SubMessage { +/// string subfield = 1; +/// } +/// string message_id = 1; // Mapped to URL path. +/// int64 revision = 2; // Mapped to URL query parameter `revision`. +/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+/// } +/// +/// This enables a HTTP JSON to RPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +/// "foo"))` +/// +/// Note that fields which are mapped to URL query parameters must have a +/// primitive type or a repeated primitive type or a non-repeated message type. +/// In the case of a repeated type, the parameter can be repeated in the URL +/// as `...?param=A¶m=B`. In the case of a message type, each field of the +/// message is mapped to a separate parameter, such as +/// `...?foo.a=A&foo.b=B&foo.c=C`. +/// +/// For HTTP methods that allow a request body, the `body` field +/// specifies the mapping. Consider a REST update method on the +/// message resource collection: +/// +/// service Messaging { +/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "message" +/// }; +/// } +/// } +/// message UpdateMessageRequest { +/// string message_id = 1; // mapped to the URL +/// Message message = 2; // mapped to the body +/// } +/// +/// The following HTTP JSON to RPC mapping is enabled, where the +/// representation of the JSON in the request body is determined by +/// protos JSON encoding: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" message { text: "Hi!" })` +/// +/// The special name `*` can be used in the body mapping to define that +/// every field not bound by the path template should be mapped to the +/// request body. This enables the following alternative definition of +/// the update method: +/// +/// service Messaging { +/// rpc UpdateMessage(Message) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "*" +/// }; +/// } +/// } +/// message Message { +/// string message_id = 1; +/// string text = 2; +/// } +/// +/// +/// The following HTTP JSON to RPC mapping is enabled: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" text: "Hi!")` +/// +/// Note that when using `*` in the body mapping, it is not possible to +/// have HTTP parameters, as all fields not bound by the path end in +/// the body. This makes this option more rarely used in practice when +/// defining REST APIs. The common usage of `*` is in custom methods +/// which don't use the URL at all for transferring data. +/// +/// It is possible to define multiple HTTP methods for one RPC by using +/// the `additional_bindings` option. Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/messages/{message_id}" +/// additional_bindings { +/// get: "/v1/users/{user_id}/messages/{message_id}" +/// } +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string message_id = 1; +/// string user_id = 2; +/// } +/// +/// This enables the following two alternative HTTP JSON to RPC mappings: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +/// "123456")` +/// +/// ## Rules for HTTP mapping +/// +/// 1. 
Leaf request fields (recursive expansion nested messages in the request +/// message) are classified into three categories: +/// - Fields referred by the path template. They are passed via the URL path. +/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They +/// are passed via the HTTP +/// request body. +/// - All other fields are passed via the URL query parameters, and the +/// parameter name is the field path in the request message. A repeated +/// field can be represented as multiple query parameters under the same +/// name. +/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL +/// query parameter, all fields +/// are passed via URL path and HTTP request body. +/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP +/// request body, all +/// fields are passed via URL path and URL query parameters. +/// +/// ### Path template syntax +/// +/// Template = "/" Segments [ Verb ] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath [ "=" Segments ] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. +/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. 
+/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. +/// +/// The path variables **must not** refer to any repeated or mapped field, +/// because client libraries are not capable of handling such variable expansion. +/// +/// The path variables **must not** capture the leading "/" character. The reason +/// is that the most common use case "{var}" does not capture the leading "/" +/// character. For consistency, all path variables must share the same behavior. +/// +/// Repeated message fields must not be mapped to URL query parameters, because +/// no client library can support such complicated mapping. +/// +/// If an API needs to use a JSON array for request or response body, it can map +/// the request or response body to a repeated field. However, some gRPC +/// Transcoding implementations may not support this feature. +#[cfg(not(doctest))] +#[allow(dead_code)] +pub struct HttpRuleComment {} +/// HACK: see docs in [`HttpRuleComment`] ignored in doctest pass +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HttpRule { + /// Selects a method to which this rule applies. + /// + /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax + /// details. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// The name of the request field whose value is mapped to the HTTP request + /// body, or `*` for mapping all request fields not captured by the path + /// pattern to the HTTP body, or omitted for not having any HTTP request body. + /// + /// NOTE: the referred field must be present at the top-level of the request + /// message type. + #[prost(string, tag = "7")] + pub body: ::prost::alloc::string::String, + /// Optional. The name of the response field whose value is mapped to the HTTP + /// response body. When omitted, the entire response message will be used + /// as the HTTP response body. + /// + /// NOTE: The referred field must be present at the top-level of the response + /// message type. + #[prost(string, tag = "12")] + pub response_body: ::prost::alloc::string::String, + /// Additional HTTP bindings for the selector. Nested bindings must + /// not contain an `additional_bindings` field themselves (that is, + /// the nesting may only be one level deep). + #[prost(message, repeated, tag = "11")] + pub additional_bindings: ::prost::alloc::vec::Vec, + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] + pub pattern: ::core::option::Option, +} +/// Nested message and enum types in `HttpRule`. +pub mod http_rule { + /// Determines the URL pattern is matched by this rules. 
This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[cfg(not(doctest))] + #[allow(dead_code)] + pub struct HttpRuleComment {} + /// HACK: see docs in [`HttpRuleComment`] ignored in doctest pass + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Pattern { + /// Maps to HTTP GET. Used for listing and getting information about + /// resources. + #[prost(string, tag = "2")] + Get(::prost::alloc::string::String), + /// Maps to HTTP PUT. Used for replacing a resource. + #[prost(string, tag = "3")] + Put(::prost::alloc::string::String), + /// Maps to HTTP POST. Used for creating a resource or performing an action. + #[prost(string, tag = "4")] + Post(::prost::alloc::string::String), + /// Maps to HTTP DELETE. Used for deleting a resource. + #[prost(string, tag = "5")] + Delete(::prost::alloc::string::String), + /// Maps to HTTP PATCH. Used for updating a resource. + #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. + #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} diff --git a/massa-proto/src/google.rpc.rs b/massa-proto/src/google.rpc.rs new file mode 100644 index 00000000000..e20cb148349 --- /dev/null +++ b/massa-proto/src/google.rpc.rs @@ -0,0 +1,25 @@ +/// The `Status` type defines a logical error model that is suitable for +/// different programming environments, including REST APIs and RPC APIs. It is +/// used by \[gRPC\](). Each `Status` message contains +/// three pieces of data: error code, error message, and error details. +/// +/// You can find out more about this error model and how to work with it in the +/// [API Design Guide](). +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Status { + /// The status code, which should be an enum value of + /// \[google.rpc.Code][google.rpc.Code\]. + #[prost(int32, tag = "1")] + pub code: i32, + /// A developer-facing error message, which should be in English. Any + /// user-facing error message should be localized and sent in the + /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized + /// by the client. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + /// A list of messages that carry the error details. There is a common set of + /// message types for APIs to use. + #[prost(message, repeated, tag = "3")] + pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, +} diff --git a/massa-proto/src/lib.rs b/massa-proto/src/lib.rs new file mode 100644 index 00000000000..2fa9bf404f6 --- /dev/null +++ b/massa-proto/src/lib.rs @@ -0,0 +1,44 @@ +// Copyright (c) 2023 MASSA LABS +// +//! ## **Overview** +//! +//! 
This module contains Protobuf message definitions for the Massa blockchain API.
+//! It uses the `prost-build` tool to generate Rust code from the Protobuf definitions.
+//!
+//! ## **Structure**
+//!
+//! * `build.rs`: This file contains build instructions for generating Rust code from the Protobuf definitions using the `prost-build` tool.
+//! * `proto/`: This directory contains the Protobuf message definitions for the Massa blockchain API.
+//! * `src/`: This directory contains the generated Rust code for the Protobuf message definitions.
+//! It also includes a `_includes.rs` file for importing the generated Rust modules and an `api.bin` file for the server reflection protocol.
+//!
+//! ## **Usage**
+//! To use this module, include it as a dependency in your Rust project's `Cargo.toml` file.
+//! You can then import the necessary Rust modules for the Massa API and use the Protobuf messages as needed.
+//!
+#![warn(unused_crate_dependencies)]
+
+/// Google protos Module
+pub mod google {
+    /// Google API Module
+    pub mod api {
+        include!("google.api.rs");
+    }
+    /// Google RPC Module
+    pub mod rpc {
+        include!("google.rpc.rs");
+    }
+}
+
+/// Massa protos Module
+pub mod massa {
+    /// Massa API Module
+    pub mod api {
+        /// Version 1 of the Massa protos
+        pub mod v1 {
+            include!("massa.api.v1.rs");
+            /// Compiled file descriptor set for the Massa protos
+            pub const FILE_DESCRIPTOR_SET: &[u8] = include_bytes!("api.bin");
+        }
+    }
+}
diff --git a/massa-proto/src/massa.api.v1.rs b/massa-proto/src/massa.api.v1.rs
new file mode 100644
index 00000000000..e478016d5eb
--- /dev/null
+++ b/massa-proto/src/massa.api.v1.rs
@@ -0,0 +1,2233 @@
+/// When an address is drawn to create an endorsement it is selected for a specific index
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct IndexedSlot {
+    /// Slot
+    #[prost(message, optional, tag = "1")]
+    pub slot: ::core::option::Option<Slot>,
+    /// Endorsement index in the slot
+    #[prost(fixed64, tag = "2")]
+    pub index: u64,
+}
+/// A point in time where a block is expected
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Slot {
+    /// Period
+    #[prost(fixed64, tag = "1")]
+    pub period: u64,
+    /// Thread
+    #[prost(fixed32, tag = "2")]
+    pub thread: u32,
+}
+/// An endorsement, as sent in the network
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Endorsement {
+    /// Slot in which the endorsement can be included
+    #[prost(message, optional, tag = "1")]
+    pub slot: ::core::option::Option<Slot>,
+    /// Endorsement index inside the including block
+    #[prost(fixed32, tag = "2")]
+    pub index: u32,
+    /// Hash of endorsed block
+    /// This is the parent in thread `self.slot.thread` of the block in which the endorsement is included
+    #[prost(string, tag = "3")]
+    pub endorsed_block: ::prost::alloc::string::String,
+}
+/// Signed endorsement
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignedEndorsement {
+    /// Endorsement
+    #[prost(message, optional, tag = "1")]
+    pub content: ::core::option::Option<Endorsement>,
+    /// A cryptographically generated value using `serialized_data` and a public key.
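For reference, the generated message types behave like ordinary `prost` messages. A minimal sketch under the usage notes above (the `massa_proto` and `prost` dependencies in the consumer crate are assumptions, as is the sample data):

```rust
// Sketch: constructing and serializing generated Massa API messages.
// Assumes the consumer depends on `massa-proto` (as `massa_proto`) and `prost`.
use massa_proto::massa::api::v1::{IndexedSlot, Slot};
use prost::Message;

fn main() {
    // A slot is identified by its period and thread.
    let slot = Slot { period: 42, thread: 7 };

    // An endorsement draw points at a slot plus an index inside that slot.
    let indexed = IndexedSlot {
        slot: Some(slot),
        index: 3,
    };

    // prost gives every generated message a compact binary encoding.
    let bytes = indexed.encode_to_vec();
    let decoded = IndexedSlot::decode(bytes.as_slice()).expect("valid encoding");
    assert_eq!(indexed, decoded);
}
```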
+ #[prost(string, tag = "2")] + pub signature: ::prost::alloc::string::String, + /// The public-key component used in the generation of the signature + #[prost(string, tag = "3")] + pub content_creator_pub_key: ::prost::alloc::string::String, + /// Derived from the same public key used to generate the signature + #[prost(string, tag = "4")] + pub content_creator_address: ::prost::alloc::string::String, + /// A secure hash of the data. See also \[massa_hash::Hash\] + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, +} +/// BytesMapFieldEntry +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BytesMapFieldEntry { + /// bytes key + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + /// bytes key + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, +} +/// Packages a type such that it can be securely sent and received in a trust-free network +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecureShare { + /// Content in sharable, deserializable form. Is used in the secure verification protocols + #[prost(bytes = "vec", tag = "1")] + pub serialized_data: ::prost::alloc::vec::Vec, + /// A cryptographically generated value using `serialized_data` and a public key. + #[prost(string, tag = "2")] + pub signature: ::prost::alloc::string::String, + /// The public-key component used in the generation of the signature + #[prost(string, tag = "3")] + pub content_creator_pub_key: ::prost::alloc::string::String, + /// Derived from the same public key used to generate the signature + #[prost(string, tag = "4")] + pub content_creator_address: ::prost::alloc::string::String, + /// A secure hash of the data. See also \[massa_hash::Hash\] + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, +} +/// The operation as sent in the network +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Operation { + /// The fee they have decided for this operation + #[prost(fixed64, tag = "1")] + pub fee: u64, + /// After `expire_period` slot the operation won't be included in a block + #[prost(fixed64, tag = "2")] + pub expire_period: u64, + /// The type specific operation part + #[prost(message, optional, tag = "3")] + pub op: ::core::option::Option, +} +/// Type specific operation content +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperationType { + /// Transfer coins from sender to recipient + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + /// The sender buys `roll_count` rolls. Roll price is defined in configuration + #[prost(message, optional, tag = "2")] + pub roll_buy: ::core::option::Option, + /// The sender sells `roll_count` rolls. 
Roll price is defined in configuration
+    #[prost(message, optional, tag = "3")]
+    pub roll_sell: ::core::option::Option<RollSell>,
+    /// Execute a smart contract
+    #[prost(message, optional, tag = "4")]
+    pub execut_sc: ::core::option::Option<ExecuteSc>,
+    /// Calls an exported function from a stored smart contract
+    #[prost(message, optional, tag = "5")]
+    pub call_sc: ::core::option::Option<CallSc>,
+}
+/// Transfer coins from sender to recipient
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Transaction {
+    /// Recipient address
+    #[prost(string, tag = "1")]
+    pub recipient_address: ::prost::alloc::string::String,
+    /// Amount
+    #[prost(fixed64, tag = "2")]
+    pub amount: u64,
+}
+/// The sender buys `roll_count` rolls. Roll price is defined in configuration
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RollBuy {
+    /// Roll count
+    #[prost(fixed64, tag = "1")]
+    pub roll_count: u64,
+}
+/// The sender sells `roll_count` rolls. Roll price is defined in configuration
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RollSell {
+    /// Roll count
+    #[prost(fixed64, tag = "1")]
+    pub roll_count: u64,
+}
+/// Execute a smart contract
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecuteSc {
+    /// Smart contract bytecode.
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+    /// The maximum amount of gas that the execution of the contract is allowed to cost
+    #[prost(fixed64, tag = "2")]
+    pub max_gas: u64,
+    /// A key-value store associating a hash to arbitrary bytes
+    #[prost(message, repeated, tag = "3")]
+    pub datastore: ::prost::alloc::vec::Vec<BytesMapFieldEntry>,
+}
+/// Calls an exported function from a stored smart contract
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CallSc {
+    /// Target smart contract address
+    #[prost(string, tag = "1")]
+    pub target_addr: ::prost::alloc::string::String,
+    /// Target function name. No function is called if empty
+    #[prost(string, tag = "2")]
+    pub target_func: ::prost::alloc::string::String,
+    /// Parameter to pass to the target function
+    #[prost(bytes = "vec", tag = "3")]
+    pub param: ::prost::alloc::vec::Vec<u8>,
+    /// The maximum amount of gas that the execution of the contract is allowed to cost
+    #[prost(fixed64, tag = "4")]
+    pub max_gas: u64,
+    /// Extra coins that are spent from the caller's balance and transferred to the target
+    #[prost(fixed64, tag = "5")]
+    pub coins: u64,
+}
+/// Signed operation
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignedOperation {
+    /// Operation
+    #[prost(message, optional, tag = "1")]
+    pub content: ::core::option::Option<Operation>,
+    /// A cryptographically generated value using `serialized_data` and a public key.
+    #[prost(string, tag = "2")]
+    pub signature: ::prost::alloc::string::String,
+    /// The public-key component used in the generation of the signature
+    #[prost(string, tag = "3")]
+    pub content_creator_pub_key: ::prost::alloc::string::String,
+    /// Derived from the same public key used to generate the signature
+    #[prost(string, tag = "4")]
+    pub content_creator_address: ::prost::alloc::string::String,
+    /// A secure hash of the data.
See also \[massa_hash::Hash\] + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, +} +/// Block +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Block { + /// Signed header + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Operations ids + #[prost(string, repeated, tag = "2")] + pub operations: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Filled block +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FilledBlock { + /// Signed header + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Operations + #[prost(message, repeated, tag = "2")] + pub operations: ::prost::alloc::vec::Vec, +} +/// Block header +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockHeader { + /// Slot + #[prost(message, optional, tag = "1")] + pub slot: ::core::option::Option, + /// parents + #[prost(string, repeated, tag = "2")] + pub parents: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// All operations hash + #[prost(string, tag = "3")] + pub operation_merkle_root: ::prost::alloc::string::String, + /// Signed endorsements + #[prost(message, repeated, tag = "4")] + pub endorsements: ::prost::alloc::vec::Vec, +} +/// Filled Operation Tuple +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FilledOperationTuple { + /// Operation id + #[prost(string, tag = "1")] + pub operation_id: ::prost::alloc::string::String, + /// Signed operation + #[prost(message, optional, tag = "2")] + pub operation: ::core::option::Option, +} +/// Signed block +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedBlock { + /// Block + #[prost(message, optional, tag = "1")] + pub content: ::core::option::Option, + /// A cryptographically generated value using `serialized_data` and a public key. + #[prost(string, tag = "2")] + pub signature: ::prost::alloc::string::String, + /// The public-key component used in the generation of the signature + #[prost(string, tag = "3")] + pub content_creator_pub_key: ::prost::alloc::string::String, + /// Derived from the same public key used to generate the signature + #[prost(string, tag = "4")] + pub content_creator_address: ::prost::alloc::string::String, + /// A secure hash of the data. See also \[massa_hash::Hash\] + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, +} +/// Signed block header +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedBlockHeader { + /// BlockHeader + #[prost(message, optional, tag = "1")] + pub content: ::core::option::Option, + /// A cryptographically generated value using `serialized_data` and a public key. + #[prost(string, tag = "2")] + pub signature: ::prost::alloc::string::String, + /// The public-key component used in the generation of the signature + #[prost(string, tag = "3")] + pub content_creator_pub_key: ::prost::alloc::string::String, + /// Derived from the same public key used to generate the signature + #[prost(string, tag = "4")] + pub content_creator_address: ::prost::alloc::string::String, + /// A secure hash of the data. 
See also \[massa_hash::Hash\] + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, +} +/// GetBlocksBySlotsRequest holds request for GetBlocksBySlots +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlocksBySlotsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Slots + #[prost(message, repeated, tag = "2")] + pub slots: ::prost::alloc::vec::Vec, +} +/// GetBlocksBySlotsResponse holds response from GetBlocksBySlots +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlocksBySlotsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Blocks + #[prost(message, repeated, tag = "2")] + pub blocks: ::prost::alloc::vec::Vec, +} +/// GetDatastoreEntriesRequest holds request from GetDatastoreEntries +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDatastoreEntriesRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Queries + #[prost(message, repeated, tag = "2")] + pub queries: ::prost::alloc::vec::Vec, +} +/// DatastoreEntries Query +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatastoreEntriesQuery { + /// Filter + #[prost(message, optional, tag = "1")] + pub filter: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatastoreEntryFilter { + /// / Associated address of the entry + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + /// Datastore key + #[prost(bytes = "vec", tag = "2")] + pub key: ::prost::alloc::vec::Vec, +} +/// GetDatastoreEntriesResponse holds response from GetDatastoreEntries +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDatastoreEntriesResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Datastore entries + #[prost(message, repeated, tag = "2")] + pub entries: ::prost::alloc::vec::Vec, +} +/// DatastoreEntry +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatastoreEntry { + /// final datastore entry value + #[prost(bytes = "vec", tag = "1")] + pub final_value: ::prost::alloc::vec::Vec, + /// candidate_value datastore entry value + #[prost(bytes = "vec", tag = "2")] + pub candidate_value: ::prost::alloc::vec::Vec, +} +/// GetNextBlockBestParentsRequest holds request for GetNextBlockBestParents +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetNextBlockBestParentsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// GetNextBlockBestParentsResponse holds response from GetNextBlockBestParents +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetNextBlockBestParentsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Best parents + #[prost(message, repeated, tag = "2")] + pub parents: ::prost::alloc::vec::Vec, +} +/// Block parent tuple +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockParent { + /// Block id + 
#[prost(string, tag = "1")] + pub block_id: ::prost::alloc::string::String, + /// Period + #[prost(fixed64, tag = "2")] + pub period: u64, +} +/// GetSelectorDrawsRequest holds request from GetSelectorDraws +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetSelectorDrawsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Queries + #[prost(message, repeated, tag = "2")] + pub queries: ::prost::alloc::vec::Vec, +} +/// SelectorDraws Query +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SelectorDrawsQuery { + /// Filter + #[prost(message, optional, tag = "1")] + pub filter: ::core::option::Option, +} +/// SelectorDraws Filter +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SelectorDrawsFilter { + /// Address + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, +} +/// GetSelectorDrawsResponse holds response from GetSelectorDraws +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetSelectorDrawsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Selector draws + #[prost(message, repeated, tag = "2")] + pub selector_draws: ::prost::alloc::vec::Vec, +} +/// Selector draws +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SelectorDraws { + /// Address + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + /// Next block draws + #[prost(message, repeated, tag = "2")] + pub next_block_draws: ::prost::alloc::vec::Vec, + /// Next endorsements draws + #[prost(message, repeated, tag = "3")] + pub next_endorsement_draws: ::prost::alloc::vec::Vec, +} +/// GetTransactionsThroughputRequest holds request for GetTransactionsThroughput +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTransactionsThroughputRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// GetTransactionsThroughputResponse holds response from GetTransactionsThroughput +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTransactionsThroughputResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Transactions throughput + #[prost(fixed32, tag = "2")] + pub throughput: u32, +} +/// GetVersionRequest holds request from GetVersion +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetVersionRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// GetVersionResponse holds response from GetVersion +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetVersionResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Version + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, +} +/// NewBlocksRequest holds request for NewBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewBlocksRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// NewBlocksResponse 
holds response from NewBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewBlocksResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Signed block + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} +/// NewBlocksHeadersRequest holds request for NewBlocksHeaders +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewBlocksHeadersRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// NewBlocksHeadersResponse holds response from NewBlocksHeaders +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewBlocksHeadersResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Signed block header + #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, +} +/// NewFilledBlocksRequest holds request for NewFilledBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewFilledBlocksRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// NewFilledBlocksResponse holds response from NewFilledBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewFilledBlocksResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Block with operations content + #[prost(message, optional, tag = "2")] + pub filled_block: ::core::option::Option, +} +/// NewOperationsRequest holds request for NewOperations +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewOperationsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Query + #[prost(message, optional, tag = "2")] + pub query: ::core::option::Option, +} +/// NewOperations Query +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewOperationsQuery { + /// Filter + #[prost(message, optional, tag = "1")] + pub filter: ::core::option::Option, +} +/// NewOperations Filter +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewOperationsFilter { + /// Operation type enum + #[prost(enumeration = "OpType", repeated, tag = "1")] + pub types: ::prost::alloc::vec::Vec, +} +/// NewOperationsResponse holds response from NewOperations +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewOperationsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Signed operation + #[prost(message, optional, tag = "2")] + pub operation: ::core::option::Option, +} +/// SendBlocksRequest holds parameters to SendBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendBlocksRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Secure shared block + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} +/// SendBlocksResponse holds response from SendBlocks +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub 
struct SendBlocksResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Block result or a gRPC status + #[prost(oneof = "send_blocks_response::Message", tags = "2, 3")] + pub message: ::core::option::Option, +} +/// Nested message and enum types in `SendBlocksResponse`. +pub mod send_blocks_response { + /// Block result or a gRPC status + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Message { + /// Block result + #[prost(message, tag = "2")] + Result(super::BlockResult), + /// gRPC error(status) + #[prost(message, tag = "3")] + Error(super::super::super::super::google::rpc::Status), + } +} +/// Holds Block response +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockResult { + /// Block id + #[prost(string, tag = "1")] + pub block_id: ::prost::alloc::string::String, +} +/// SendEndorsementsRequest holds parameters to SendEndorsements +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendEndorsementsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Secure shared endorsements + #[prost(message, repeated, tag = "2")] + pub endorsements: ::prost::alloc::vec::Vec, +} +/// SendEndorsementsResponse holds response from SendEndorsements +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendEndorsementsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Endorsement result or gRPC status + #[prost(oneof = "send_endorsements_response::Message", tags = "2, 3")] + pub message: ::core::option::Option, +} +/// Nested message and enum types in `SendEndorsementsResponse`. +pub mod send_endorsements_response { + /// Endorsement result or gRPC status + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Message { + /// Endorsement result + #[prost(message, tag = "2")] + Result(super::EndorsementResult), + /// gRPC error(status) + #[prost(message, tag = "3")] + Error(super::super::super::super::google::rpc::Status), + } +} +/// Holds Endorsement response +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EndorsementResult { + /// Endorsements ids + #[prost(string, repeated, tag = "1")] + pub endorsements_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// SendOperationsRequest holds parameters to SendOperations +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendOperationsRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Secured shared operations + #[prost(message, repeated, tag = "2")] + pub operations: ::prost::alloc::vec::Vec, +} +/// SendOperationsResponse holds response from SendOperations +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendOperationsResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Operation result or gRPC status + #[prost(oneof = "send_operations_response::Message", tags = "2, 3")] + pub message: ::core::option::Option, +} +/// Nested message and enum types in `SendOperationsResponse`. 
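The `Send*` responses above all share the same shape: a per-request id plus a oneof that carries either a domain result or a `google.rpc.Status` error. A minimal sketch of consuming that oneof on the client side (the `handle` helper is hypothetical; the plumbing that produces `response` from a `SendBlocks` stream is assumed):

```rust
// Sketch: distinguishing a successful block submission from a gRPC status error.
use massa_proto::google::rpc::Status;
use massa_proto::massa::api::v1::{send_blocks_response, SendBlocksResponse};

fn handle(response: SendBlocksResponse) {
    match response.message {
        Some(send_blocks_response::Message::Result(result)) => {
            // BlockResult only carries the id of the accepted block.
            println!("block {} accepted (request {})", result.block_id, response.id);
        }
        Some(send_blocks_response::Message::Error(Status { code, message, .. })) => {
            eprintln!("block rejected: code={code} message={message}");
        }
        None => eprintln!("empty response for request {}", response.id),
    }
}
```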
+pub mod send_operations_response { + /// Operation result or gRPC status + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Message { + /// Operation result + #[prost(message, tag = "2")] + Result(super::OperationResult), + /// gRPC error(status) + #[prost(message, tag = "3")] + Error(super::super::super::super::google::rpc::Status), + } +} +/// Holds Operation response +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperationResult { + /// Operation(s) id(s) + #[prost(string, repeated, tag = "1")] + pub operations_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// TransactionsThroughputRequest holds request for TransactionsThroughput +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionsThroughputRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Optional timer interval in sec. Defaults to 10s + #[prost(fixed64, optional, tag = "2")] + pub interval: ::core::option::Option, +} +/// TransactionsThroughputResponse holds response from TransactionsThroughput +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionsThroughputResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Transactions throughput + #[prost(fixed32, tag = "2")] + pub throughput: u32, +} +/// Operation type enum +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum OpType { + /// Defaut enum value + Unspecified = 0, + /// Transaction + Transaction = 1, + /// Roll buy + RollBuy = 2, + /// Roll sell + RollSell = 3, + /// Execute smart contract + ExecuteSc = 4, + /// Call smart contract + CallSc = 5, +} +impl OpType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + OpType::Unspecified => "OP_TYPE_UNSPECIFIED", + OpType::Transaction => "OP_TYPE_TRANSACTION", + OpType::RollBuy => "OP_TYPE_ROLL_BUY", + OpType::RollSell => "OP_TYPE_ROLL_SELL", + OpType::ExecuteSc => "OP_TYPE_EXECUTE_SC", + OpType::CallSc => "OP_TYPE_CALL_SC", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "OP_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "OP_TYPE_TRANSACTION" => Some(Self::Transaction), + "OP_TYPE_ROLL_BUY" => Some(Self::RollBuy), + "OP_TYPE_ROLL_SELL" => Some(Self::RollSell), + "OP_TYPE_EXECUTE_SC" => Some(Self::ExecuteSc), + "OP_TYPE_CALL_SC" => Some(Self::CallSc), + _ => None, + } + } +} +/// Generated client implementations. +pub mod massa_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Massa gRPC service + #[derive(Debug, Clone)] + pub struct MassaServiceClient { + inner: tonic::client::Grpc, + } + impl MassaServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
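The `OpType` enum above combines with the `NewOperations*` messages defined earlier to filter the operations stream. A minimal sketch (the `transactions_only` helper is hypothetical, and the repeated enumeration field is treated as the usual prost `Vec<i32>` representation):

```rust
// Sketch: building a NewOperations subscription request filtered to transactions.
use massa_proto::massa::api::v1::{
    NewOperationsFilter, NewOperationsQuery, NewOperationsRequest, OpType,
};

fn transactions_only(request_id: String) -> NewOperationsRequest {
    // Round-trip between the enum and its protobuf string name.
    assert_eq!(OpType::Transaction.as_str_name(), "OP_TYPE_TRANSACTION");
    assert_eq!(OpType::from_str_name("OP_TYPE_CALL_SC"), Some(OpType::CallSc));

    NewOperationsRequest {
        id: request_id,
        query: Some(NewOperationsQuery {
            filter: Some(NewOperationsFilter {
                // prost encodes repeated enumerations as i32 values.
                types: vec![OpType::Transaction as i32],
            }),
        }),
    }
}
```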
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MassaServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MassaServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + MassaServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Get blocks by slots + pub async fn get_blocks_by_slots( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetBlocksBySlots", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("massa.api.v1.MassaService", "GetBlocksBySlots"), + ); + self.inner.unary(req, path, codec).await + } + /// Get datastore entries + pub async fn get_datastore_entries( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetDatastoreEntries", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("massa.api.v1.MassaService", "GetDatastoreEntries"), + ); + self.inner.unary(req, path, codec).await + } + /// Get next block best parents + pub async fn get_next_block_best_parents( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + 
.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetNextBlockBestParents", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "massa.api.v1.MassaService", + "GetNextBlockBestParents", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Get selector draws + pub async fn get_selector_draws( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetSelectorDraws", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("massa.api.v1.MassaService", "GetSelectorDraws"), + ); + self.inner.unary(req, path, codec).await + } + /// Get transactions throughput + pub async fn get_transactions_throughput( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetTransactionsThroughput", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "massa.api.v1.MassaService", + "GetTransactionsThroughput", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Get node version + pub async fn get_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetVersion", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "GetVersion")); + self.inner.unary(req, path, codec).await + } + /// New received and produced blocks + pub async fn new_blocks( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/NewBlocks", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "NewBlocks")); + self.inner.streaming(req, path, codec).await + } + /// New received and produced blocks headers + pub async fn new_blocks_headers( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::NewBlocksHeadersRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + 
.await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/NewBlocksHeaders", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("massa.api.v1.MassaService", "NewBlocksHeaders"), + ); + self.inner.streaming(req, path, codec).await + } + /// New received and produced blocks with operations + pub async fn new_filled_blocks( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::NewFilledBlocksRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/NewFilledBlocks", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "NewFilledBlocks")); + self.inner.streaming(req, path, codec).await + } + /// New received and produced perations + pub async fn new_operations( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::NewOperationsRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/NewOperations", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "NewOperations")); + self.inner.streaming(req, path, codec).await + } + /// Send blocks + pub async fn send_blocks( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/SendBlocks", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "SendBlocks")); + self.inner.streaming(req, path, codec).await + } + /// Send endorsements + pub async fn send_endorsements( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::SendEndorsementsRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/SendEndorsements", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("massa.api.v1.MassaService", "SendEndorsements"), + ); + self.inner.streaming(req, path, codec).await + } + /// Send operations + pub async fn send_operations( + &mut self, + request: impl 
tonic::IntoStreamingRequest< + Message = super::SendOperationsRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/SendOperations", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "SendOperations")); + self.inner.streaming(req, path, codec).await + } + /// Transactions throughput per second + pub async fn transactions_throughput( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::TransactionsThroughputRequest, + >, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/TransactionsThroughput", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "massa.api.v1.MassaService", + "TransactionsThroughput", + ), + ); + self.inner.streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod massa_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with MassaServiceServer. + #[async_trait] + pub trait MassaService: Send + Sync + 'static { + /// Get blocks by slots + async fn get_blocks_by_slots( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Get datastore entries + async fn get_datastore_entries( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Get next block best parents + async fn get_next_block_best_parents( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Get selector draws + async fn get_selector_draws( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Get transactions throughput + async fn get_transactions_throughput( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Get node version + async fn get_version( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the NewBlocks method. + type NewBlocksStream: futures_core::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// New received and produced blocks + async fn new_blocks( + &self, + request: tonic::Request>, + ) -> std::result::Result, tonic::Status>; + /// Server streaming response type for the NewBlocksHeaders method. 
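Editor's note: on the server side, each streaming RPC adds an associated `*Stream` type to the trait in addition to its handler. A common way to satisfy such an associated type is a boxed stream fed from an internal broadcast channel; the sketch below shows that pattern for NewBlocks. The channel and the import path are assumptions, not something this diff prescribes.

// Sketch only: the broadcast channel and the import path are assumptions.
use std::pin::Pin;
use futures_core::Stream;
use massa_proto::massa::api::v1::NewBlocksResponse;
use tokio_stream::wrappers::BroadcastStream;
use tokio_stream::StreamExt;
use tonic::Status;

// The shape expected by `type NewBlocksStream` in the generated trait.
type NewBlocksStream =
    Pin<Box<dyn Stream<Item = Result<NewBlocksResponse, Status>> + Send>>;

fn blocks_subscription(
    sender: &tokio::sync::broadcast::Sender<NewBlocksResponse>,
) -> NewBlocksStream {
    let stream = BroadcastStream::new(sender.subscribe())
        // Ignore lagged-receiver errors and wrap each item as a gRPC result.
        .filter_map(|item| item.ok().map(Ok::<_, Status>));
    Box::pin(stream)
}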
+ type NewBlocksHeadersStream: futures_core::Stream< + Item = std::result::Result< + super::NewBlocksHeadersResponse, + tonic::Status, + >, + > + + Send + + 'static; + /// New received and produced blocks headers + async fn new_blocks_headers( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the NewFilledBlocks method. + type NewFilledBlocksStream: futures_core::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// New received and produced blocks with operations + async fn new_filled_blocks( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the NewOperations method. + type NewOperationsStream: futures_core::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// New received and produced perations + async fn new_operations( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the SendBlocks method. + type SendBlocksStream: futures_core::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// Send blocks + async fn send_blocks( + &self, + request: tonic::Request>, + ) -> std::result::Result, tonic::Status>; + /// Server streaming response type for the SendEndorsements method. + type SendEndorsementsStream: futures_core::Stream< + Item = std::result::Result< + super::SendEndorsementsResponse, + tonic::Status, + >, + > + + Send + + 'static; + /// Send endorsements + async fn send_endorsements( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the SendOperations method. + type SendOperationsStream: futures_core::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// Send operations + async fn send_operations( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the TransactionsThroughput method. + type TransactionsThroughputStream: futures_core::Stream< + Item = std::result::Result< + super::TransactionsThroughputResponse, + tonic::Status, + >, + > + + Send + + 'static; + /// Transactions throughput per second + async fn transactions_throughput( + &self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Massa gRPC service + #[derive(Debug)] + pub struct MassaServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl MassaServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
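Editor's note: once a node-side type implements the MassaService trait, the generated MassaServiceServer wrapper turns it into a tower service that tonic's transport can host. A minimal mounting sketch, generic over any implementation; the bind address and module path are illustrative assumptions.

// Sketch only: bind address and module path are assumptions.
use massa_proto::massa::api::v1::massa_service_server::{MassaService, MassaServiceServer};

async fn run_grpc_api<S: MassaService>(service: S) -> Result<(), tonic::transport::Error> {
    let addr = "[::]:33037".parse().expect("valid socket address");
    tonic::transport::Server::builder()
        // `MassaServiceServer` implements `NamedService`, so it can be added
        // directly; health/reflection services could be added alongside it.
        .add_service(MassaServiceServer::new(service))
        .serve(addr)
        .await
}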
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for MassaServiceServer + where + T: MassaService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/massa.api.v1.MassaService/GetBlocksBySlots" => { + #[allow(non_camel_case_types)] + struct GetBlocksBySlotsSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetBlocksBySlotsSvc { + type Response = super::GetBlocksBySlotsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_blocks_by_slots(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBlocksBySlotsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/GetDatastoreEntries" => { + #[allow(non_camel_case_types)] + struct GetDatastoreEntriesSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetDatastoreEntriesSvc { + type Response = super::GetDatastoreEntriesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_datastore_entries(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = 
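Editor's note: the builder methods above (accept_compressed, send_compressed, max_decoding_message_size, max_encoding_message_size) are the knobs a deployment would use to trade CPU for bandwidth and to cap per-message memory. A hedged configuration sketch; the gzip choice and the 16 MiB figure are illustrative, not project defaults.

// Sketch only: compression choice and size limit are illustrative.
use massa_proto::massa::api::v1::massa_service_server::{MassaService, MassaServiceServer};
use tonic::codec::CompressionEncoding;

fn tuned_service<S: MassaService>(service: S) -> MassaServiceServer<S> {
    MassaServiceServer::new(service)
        // Accept gzip-compressed requests and compress responses for clients
        // that advertise support.
        .accept_compressed(CompressionEncoding::Gzip)
        .send_compressed(CompressionEncoding::Gzip)
        // Raise the decode limit above tonic's 4 MB default, e.g. for large
        // filled-block messages.
        .max_decoding_message_size(16 * 1024 * 1024)
}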
GetDatastoreEntriesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/GetNextBlockBestParents" => { + #[allow(non_camel_case_types)] + struct GetNextBlockBestParentsSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetNextBlockBestParentsSvc { + type Response = super::GetNextBlockBestParentsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::GetNextBlockBestParentsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_next_block_best_parents(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetNextBlockBestParentsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/GetSelectorDraws" => { + #[allow(non_camel_case_types)] + struct GetSelectorDrawsSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetSelectorDrawsSvc { + type Response = super::GetSelectorDrawsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_selector_draws(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetSelectorDrawsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/GetTransactionsThroughput" => { + #[allow(non_camel_case_types)] + struct GetTransactionsThroughputSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService< + super::GetTransactionsThroughputRequest, + > for GetTransactionsThroughputSvc { + type Response = super::GetTransactionsThroughputResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: 
tonic::Request< + super::GetTransactionsThroughputRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_transactions_throughput(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTransactionsThroughputSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/GetVersion" => { + #[allow(non_camel_case_types)] + struct GetVersionSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetVersionSvc { + type Response = super::GetVersionResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { (*inner).get_version(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/NewBlocks" => { + #[allow(non_camel_case_types)] + struct NewBlocksSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for NewBlocksSvc { + type Response = super::NewBlocksResponse; + type ResponseStream = T::NewBlocksStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { (*inner).new_blocks(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = NewBlocksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = 
grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/NewBlocksHeaders" => { + #[allow(non_camel_case_types)] + struct NewBlocksHeadersSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for NewBlocksHeadersSvc { + type Response = super::NewBlocksHeadersResponse; + type ResponseStream = T::NewBlocksHeadersStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).new_blocks_headers(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = NewBlocksHeadersSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/NewFilledBlocks" => { + #[allow(non_camel_case_types)] + struct NewFilledBlocksSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for NewFilledBlocksSvc { + type Response = super::NewFilledBlocksResponse; + type ResponseStream = T::NewFilledBlocksStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).new_filled_blocks(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = NewFilledBlocksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/NewOperations" => { + #[allow(non_camel_case_types)] + struct NewOperationsSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for NewOperationsSvc { + type Response = super::NewOperationsResponse; + type ResponseStream = T::NewOperationsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).new_operations(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = 
self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = NewOperationsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/SendBlocks" => { + #[allow(non_camel_case_types)] + struct SendBlocksSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for SendBlocksSvc { + type Response = super::SendBlocksResponse; + type ResponseStream = T::SendBlocksStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { (*inner).send_blocks(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SendBlocksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/SendEndorsements" => { + #[allow(non_camel_case_types)] + struct SendEndorsementsSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService + for SendEndorsementsSvc { + type Response = super::SendEndorsementsResponse; + type ResponseStream = T::SendEndorsementsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).send_endorsements(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SendEndorsementsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/SendOperations" => { + #[allow(non_camel_case_types)] + struct SendOperationsSvc(pub Arc); + impl< + T: MassaService, + > 
tonic::server::StreamingService + for SendOperationsSvc { + type Response = super::SendOperationsResponse; + type ResponseStream = T::SendOperationsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).send_operations(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SendOperationsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/massa.api.v1.MassaService/TransactionsThroughput" => { + #[allow(non_camel_case_types)] + struct TransactionsThroughputSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::StreamingService< + super::TransactionsThroughputRequest, + > for TransactionsThroughputSvc { + type Response = super::TransactionsThroughputResponse; + type ResponseStream = T::TransactionsThroughputStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).transactions_throughput(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TransactionsThroughputSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for MassaServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl 
<T: MassaService> tonic::server::NamedService for MassaServiceServer<T> {
+        const NAME: &'static str = "massa.api.v1.MassaService";
+    }
+}
diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs
index c3f584e05d0..14c9344a850 100644
--- a/massa-protocol-worker/src/protocol_network.rs
+++ b/massa-protocol-worker/src/protocol_network.rs
@@ -217,7 +217,7 @@ impl ProtocolWorker {
         })
     }
 
-    /// Return the sum of all operation's serialized sizes in the Set
+    /// Return the sum of all operation's serialized sizes in the `Set`
     fn get_total_operations_size(
         storage: &Storage,
         operation_ids: &PreHashSet<OperationId>,
diff --git a/massa-protocol-worker/src/worker_operations_impl.rs b/massa-protocol-worker/src/worker_operations_impl.rs
index e9c3dd74c41..cdd4aeb81ea 100644
--- a/massa-protocol-worker/src/worker_operations_impl.rs
+++ b/massa-protocol-worker/src/worker_operations_impl.rs
@@ -1,7 +1,7 @@
 //! Contains the implementation of the life cycle of operations
 //!
 //! Implement the propagation algorithm written here [redirect to GitHub]
-//! (https://github.com/massalabs/massa/issues/2283#issuecomment-1040872779).
+//! <https://github.com/massalabs/massa/issues/2283#issuecomment-1040872779>.
 //!
 //! 1) get batches of operations ids
 //! 2) ask for operations
diff --git a/massa-sdk/src/lib.rs b/massa-sdk/src/lib.rs
index 4bd8075853a..4ec332c3064 100644
--- a/massa-sdk/src/lib.rs
+++ b/massa-sdk/src/lib.rs
@@ -25,6 +25,7 @@ use massa_api_exports::{
     operation::{OperationInfo, OperationInput},
     TimeInterval,
 };
+use massa_models::secure_share::SecureShare;
 use massa_models::{
     address::Address,
     block::FilledBlock,
@@ -496,7 +497,7 @@ impl RpcClientV2 {
     /// New produced blocks headers
     pub async fn subscribe_new_blocks_headers(
         &self,
-    ) -> Result<Subscription<BlockHeader>, jsonrpsee::core::Error> {
+    ) -> Result<Subscription<SecureShare<BlockHeader, BlockId>>, jsonrpsee::core::Error> {
         if let Some(client) = self.ws_client.as_ref() {
             client
                 .subscribe(