diff --git a/Cargo.lock b/Cargo.lock index 2e5585eb6..f261acf21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -903,9 +903,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1343,6 +1343,20 @@ dependencies = [ "uuid", ] +[[package]] +name = "datadog-crashtracker-ffi" +version = "11.0.0" +dependencies = [ + "anyhow", + "build_common", + "datadog-crashtracker", + "ddcommon", + "ddcommon-ffi", + "hyper 0.14.28", + "symbolic-common", + "symbolic-demangle", +] + [[package]] name = "datadog-ddsketch" version = "11.0.0" @@ -1434,9 +1448,8 @@ version = "11.0.0" dependencies = [ "anyhow", "build_common", - "chrono", "data-pipeline-ffi", - "datadog-crashtracker", + "datadog-crashtracker-ffi", "datadog-profiling", "ddcommon", "ddcommon-ffi", @@ -1464,7 +1477,7 @@ dependencies = [ [[package]] name = "datadog-serverless-trace-mini-agent" -version = "0.5.0" +version = "0.6.0" dependencies = [ "datadog-trace-mini-agent", "datadog-trace-protobuf", @@ -1498,7 +1511,6 @@ dependencies = [ "httpmock", "hyper 0.14.28", "io-lifetimes", - "kernel32-sys", "lazy_static", "libc", "manual_future", @@ -1524,7 +1536,7 @@ dependencies = [ "tracing-log", "tracing-subscriber", "uuid", - "winapi 0.2.8", + "winapi 0.3.9", "windows 0.51.1", "zwohash", ] @@ -1556,7 +1568,7 @@ dependencies = [ [[package]] name = "datadog-trace-mini-agent" -version = "0.5.0" +version = "11.0.0" dependencies = [ "anyhow", "async-trait", @@ -1679,6 +1691,7 @@ version = "11.0.0" dependencies = [ "anyhow", "build_common", + "chrono", "ddcommon", "hyper 0.14.28", ] diff --git a/Cargo.toml b/Cargo.toml index b116cc7b7..5d9e83d21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "alloc", "crashtracker", + "crashtracker-ffi", "profiling", "profiling-ffi", "profiling-replayer", diff --git a/LICENSE-3rdparty.yml b/LICENSE-3rdparty.yml index 02092bb0e..4dc7ebacc 100644 --- a/LICENSE-3rdparty.yml +++ b/LICENSE-3rdparty.yml @@ -1,4 +1,4 @@ -root_name: datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-profiling, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-normalization, datadog-trace-protobuf, datadog-trace-utils, ddcommon-ffi, build_common, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, tools, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, spawn_worker, cc_utils, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, sidecar_mockgen, datadog-trace-obfuscation, test_spawn_from_lib, datadog-serverless-trace-mini-agent, datadog-trace-mini-agent +root_name: datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-crashtracker-ffi, ddcommon-ffi, build_common, datadog-profiling, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-normalization, datadog-trace-protobuf, datadog-trace-utils, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, tools, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, spawn_worker, cc_utils, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, sidecar_mockgen, datadog-trace-obfuscation, test_spawn_from_lib, datadog-serverless-trace-mini-agent, datadog-trace-mini-agent 
third_party_libraries: - package_name: addr2line package_version: 0.21.0 @@ -5682,7 +5682,7 @@ third_party_libraries: - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. 
Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" - package_name: chrono - package_version: 0.4.37 + package_version: 0.4.38 repository: https://github.com/chronotope/chrono license: MIT OR Apache-2.0 licenses: diff --git a/build-profiling-ffi.sh b/build-profiling-ffi.sh index 637e31f7c..e71211464 100755 --- a/build-profiling-ffi.sh +++ b/build-profiling-ffi.sh @@ -211,7 +211,7 @@ DESTDIR=$destdir cargo build --package tools --bins echo "Generating $destdir/include/libdatadog headers..." # ADD headers based on selected features. -HEADERS="$destdir/include/datadog/common.h $destdir/include/datadog/profiling.h $destdir/include/datadog/telemetry.h" +HEADERS="$destdir/include/datadog/common.h $destdir/include/datadog/profiling.h $destdir/include/datadog/telemetry.h $destdir/include/datadog/crashtracker.h" case $ARG_FEATURES in *data-pipeline-ffi*) HEADERS="$HEADERS $destdir/include/datadog/data-pipeline.h" diff --git a/crashtracker-ffi/Cargo.toml b/crashtracker-ffi/Cargo.toml new file mode 100644 index 000000000..a5fae0377 --- /dev/null +++ b/crashtracker-ffi/Cargo.toml @@ -0,0 +1,33 @@ +# Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +# SPDX-License-Identifier: Apache-2.0 + +[package] +name = "datadog-crashtracker-ffi" +edition.workspace = true +version.workspace = true +rust-version.workspace = true +license.workspace = true + +[lib] +bench = false + +[features] +default = ["cbindgen", "collector", "demangler", "receiver"] +cbindgen = ["build_common/cbindgen"] +# Enables the in-process collection of crash-info +collector = [] +demangler = ["dep:symbolic-demangle", "dep:symbolic-common"] +# Enables the use of this library to receive crash-info from a suitable collector +receiver = [] + +[build-dependencies] +build_common = { path = "../build-common" } + +[dependencies] +anyhow = "1.0" +datadog-crashtracker = { path = "../crashtracker" } +ddcommon = { path = "../ddcommon" } +ddcommon-ffi = { path = "../ddcommon-ffi", default-features = false } +hyper = {version = "0.14", default-features = false} +symbolic-demangle = { version = "12.8.0", default-features = false, features = ["rust", "cpp", "msvc"], optional = true } +symbolic-common = { version = "12.8.0", default-features = false, optional = true } diff --git a/crashtracker-ffi/build.rs b/crashtracker-ffi/build.rs new file mode 100644 index 000000000..d6ccc6ad6 --- /dev/null +++ b/crashtracker-ffi/build.rs @@ -0,0 +1,10 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 +extern crate build_common; + +use build_common::generate_and_configure_header; + +fn main() { + let header_name = "crashtracker.h"; + generate_and_configure_header(header_name); +} diff --git a/crashtracker-ffi/cbindgen.toml b/crashtracker-ffi/cbindgen.toml new file mode 100644 index 000000000..b5571f563 --- /dev/null +++ b/crashtracker-ffi/cbindgen.toml @@ -0,0 +1,51 @@ +# Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +# SPDX-License-Identifier: Apache-2.0 + +language = "C" +cpp_compat = true +tab_width = 2 +header = """// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 +""" +include_guard = "DDOG_CRASHTRACKER_H" +style = "both" +pragma_once = true + +no_includes = true +sys_includes = ["stdbool.h", "stddef.h", "stdint.h"] +includes = ["common.h"] + +[export] +prefix = "ddog_crasht_" +renaming_overrides_prefixing = true + +[export.rename] +"ByteSlice" = "ddog_ByteSlice" +"CancellationToken" = "ddog_CancellationToken" +"CharSlice" = "ddog_CharSlice" +"Endpoint" = "ddog_Endpoint" +"Error" = "ddog_Error" +"HttpStatus" = "ddog_HttpStatus" +"Option_U32" = "ddog_Option_U32" +"Slice_CChar" = "ddog_Slice_CChar" +"Slice_I64" = "ddog_Slice_I64" +"Slice_U8" = "ddog_Slice_U8" +"Tag" = "ddog_Tag" +"Timespec" = "ddog_Timespec" +"Vec_Tag" = "ddog_Vec_Tag" +"Vec_U8" = "ddog_Vec_U8" + +[export.mangle] +rename_types = "PascalCase" + +[enum] +prefix_with_name = true +rename_variants = "ScreamingSnakeCase" + +[fn] +must_use = "DDOG_CHECK_RETURN" + +[parse] +parse_deps = true +include = ["ddcommon", "ddcommon-ffi", "datadog-crashtracker", "ux"] + diff --git a/profiling-ffi/src/crashtracker/collector/counters.rs b/crashtracker-ffi/src/collector/counters.rs similarity index 66% rename from profiling-ffi/src/crashtracker/collector/counters.rs rename to crashtracker-ffi/src/collector/counters.rs index e85cb8c34..9d836107a 100644 --- a/profiling-ffi/src/crashtracker/collector/counters.rs +++ b/crashtracker-ffi/src/collector/counters.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use super::datatypes::ProfilingOpTypes; -use crate::crashtracker::datatypes::*; +use crate::Result; use anyhow::Context; /// Resets all counters to 0. @@ -15,9 +15,9 @@ use anyhow::Context; /// No safety concerns. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_prof_Crashtracker_reset_counters() -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_reset_counters() -> Result { datadog_crashtracker::reset_counters() - .context("ddog_prof_Crashtracker_begin_profiling_op failed") + .context("ddog_crasht_reset_counters failed") .into() } @@ -28,11 +28,9 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_reset_counters() -> Crashtracker /// /// # Safety /// No safety concerns. -pub unsafe extern "C" fn ddog_prof_Crashtracker_begin_profiling_op( - op: ProfilingOpTypes, -) -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_begin_profiling_op(op: ProfilingOpTypes) -> Result { datadog_crashtracker::begin_profiling_op(op) - .context("ddog_prof_Crashtracker_begin_profiling_op failed") + .context("ddog_crasht_begin_profiling_op failed") .into() } @@ -43,10 +41,8 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_begin_profiling_op( /// /// # Safety /// No safety concerns. 
-pub unsafe extern "C" fn ddog_prof_Crashtracker_end_profiling_op( - op: ProfilingOpTypes, -) -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_end_profiling_op(op: ProfilingOpTypes) -> Result { datadog_crashtracker::end_profiling_op(op) - .context("ddog_prof_Crashtracker_end_profiling_op failed") + .context("ddog_crasht_end_profiling_op failed") .into() } diff --git a/profiling-ffi/src/crashtracker/collector/datatypes.rs b/crashtracker-ffi/src/collector/datatypes.rs similarity index 78% rename from profiling-ffi/src/crashtracker/collector/datatypes.rs rename to crashtracker-ffi/src/collector/datatypes.rs index e7f55a4a1..48eafe26f 100644 --- a/profiling-ffi/src/crashtracker/collector/datatypes.rs +++ b/crashtracker-ffi/src/collector/datatypes.rs @@ -1,9 +1,9 @@ // Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::exporter::{self, ProfilingEndpoint}; use crate::option_from_char_slice; pub use datadog_crashtracker::{ProfilingOpTypes, StacktraceCollection}; +use ddcommon::Endpoint; use ddcommon_ffi::slice::{AsBytes, CharSlice}; use ddcommon_ffi::{Error, Slice}; @@ -14,7 +14,7 @@ pub struct EnvVar<'a> { } #[repr(C)] -pub struct CrashtrackerReceiverConfig<'a> { +pub struct ReceiverConfig<'a> { pub args: Slice<'a, CharSlice<'a>>, pub env: Slice<'a, EnvVar<'a>>, pub path_to_receiver_binary: CharSlice<'a>, @@ -24,11 +24,9 @@ pub struct CrashtrackerReceiverConfig<'a> { pub optional_stdout_filename: CharSlice<'a>, } -impl<'a> TryFrom<CrashtrackerReceiverConfig<'a>> - for datadog_crashtracker::CrashtrackerReceiverConfig -{ +impl<'a> TryFrom<ReceiverConfig<'a>> for datadog_crashtracker::CrashtrackerReceiverConfig { type Error = anyhow::Error; - fn try_from(value: CrashtrackerReceiverConfig<'a>) -> anyhow::Result<Self> { + fn try_from(value: ReceiverConfig<'a>) -> anyhow::Result<Self> { let args = { let mut vec = Vec::with_capacity(value.args.len()); for x in value.args.iter() { @@ -60,24 +58,20 @@ impl<'a> TryFrom<CrashtrackerReceiverConfig<'a>> } #[repr(C)] -pub struct CrashtrackerConfiguration<'a> { +pub struct Config<'a> { pub additional_files: Slice<'a, CharSlice<'a>>, pub create_alt_stack: bool, - /// The endpoint to send the crash report to (can be a file://) - /// - /// If ProfilingEndpoint is left to a zero value (enum value for Agent + empty charslice), - /// the crashtracker will infer the agent host from env variables. - pub endpoint: ProfilingEndpoint<'a>, + /// The endpoint to send the crash report to (can be a file://). + /// If None, the crashtracker will infer the agent host from env variables. 
+ pub endpoint: Option<&'a Endpoint>, pub resolve_frames: StacktraceCollection, pub timeout_secs: u64, pub wait_for_receiver: bool, } -impl<'a> TryFrom<CrashtrackerConfiguration<'a>> - for datadog_crashtracker::CrashtrackerConfiguration -{ +impl<'a> TryFrom<Config<'a>> for datadog_crashtracker::CrashtrackerConfiguration { type Error = anyhow::Error; - fn try_from(value: CrashtrackerConfiguration<'a>) -> anyhow::Result<Self> { + fn try_from(value: Config<'a>) -> anyhow::Result<Self> { let additional_files = { let mut vec = Vec::with_capacity(value.additional_files.len()); for x in value.additional_files.iter() { @@ -86,7 +80,7 @@ impl<'a> TryFrom<CrashtrackerConfiguration<'a>> vec }; let create_alt_stack = value.create_alt_stack; - let endpoint = unsafe { exporter::try_to_endpoint(value.endpoint).ok() }; + let endpoint = value.endpoint.cloned(); let resolve_frames = value.resolve_frames; let wait_for_receiver = value.wait_for_receiver; Self::new( @@ -100,13 +94,13 @@ impl<'a> TryFrom<CrashtrackerConfiguration<'a>> } #[repr(C)] -pub enum CrashtrackerUsizeResult { +pub enum UsizeResult { Ok(usize), #[allow(dead_code)] Err(Error), } -impl From<anyhow::Result<usize>> for CrashtrackerUsizeResult { +impl From<anyhow::Result<usize>> for UsizeResult { fn from(value: anyhow::Result<usize>) -> Self { match value { Ok(x) => Self::Ok(x), diff --git a/profiling-ffi/src/crashtracker/collector/mod.rs b/crashtracker-ffi/src/collector/mod.rs similarity index 53% rename from profiling-ffi/src/crashtracker/collector/mod.rs rename to crashtracker-ffi/src/collector/mod.rs index a746ea467..41d526670 100644 --- a/profiling-ffi/src/crashtracker/collector/mod.rs +++ b/crashtracker-ffi/src/collector/mod.rs @@ -4,8 +4,8 @@ mod counters; mod datatypes; mod spans; -use super::crash_info::CrashtrackerMetadata; -use crate::crashtracker::datatypes::*; +use super::crash_info::Metadata; +use crate::Result; use anyhow::Context; pub use counters::*; pub use datatypes::*; @@ -20,17 +20,17 @@ pub use spans::*; /// exit. /// /// # Preconditions -/// This function assumes that the crash-tracker has previously been -/// initialized. +/// This function assumes that the crashtracker has previously been +/// initialized. /// # Safety -/// Crash-tracking functions are not reentrant. -/// No other crash-handler functions should be called concurrently. +/// Crash-tracking functions are not reentrant. +/// No other crash-handler functions should be called concurrently. /// # Atomicity -/// This function is not atomic. A crash during its execution may lead to -/// unexpected crash-handling behaviour. -pub unsafe extern "C" fn ddog_prof_Crashtracker_shutdown() -> CrashtrackerResult { +/// This function is not atomic. A crash during its execution may lead to +/// unexpected crash-handling behaviour. +pub unsafe extern "C" fn ddog_crasht_shutdown() -> Result { datadog_crashtracker::shutdown_crash_handler() - .context("ddog_prof_Crashtracker_shutdown failed") + .context("ddog_crasht_shutdown failed") .into() } @@ -46,26 +46,26 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_shutdown() -> CrashtrackerResult /// advantage would be to have fewer processes in `ps -a`. /// /// # Preconditions -/// This function assumes that the crash-tracker has previously been -/// initialized. +/// This function assumes that the crash-tracker has previously been +/// initialized. /// # Safety -/// Crash-tracking functions are not reentrant. -/// No other crash-handler functions should be called concurrently. +/// Crash-tracking functions are not reentrant. +/// No other crash-handler functions should be called concurrently. /// # Atomicity -/// This function is not atomic. 
A crash during its execution may lead to -/// unexpected crash-handling behaviour. -pub unsafe extern "C" fn ddog_prof_Crashtracker_update_on_fork( - config: CrashtrackerConfiguration, - receiver_config: CrashtrackerReceiverConfig, - metadata: CrashtrackerMetadata, -) -> CrashtrackerResult { +/// This function is not atomic. A crash during its execution may lead to +/// unexpected crash-handling behaviour. +pub unsafe extern "C" fn ddog_crasht_update_on_fork( + config: Config, + receiver_config: ReceiverConfig, + metadata: Metadata, +) -> Result { (|| { let config = config.try_into()?; let receiver_config = receiver_config.try_into()?; let metadata = metadata.try_into()?; datadog_crashtracker::on_fork(config, receiver_config, metadata) })() - .context("ddog_prof_Crashtracker_update_on_fork failed") + .context("ddog_crasht_update_on_fork failed") .into() } @@ -74,24 +74,24 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_update_on_fork( /// Initialize the crash-tracking infrastructure. /// /// # Preconditions -/// None. +/// None. /// # Safety -/// Crash-tracking functions are not reentrant. -/// No other crash-handler functions should be called concurrently. +/// Crash-tracking functions are not reentrant. +/// No other crash-handler functions should be called concurrently. /// # Atomicity -/// This function is not atomic. A crash during its execution may lead to -/// unexpected crash-handling behaviour. -pub unsafe extern "C" fn ddog_prof_Crashtracker_init_with_receiver( - config: CrashtrackerConfiguration, - receiver_config: CrashtrackerReceiverConfig, - metadata: CrashtrackerMetadata, -) -> CrashtrackerResult { +/// This function is not atomic. A crash during its execution may lead to +/// unexpected crash-handling behaviour. +pub unsafe extern "C" fn ddog_crasht_init_with_receiver( + config: Config, + receiver_config: ReceiverConfig, + metadata: Metadata, +) -> Result { (|| { let config = config.try_into()?; let receiver_config = receiver_config.try_into()?; let metadata = metadata.try_into()?; datadog_crashtracker::init_with_receiver(config, receiver_config, metadata) })() - .context("ddog_prof_Crashtracker_init failed") + .context("ddog_crasht_init_with_receiver failed") .into() } diff --git a/profiling-ffi/src/crashtracker/collector/spans.rs b/crashtracker-ffi/src/collector/spans.rs similarity index 82% rename from profiling-ffi/src/crashtracker/collector/spans.rs rename to crashtracker-ffi/src/collector/spans.rs index f27b60e83..de9efacaa 100644 --- a/profiling-ffi/src/crashtracker/collector/spans.rs +++ b/crashtracker-ffi/src/collector/spans.rs @@ -1,7 +1,7 @@ // Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::{crashtracker::datatypes::*, CrashtrackerUsizeResult}; +use crate::{Result, UsizeResult}; use anyhow::Context; /// Resets all stored spans to 0. @@ -14,9 +14,9 @@ use anyhow::Context; /// No safety concerns. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_prof_Crashtracker_clear_span_ids() -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_clear_span_ids() -> Result { datadog_crashtracker::clear_spans() - .context("ddog_prof_Crashtracker_clear_span_ids failed") + .context("ddog_crasht_clear_span_ids failed") .into() } @@ -30,9 +30,9 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_clear_span_ids() -> Crashtracker /// No safety concerns. 
#[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_prof_Crashtracker_clear_trace_ids() -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_clear_trace_ids() -> Result { datadog_crashtracker::clear_traces() - .context("ddog_prof_Crashtracker_clear_trace_ids failed") + .context("ddog_crasht_clear_trace_ids failed") .into() } @@ -57,13 +57,10 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_clear_trace_ids() -> Crashtracke /// /// # Safety /// No safety concerns. -pub unsafe extern "C" fn ddog_prof_Crashtracker_insert_trace_id( - id_high: u64, - id_low: u64, -) -> CrashtrackerUsizeResult { +pub unsafe extern "C" fn ddog_crasht_insert_trace_id(id_high: u64, id_low: u64) -> UsizeResult { let id: u128 = (id_high as u128) << 64 | (id_low as u128); datadog_crashtracker::insert_trace(id) - .context("ddog_prof_Crashtracker_insert_trace_id failed") + .context("ddog_crasht_insert_trace_id failed") .into() } @@ -89,13 +86,10 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_insert_trace_id( /// /// # Safety /// No safety concerns. -pub unsafe extern "C" fn ddog_prof_Crashtracker_insert_span_id( - id_high: u64, - id_low: u64, -) -> CrashtrackerUsizeResult { +pub unsafe extern "C" fn ddog_crasht_insert_span_id(id_high: u64, id_low: u64) -> UsizeResult { let id: u128 = (id_high as u128) << 64 | (id_low as u128); datadog_crashtracker::insert_span(id) - .context("ddog_prof_Crashtracker_insert_span_id failed") + .context("ddog_crasht_insert_span_id failed") .into() } @@ -121,14 +115,14 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_insert_span_id( /// /// # Safety /// No safety concerns. -pub unsafe extern "C" fn ddog_prof_Crashtracker_remove_span_id( +pub unsafe extern "C" fn ddog_crasht_remove_span_id( id_high: u64, id_low: u64, idx: usize, -) -> CrashtrackerResult { +) -> Result { let id: u128 = (id_high as u128) << 64 | (id_low as u128); datadog_crashtracker::remove_span(id, idx) - .context("ddog_prof_Crashtracker_remove_span_id failed") + .context("ddog_crasht_remove_span_id failed") .into() } @@ -154,13 +148,13 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_remove_span_id( /// /// # Safety /// No safety concerns. 
-pub unsafe extern "C" fn ddog_prof_Crashtracker_remove_trace_id( +pub unsafe extern "C" fn ddog_crasht_remove_trace_id( id_high: u64, id_low: u64, idx: usize, -) -> CrashtrackerResult { +) -> Result { let id: u128 = (id_high as u128) << 64 | (id_low as u128); datadog_crashtracker::remove_trace(id, idx) - .context("ddog_prof_Crashtracker_remove_trace_id failed") + .context("ddog_crasht_remove_trace_id failed") .into() } diff --git a/profiling-ffi/src/crashtracker/crash_info/datatypes.rs b/crashtracker-ffi/src/crash_info/datatypes.rs similarity index 97% rename from profiling-ffi/src/crashtracker/crash_info/datatypes.rs rename to crashtracker-ffi/src/crash_info/datatypes.rs index 5f75a1537..f4a31ed55 100644 --- a/profiling-ffi/src/crashtracker/crash_info/datatypes.rs +++ b/crashtracker-ffi/src/crash_info/datatypes.rs @@ -227,7 +227,7 @@ impl<'a> TryFrom> for datadog_crashtracker::SigInfo { } #[repr(C)] -pub struct CrashtrackerMetadata<'a> { +pub struct Metadata<'a> { pub profiling_library_name: CharSlice<'a>, pub profiling_library_version: CharSlice<'a>, pub family: CharSlice<'a>, @@ -235,9 +235,9 @@ pub struct CrashtrackerMetadata<'a> { pub tags: Option<&'a ddcommon_ffi::Vec>, } -impl<'a> TryFrom> for datadog_crashtracker::CrashtrackerMetadata { +impl<'a> TryFrom> for datadog_crashtracker::CrashtrackerMetadata { type Error = anyhow::Error; - fn try_from(value: CrashtrackerMetadata<'a>) -> anyhow::Result { + fn try_from(value: Metadata<'a>) -> anyhow::Result { let profiling_library_name = value.profiling_library_name.try_to_utf8()?.to_string(); let profiling_library_version = value.profiling_library_version.try_to_utf8()?.to_string(); let family = value.family.try_to_utf8()?.to_string(); diff --git a/profiling-ffi/src/crashtracker/crash_info/mod.rs b/crashtracker-ffi/src/crash_info/mod.rs similarity index 73% rename from profiling-ffi/src/crashtracker/crash_info/mod.rs rename to crashtracker-ffi/src/crash_info/mod.rs index 7c2ee0daa..18e49e0cf 100644 --- a/profiling-ffi/src/crashtracker/crash_info/mod.rs +++ b/crashtracker-ffi/src/crash_info/mod.rs @@ -4,20 +4,17 @@ mod datatypes; pub use datatypes::*; -use crate::{ - crashtracker::{option_from_char_slice, CrashtrackerResult}, - exporter::ProfilingEndpoint, -}; +use crate::{option_from_char_slice, Result}; use anyhow::Context; -use chrono::DateTime; -use ddcommon_ffi::{slice::AsBytes, CharSlice, Slice}; +use ddcommon::Endpoint; +use ddcommon_ffi::{slice::AsBytes, CharSlice, Slice, Timespec}; /// Create a new crashinfo, and returns an opaque reference to it. /// # Safety /// No safety issues. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_new() -> CrashInfoNewResult { +pub unsafe extern "C" fn ddog_crasht_CrashInfo_new() -> CrashInfoNewResult { CrashInfoNewResult::Ok(CrashInfo::new(datadog_crashtracker::CrashInfo::new())) } @@ -25,7 +22,7 @@ pub unsafe extern "C" fn ddog_crashinfo_new() -> CrashInfoNewResult { /// The `crash_info` can be null, but if non-null it must point to a CrashInfo /// made by this module, which has not previously been dropped. #[no_mangle] -pub unsafe extern "C" fn ddog_crashinfo_drop(crashinfo: *mut CrashInfo) { +pub unsafe extern "C" fn ddog_crasht_CrashInfo_drop(crashinfo: *mut CrashInfo) { // Technically, this function has been designed so if it's double-dropped // then it's okay, but it's not something that should be relied on. 
if !crashinfo.is_null() { @@ -41,15 +38,15 @@ pub unsafe extern "C" fn ddog_crashinfo_drop(crashinfo: *mut CrashInfo) { #[cfg(unix)] #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_normalize_ips( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_normalize_ips( crashinfo: *mut CrashInfo, pid: u32, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; crashinfo.normalize_ips(pid) })() - .context("ddog_crashinfo_normalize_ips failed") + .context("ddog_crasht_CrashInfo_normalize_ips failed") .into() } @@ -63,17 +60,17 @@ pub unsafe extern "C" fn ddog_crashinfo_normalize_ips( /// call. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_add_counter( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_add_counter( crashinfo: *mut CrashInfo, name: CharSlice, val: i64, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; let name = name.to_utf8_lossy(); crashinfo.add_counter(&name, val) })() - .context("ddog_crashinfo_add_counter failed") + .context("ddog_crasht_CrashInfo_add_counter failed") .into() } @@ -86,16 +83,16 @@ pub unsafe extern "C" fn ddog_crashinfo_add_counter( /// call. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_add_file( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_add_file( crashinfo: *mut CrashInfo, - name: CharSlice, -) -> CrashtrackerResult { + filename: CharSlice, +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; - let name = name.to_utf8_lossy(); - crashinfo.add_file(&name) + let filename = filename.to_utf8_lossy(); + crashinfo.add_file(&filename) })() - .context("ddog_crashinfo_add_file failed") + .context("ddog_crasht_CrashInfo_add_file failed") .into() } @@ -109,18 +106,18 @@ pub unsafe extern "C" fn ddog_crashinfo_add_file( /// call. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_add_tag( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_add_tag( crashinfo: *mut CrashInfo, key: CharSlice, value: CharSlice, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; let key = key.to_utf8_lossy().to_string(); let value = value.to_utf8_lossy().to_string(); crashinfo.add_tag(key, value) })() - .context("ddog_crashinfo_add_tag failed") + .context("ddog_crasht_CrashInfo_add_tag failed") .into() } @@ -132,16 +129,16 @@ pub unsafe extern "C" fn ddog_crashinfo_add_tag( /// Strings are copied into the crashinfo, and do not need to outlive this call. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_set_metadata( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_set_metadata( crashinfo: *mut CrashInfo, - metadata: CrashtrackerMetadata, -) -> CrashtrackerResult { + metadata: Metadata, +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; let metadata = metadata.try_into()?; crashinfo.set_metadata(metadata) })() - .context("ddog_crashinfo_set_metadata failed") + .context("ddog_crasht_CrashInfo_set_metadata failed") .into() } @@ -153,16 +150,16 @@ pub unsafe extern "C" fn ddog_crashinfo_set_metadata( /// Strings are copied into the crashinfo, and do not need to outlive this call. 
#[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_set_siginfo( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_set_siginfo( crashinfo: *mut CrashInfo, siginfo: SigInfo, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; let siginfo = siginfo.try_into()?; crashinfo.set_siginfo(siginfo) })() - .context("ddog_crashinfo_set_siginfo failed") + .context("ddog_crasht_CrashInfo_set_siginfo failed") .into() } @@ -175,11 +172,11 @@ pub unsafe extern "C" fn ddog_crashinfo_set_siginfo( /// Strings are copied into the crashinfo, and do not need to outlive this call. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_set_stacktrace( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_set_stacktrace( crashinfo: *mut CrashInfo, thread_id: CharSlice, stacktrace: Slice<StackFrame>, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; let thread_id = option_from_char_slice(thread_id)?; @@ -189,7 +186,7 @@ pub unsafe extern "C" fn ddog_crashinfo_set_stacktrace( } crashinfo.set_stacktrace(thread_id, stacktrace_vec) })() - .context("ddog_crashinfo_set_stacktrace failed") + .context("ddog_crasht_CrashInfo_set_stacktrace failed") .into() } @@ -199,18 +196,15 @@ pub unsafe extern "C" fn ddog_crashinfo_set_stacktrace( /// `crashinfo` must be a valid pointer to a `CrashInfo` object. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_set_timestamp( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_set_timestamp( crashinfo: *mut CrashInfo, - secs: i64, - nsecs: u32, -) -> CrashtrackerResult { + ts: Timespec, +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; - let ts = DateTime::from_timestamp(secs, nsecs) - .with_context(|| format!("Invalid timestamp {secs} {nsecs}"))?; - crashinfo.set_timestamp(ts) + crashinfo.set_timestamp(ts.into()) })() - .context("ddog_crashinfo_set_timestamp_to_now failed") + .context("ddog_crasht_CrashInfo_set_timestamp failed") .into() } @@ -220,14 +214,14 @@ pub unsafe extern "C" fn ddog_crashinfo_set_timestamp( /// `crashinfo` must be a valid pointer to a `CrashInfo` object. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_set_timestamp_to_now( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_set_timestamp_to_now( crashinfo: *mut CrashInfo, -) -> CrashtrackerResult { +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; crashinfo.set_timestamp_to_now() })() - .context("ddog_crashinfo_set_timestamp_to_now failed") + .context("ddog_crasht_CrashInfo_set_timestamp_to_now failed") .into() } @@ -237,15 +231,15 @@ pub unsafe extern "C" fn ddog_crashinfo_set_timestamp_to_now( /// `crashinfo` must be a valid pointer to a `CrashInfo` object. #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_crashinfo_upload_to_endpoint( +pub unsafe extern "C" fn ddog_crasht_CrashInfo_upload_to_endpoint( crashinfo: *mut CrashInfo, - endpoint: ProfilingEndpoint, -) -> CrashtrackerResult { + endpoint: Option<&Endpoint>, +) -> Result { (|| { let crashinfo = crashinfo_ptr_to_inner(crashinfo)?; - let endpoint = Some(unsafe { crate::exporter::try_to_endpoint(endpoint)? 
}); + let endpoint = endpoint.cloned(); crashinfo.upload_to_endpoint(&endpoint) })() - .context("ddog_crashinfo_upload_to_endpoint failed") + .context("ddog_crasht_CrashInfo_upload_to_endpoint failed") .into() } diff --git a/profiling-ffi/src/crashtracker/datatypes/mod.rs b/crashtracker-ffi/src/datatypes/mod.rs similarity index 90% rename from profiling-ffi/src/crashtracker/datatypes/mod.rs rename to crashtracker-ffi/src/datatypes/mod.rs index ec7b51f7e..04eb263ab 100644 --- a/profiling-ffi/src/crashtracker/datatypes/mod.rs +++ b/crashtracker-ffi/src/datatypes/mod.rs @@ -13,7 +13,7 @@ pub fn option_from_char_slice(s: CharSlice) -> anyhow::Result> { /// A generic result type for when a crashtracking operation may fail, /// but there's nothing to return in the case of success. #[repr(C)] -pub enum CrashtrackerResult { +pub enum Result { Ok( /// Do not use the value of Ok. This value only exists to overcome /// Rust -> C code generation. @@ -22,7 +22,7 @@ pub enum CrashtrackerResult { Err(Error), } -impl From> for CrashtrackerResult { +impl From> for Result { fn from(value: anyhow::Result<()>) -> Self { match value { Ok(_) => Self::Ok(true), diff --git a/profiling-ffi/src/crashtracker/demangler/datatypes.rs b/crashtracker-ffi/src/demangler/datatypes.rs similarity index 100% rename from profiling-ffi/src/crashtracker/demangler/datatypes.rs rename to crashtracker-ffi/src/demangler/datatypes.rs diff --git a/profiling-ffi/src/crashtracker/demangler/mod.rs b/crashtracker-ffi/src/demangler/mod.rs similarity index 81% rename from profiling-ffi/src/crashtracker/demangler/mod.rs rename to crashtracker-ffi/src/demangler/mod.rs index 3c1ed09bf..c5e47b5ed 100644 --- a/profiling-ffi/src/crashtracker/demangler/mod.rs +++ b/crashtracker-ffi/src/demangler/mod.rs @@ -15,7 +15,7 @@ use symbolic_demangle::Demangle; /// The string is copied into the result, and does not need to outlive this call #[no_mangle] #[must_use] -pub unsafe extern "C" fn ddog_demangle( +pub unsafe extern "C" fn ddog_crasht_demangle( name: CharSlice, options: DemangleOptions, ) -> StringWrapperResult { @@ -32,12 +32,12 @@ pub unsafe extern "C" fn ddog_demangle( fn test_demangle() { let test_string = "_ZNSt28__atomic_futex_unsigned_base26_M_futex_wait_until_steadyEPjjbNSt6chrono8durationIlSt5ratioILl1ELl1EEEENS2_IlS3_ILl1ELl1000000000EEEE"; let test_slice = CharSlice::from(test_string); - let result: String = unsafe { ddog_demangle(test_slice, DemangleOptions::Complete) } + let result: String = unsafe { ddog_crasht_demangle(test_slice, DemangleOptions::Complete) } .unwrap() .into(); assert_eq!(result, "std::__atomic_futex_unsigned_base::_M_futex_wait_until_steady(unsigned int*, unsigned int, bool, std::chrono::duration >, std::chrono::duration >)"); - let result: String = unsafe { ddog_demangle(test_slice, DemangleOptions::NameOnly) } + let result: String = unsafe { ddog_crasht_demangle(test_slice, DemangleOptions::NameOnly) } .unwrap() .into(); assert_eq!( @@ -50,12 +50,12 @@ fn test_demangle() { fn test_demangle_fails() { let test_string = "_ZNSt28__fdf"; let test_slice = CharSlice::from(test_string); - let result: String = unsafe { ddog_demangle(test_slice, DemangleOptions::Complete) } + let result: String = unsafe { ddog_crasht_demangle(test_slice, DemangleOptions::Complete) } .unwrap() .into(); assert_eq!(result, ""); - let result: String = unsafe { ddog_demangle(test_slice, DemangleOptions::NameOnly) } + let result: String = unsafe { ddog_crasht_demangle(test_slice, DemangleOptions::NameOnly) } .unwrap() .into(); assert_eq!(result, 
""); diff --git a/profiling-ffi/src/crashtracker/mod.rs b/crashtracker-ffi/src/lib.rs similarity index 56% rename from profiling-ffi/src/crashtracker/mod.rs rename to crashtracker-ffi/src/lib.rs index 0a414c745..1dc4a8a8d 100644 --- a/profiling-ffi/src/crashtracker/mod.rs +++ b/crashtracker-ffi/src/lib.rs @@ -1,18 +1,20 @@ // Copyright 2024-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -#[cfg(all(unix, feature = "crashtracker-collector"))] +#[cfg(all(unix, feature = "collector"))] mod collector; mod crash_info; mod datatypes; +#[cfg(feature = "demangler")] mod demangler; -#[cfg(all(unix, feature = "crashtracker-receiver"))] +#[cfg(all(unix, feature = "receiver"))] mod receiver; -#[cfg(all(unix, feature = "crashtracker-collector"))] +#[cfg(all(unix, feature = "collector"))] pub use collector::*; pub use crash_info::*; pub use datatypes::*; +#[cfg(feature = "demangler")] pub use demangler::*; -#[cfg(all(unix, feature = "crashtracker-receiver"))] +#[cfg(all(unix, feature = "receiver"))] pub use receiver::*; diff --git a/profiling-ffi/src/crashtracker/receiver.rs b/crashtracker-ffi/src/receiver.rs similarity index 77% rename from profiling-ffi/src/crashtracker/receiver.rs rename to crashtracker-ffi/src/receiver.rs index c366e19dc..d7f64f65d 100644 --- a/profiling-ffi/src/crashtracker/receiver.rs +++ b/crashtracker-ffi/src/receiver.rs @@ -1,7 +1,7 @@ // Copyright 2023-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::crashtracker::datatypes::*; +use crate::Result; use anyhow::Context; use ddcommon_ffi::{slice::AsBytes, CharSlice}; #[no_mangle] @@ -17,9 +17,9 @@ use ddcommon_ffi::{slice::AsBytes, CharSlice}; /// description. /// # Safety /// No safety concerns -pub unsafe extern "C" fn ddog_prof_Crashtracker_receiver_entry_point_stdin() -> CrashtrackerResult { +pub unsafe extern "C" fn ddog_crasht_receiver_entry_point_stdin() -> Result { datadog_crashtracker::receiver_entry_point_stdin() - .context("ddog_prof_Crashtracker_receiver_entry_point_stdin failed") + .context("ddog_crasht_receiver_entry_point_stdin failed") .into() } @@ -36,13 +36,13 @@ pub unsafe extern "C" fn ddog_prof_Crashtracker_receiver_entry_point_stdin() -> /// description. 
/// # Safety /// No safety concerns -pub unsafe extern "C" fn ddog_prof_Crashtracker_receiver_entry_point_unix_socket( +pub unsafe extern "C" fn ddog_crasht_receiver_entry_point_unix_socket( socket_path: CharSlice, -) -> CrashtrackerResult { +) -> Result { (|| { let socket_path = socket_path.try_to_utf8()?; datadog_crashtracker::reciever_entry_point_unix_socket(socket_path) })() - .context("ddog_prof_Crashtracker_receiver_entry_point_unix_socket failed") + .context("ddog_crasht_receiver_entry_point_unix_socket failed") .into() } diff --git a/crashtracker/libdatadog-crashtracking-receiver.c b/crashtracker/libdatadog-crashtracking-receiver.c index 3b23c58a7..c8e3e265f 100644 --- a/crashtracker/libdatadog-crashtracking-receiver.c +++ b/crashtracker/libdatadog-crashtracking-receiver.c @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 #include <datadog/common.h> -#include <datadog/profiling.h> +#include <datadog/crashtracker.h> #include <stdio.h> #include <stdlib.h> int main(void) { - ddog_prof_CrashtrackerResult new_result = ddog_prof_Crashtracker_receiver_entry_point_stdin(); - if (new_result.tag != DDOG_PROF_CRASHTRACKER_RESULT_OK) { + ddog_crasht_Result new_result = ddog_crasht_receiver_entry_point_stdin(); + if (new_result.tag != DDOG_CRASHT_RESULT_OK) { ddog_CharSlice message = ddog_Error_message(&new_result.err); fprintf(stderr, "%.*s", (int)message.len, message.ptr); ddog_Error_drop(&new_result.err); diff --git a/crashtracker/src/README.md b/crashtracker/src/README.md index 06755ee56..8e4232552 100644 --- a/crashtracker/src/README.md +++ b/crashtracker/src/README.md @@ -13,10 +13,10 @@ It has three related parts: ## How to use the crashhandler -1. Initilize it using `ddog_prof_Crashtracker_init` -2. After a fork, reset the crashtracker in the child using `ddog_prof_Crashtracker_update_on_fork`. +1. Initialize it using `ddog_crasht_init_with_receiver` +2. After a fork, reset the crashtracker in the child using `ddog_crasht_update_on_fork`. This can be done in an `pthread_atfork` handler. -2. [Optional]. The crash-tracker can be shutdown, and the previous crash handler restored, using `ddog_prof_Crashtracker_shutdown`. +3. [Optional] The crash-tracker can be shut down, and the previous crash handler restored, using `ddog_crasht_shutdown`. Currently, there is a state machine that stops you from then restarting the crash-tracker. Fixing this is a todo diff --git a/crashtracker/src/receiver.rs b/crashtracker/src/receiver.rs index 964a18e1e..f009c97af 100644 --- a/crashtracker/src/receiver.rs +++ b/crashtracker/src/receiver.rs @@ -223,6 +223,7 @@ fn process_line( Ok(next) } +#[derive(Debug)] enum CrashReportStatus { NoCrash, CrashReport(CrashtrackerConfiguration, CrashInfo), @@ -232,6 +233,7 @@ enum CrashReportStatus { /// Listens to `stream`, reading it line by line, until /// 1. A crash-report is received, in which case it is processed for upload /// 2. `stdin` closes without a crash report (i.e. if the parent terminated normally) +/// /// In the case where the parent failed to transfer a full crash-report /// (for instance if it crashed while calculating the crash-report), we return /// a PartialCrashReport. 
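As a usage sketch for the README steps above, the `pthread_atfork` wiring could look like the following. It assumes the crash tracker was already initialized with `ddog_crasht_init_with_receiver` and that `config`, `receiver_config`, and `metadata` are kept alive at file scope; those globals and the helper names are illustrative, only the `ddog_crasht_*` symbols come from this patch.

```c
#include <datadog/common.h>
#include <datadog/crashtracker.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative globals: in a real integration these would be the same values
   that were passed to ddog_crasht_init_with_receiver(). */
static ddog_crasht_Config config;
static ddog_crasht_ReceiverConfig receiver_config;
static ddog_crasht_Metadata metadata;

/* Child-side callback: re-arm the crash handler in the forked process. */
static void crasht_atfork_child(void) {
  ddog_crasht_Result result = ddog_crasht_update_on_fork(config, receiver_config, metadata);
  if (result.tag == DDOG_CRASHT_RESULT_ERR) {
    ddog_CharSlice message = ddog_Error_message(&result.err);
    fprintf(stderr, "%.*s\n", (int)message.len, message.ptr);
    ddog_Error_drop(&result.err);
  }
}

/* Register once, after ddog_crasht_init_with_receiver() succeeds. */
static void crasht_register_fork_handler(void) {
  pthread_atfork(NULL, NULL, crasht_atfork_child);
}
```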
diff --git a/ddcommon-ffi/Cargo.toml b/ddcommon-ffi/Cargo.toml index 5ca6ad9b3..2c621a8ab 100644 --- a/ddcommon-ffi/Cargo.toml +++ b/ddcommon-ffi/Cargo.toml @@ -19,6 +19,7 @@ cbindgen = ["build_common/cbindgen"] build_common = { path = "../build-common" } [dependencies] -ddcommon = { path = "../ddcommon" } anyhow = "1.0" +chrono = { version = "0.4.38", features = ["std"] } +ddcommon = { path = "../ddcommon" } hyper = {version = "0.14", default-features = false} diff --git a/ddcommon-ffi/src/endpoint.rs b/ddcommon-ffi/src/endpoint.rs index 6f3c6f4a8..6477e23d2 100644 --- a/ddcommon-ffi/src/endpoint.rs +++ b/ddcommon-ffi/src/endpoint.rs @@ -15,6 +15,13 @@ pub extern "C" fn ddog_endpoint_from_url(url: crate::CharSlice) -> Option Option> { + let url = format!("file://{}", filename.to_utf8_lossy()); + Some(Box::new(Endpoint::from_slice(&url))) +} + // We'll just specify the base site here. If api key provided, different intakes need to use their // own subdomains. #[no_mangle] diff --git a/ddcommon-ffi/src/lib.rs b/ddcommon-ffi/src/lib.rs index 23855c6e6..1fdef2289 100644 --- a/ddcommon-ffi/src/lib.rs +++ b/ddcommon-ffi/src/lib.rs @@ -8,11 +8,12 @@ pub mod option; pub mod slice; pub mod string; pub mod tags; +pub mod timespec; pub mod vec; pub use error::*; -pub use string::*; - -pub use option::Option; +pub use option::*; pub use slice::{CharSlice, Slice}; +pub use string::*; +pub use timespec::*; pub use vec::Vec; diff --git a/ddcommon-ffi/src/option.rs b/ddcommon-ffi/src/option.rs index 2f55d95c9..05047a81a 100644 --- a/ddcommon-ffi/src/option.rs +++ b/ddcommon-ffi/src/option.rs @@ -31,3 +31,13 @@ impl From<&Option> for std::option::Option { } } } + +#[no_mangle] +pub extern "C" fn ddog_Option_U32_some(v: u32) -> Option { + Option::Some(v) +} + +#[no_mangle] +pub extern "C" fn ddog_Option_U32_none() -> Option { + Option::None +} diff --git a/ddcommon-ffi/src/timespec.rs b/ddcommon-ffi/src/timespec.rs new file mode 100644 index 000000000..a490845c3 --- /dev/null +++ b/ddcommon-ffi/src/timespec.rs @@ -0,0 +1,53 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use chrono::{DateTime, TimeZone, Utc}; +use std::fmt::Debug; +use std::time::SystemTime; + +/// Represents time since the Unix Epoch in seconds plus nanoseconds. +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct Timespec { + pub seconds: i64, + pub nanoseconds: u32, +} + +impl From for DateTime { + fn from(value: Timespec) -> Self { + Utc.timestamp_opt(value.seconds, value.nanoseconds).unwrap() + } +} + +impl From for SystemTime { + fn from(value: Timespec) -> Self { + // The DateTime API is more convenient, so let's delegate. + let datetime: DateTime = value.into(); + SystemTime::from(datetime) + } +} + +impl<'a> From<&'a Timespec> for SystemTime { + fn from(value: &'a Timespec) -> Self { + // The DateTime API is more convenient, so let's delegate. + let datetime: DateTime = (*value).into(); + SystemTime::from(datetime) + } +} + +impl From> for Timespec { + fn from(value: DateTime) -> Self { + Self { + seconds: value.timestamp(), + nanoseconds: value.timestamp_subsec_nanos(), + } + } +} + +impl From for Timespec { + fn from(value: SystemTime) -> Self { + // The DateTime API is more convenient, so let's delegate again. 
+ let datetime: DateTime = value.into(); + Self::from(datetime) + } +} diff --git a/ddcommon/src/tag.rs b/ddcommon/src/tag.rs index 17f2b691b..2c43fd1e5 100644 --- a/ddcommon/src/tag.rs +++ b/ddcommon/src/tag.rs @@ -14,6 +14,7 @@ pub struct Tag { /// - "language:native" /// - "src_library:libdatadog" /// - "type:timeout" + /// /// So being able to save allocations is nice. value: Cow<'static, str>, } diff --git a/ddsketch/src/lib.rs b/ddsketch/src/lib.rs index 470615264..f6f74abdf 100644 --- a/ddsketch/src/lib.rs +++ b/ddsketch/src/lib.rs @@ -21,8 +21,7 @@ pub mod pb; /// /// This implementation only supports a part of the standard (which is also only the parts dd /// backend supports :shrug:) -/// - max length contiguous bin store, with lower bin -/// collapse behavior. +/// - max length contiguous bin store, with lower bin collapse behavior. /// - Positive or zero values /// /// The default sketch has a 1% relative accuracy, and only accepts positive points diff --git a/examples/ffi/crashinfo.cpp b/examples/ffi/crashinfo.cpp index 0b70fc9b0..9b5dca912 100644 --- a/examples/ffi/crashinfo.cpp +++ b/examples/ffi/crashinfo.cpp @@ -3,7 +3,7 @@ extern "C" { #include -#include +#include } #include #include @@ -19,17 +19,8 @@ static ddog_CharSlice to_slice_string(std::string &s) { return {.ptr = s.data(), .len = s.length()}; } -// TODO: Testing on my mac, the tags appear to have the opposite meaning you'd -// expect -static ddog_prof_Option_U32 some_u32(uint32_t i) { - ddog_prof_Option_U32 rval = {.tag = DDOG_PROF_OPTION_U32_SOME_U32}; - rval.some = i; - return rval; -} -static ddog_prof_Option_U32 none_u32() { return {.tag = DDOG_PROF_OPTION_U32_NONE_U32}; } - struct Deleter { - void operator()(ddog_prof_CrashInfo *object) { ddog_crashinfo_drop(object); } + void operator()(ddog_crasht_CrashInfo *object) { ddog_crasht_CrashInfo_drop(object); } }; void print_error(const char *s, const ddog_Error &err) { @@ -37,15 +28,15 @@ void print_error(const char *s, const ddog_Error &err) { printf("%s (%.*s)\n", s, static_cast(charslice.len), charslice.ptr); } -void check_result(ddog_prof_CrashtrackerResult result, const char *msg) { - if (result.tag != DDOG_PROF_CRASHTRACKER_RESULT_OK) { +void check_result(ddog_crasht_Result result, const char *msg) { + if (result.tag != DDOG_CRASHT_RESULT_OK) { print_error(msg, result.err); ddog_Error_drop(&result.err); exit(EXIT_FAILURE); } } -void add_stacktrace(std::unique_ptr &crashinfo) { +void add_stacktrace(std::unique_ptr &crashinfo) { // Collect things into vectors so they stay alive till the function exits std::vector filenames; @@ -55,45 +46,46 @@ void add_stacktrace(std::unique_ptr &crashinfo) { function_names.push_back("func_" + std::to_string(i)); } - std::vector names; + std::vector names; for (uintptr_t i = 0; i < 20; ++i) { - names.push_back({.colno = some_u32(i), + names.push_back({.colno = ddog_Option_U32_some(i), .filename = to_slice_string(filenames[i]), - .lineno = some_u32(2 * i + 3), + .lineno = ddog_Option_U32_some(2 * i + 3), .name = to_slice_string(function_names[i])}); } - std::vector trace; + std::vector trace; for (uintptr_t i = 0; i < 20; ++i) { - ddog_prof_StackFrame frame = {.ip = i, - .module_base_address = 0, - .names = {.ptr = &names[i], .len = 1}, - .sp = 0, - .symbol_address = 0}; + ddog_crasht_StackFrame frame = {.ip = i, + .module_base_address = 0, + .names = {.ptr = &names[i], .len = 1}, + .sp = 0, + .symbol_address = 0}; trace.push_back(frame); } - ddog_prof_Slice_StackFrame trace_slice = {.ptr = trace.data(), .len = trace.size()}; 
+ ddog_crasht_Slice_StackFrame trace_slice = {.ptr = trace.data(), .len = trace.size()}; - check_result(ddog_crashinfo_set_stacktrace(crashinfo.get(), to_slice_c_char(""), trace_slice), - "Failed to set stacktrace"); + check_result( + ddog_crasht_CrashInfo_set_stacktrace(crashinfo.get(), to_slice_c_char(""), trace_slice), + "Failed to set stacktrace"); } int main(void) { - auto crashinfo_new_result = ddog_crashinfo_new(); - if (crashinfo_new_result.tag != DDOG_PROF_CRASH_INFO_NEW_RESULT_OK) { + auto crashinfo_new_result = ddog_crasht_CrashInfo_new(); + if (crashinfo_new_result.tag != DDOG_CRASHT_CRASH_INFO_NEW_RESULT_OK) { print_error("Failed to make new crashinfo: ", crashinfo_new_result.err); ddog_Error_drop(&crashinfo_new_result.err); exit(EXIT_FAILURE); } - std::unique_ptr crashinfo{&crashinfo_new_result.ok}; + std::unique_ptr crashinfo{&crashinfo_new_result.ok}; check_result( - ddog_crashinfo_add_counter(crashinfo.get(), to_slice_c_char("my_amazing_counter"), 3), + ddog_crasht_CrashInfo_add_counter(crashinfo.get(), to_slice_c_char("my_amazing_counter"), 3), "Failed to add counter"); // TODO add some tags here auto tags = ddog_Vec_Tag_new(); - const ddog_prof_CrashtrackerMetadata metadata = { + const ddog_crasht_Metadata metadata = { .profiling_library_name = to_slice_c_char("libdatadog"), .profiling_library_version = to_slice_c_char("42"), .family = to_slice_c_char("rust"), @@ -101,24 +93,29 @@ int main(void) { }; // TODO: We should set more tags that are expected by telemetry - check_result(ddog_crashinfo_set_metadata(crashinfo.get(), metadata), "Failed to add metadata"); - check_result(ddog_crashinfo_add_tag(crashinfo.get(), to_slice_c_char("best hockey team"), - to_slice_c_char("Habs")), + check_result(ddog_crasht_CrashInfo_set_metadata(crashinfo.get(), metadata), + "Failed to add metadata"); + check_result(ddog_crasht_CrashInfo_add_tag(crashinfo.get(), to_slice_c_char("best hockey team"), + to_slice_c_char("Habs")), "Failed to add tag"); // This API allows one to capture useful files (e.g. 
/proc/pid/maps) // For testing purposes, use `/etc/hosts` which should exist on any reasonable // UNIX system - check_result(ddog_crashinfo_add_file(crashinfo.get(), to_slice_c_char("/etc/hosts")), + check_result(ddog_crasht_CrashInfo_add_file(crashinfo.get(), to_slice_c_char("/etc/hosts")), "Failed to add file"); add_stacktrace(crashinfo); + ddog_Timespec timestamp = {.seconds = 1568899800, .nanoseconds = 0}; // Datadog IPO at 2019-09-19T13:30:00Z = 1568899800 unix - check_result(ddog_crashinfo_set_timestamp(crashinfo.get(), 1568899800, 0), + check_result(ddog_crasht_CrashInfo_set_timestamp( + crashinfo.get(), timestamp), "Failed to set timestamp"); - check_result(ddog_crashinfo_upload_to_endpoint( - crashinfo.get(), ddog_Endpoint_file(to_slice_c_char("file://tmp/test"))), + auto endpoint = ddog_endpoint_from_filename(to_slice_c_char("/tmp/test")); + + check_result(ddog_crasht_CrashInfo_upload_to_endpoint(crashinfo.get(), endpoint), "Failed to export to file"); -} \ No newline at end of file + ddog_endpoint_drop(endpoint); +} diff --git a/examples/ffi/crashtracking.c b/examples/ffi/crashtracking.c index 13f4a99c8..a3b373904 100644 --- a/examples/ffi/crashtracking.c +++ b/examples/ffi/crashtracking.c @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 #include -#include +#include #include #include #include @@ -12,8 +12,8 @@ void example_segfault_handler(int signal) { exit(-1); } -void handle_result(ddog_prof_CrashtrackerResult result) { - if (result.tag == DDOG_PROF_PROFILE_RESULT_ERR) { +void handle_result(ddog_crasht_Result result) { + if (result.tag == DDOG_CRASHT_RESULT_ERR) { ddog_CharSlice message = ddog_Error_message(&result.err); fprintf(stderr, "%.*s\n", (int)message.len, message.ptr); ddog_Error_drop(&result.err); @@ -21,8 +21,8 @@ void handle_result(ddog_prof_CrashtrackerResult result) { } } -uintptr_t handle_uintptr_t_result(ddog_prof_CrashtrackerUsizeResult result) { - if (result.tag == DDOG_PROF_PROFILE_RESULT_ERR) { +uintptr_t handle_uintptr_t_result(ddog_crasht_UsizeResult result) { + if (result.tag == DDOG_CRASHT_USIZE_RESULT_ERR) { ddog_CharSlice message = ddog_Error_message(&result.err); fprintf(stderr, "%.*s\n", (int)message.len, message.ptr); ddog_Error_drop(&result.err); @@ -37,7 +37,7 @@ int main(int argc, char **argv) { return -1; } - ddog_prof_CrashtrackerReceiverConfig receiver_config = { + ddog_crasht_ReceiverConfig receiver_config = { .args = {}, .env = {}, .path_to_receiver_binary = DDOG_CHARSLICE_C("SET ME TO THE ACTUAL PATH ON YOUR MACHINE"), @@ -49,26 +49,31 @@ int main(int argc, char **argv) { .optional_stdout_filename = DDOG_CHARSLICE_C("/tmp/crashreports/stdout.txt"), }; - ddog_prof_CrashtrackerConfiguration config = { + struct ddog_Endpoint *endpoint = + ddog_endpoint_from_filename(DDOG_CHARSLICE_C("/tmp/crashreports/crashreport.json")); + // Alternatively: + // struct ddog_Endpoint * endpoint = + // ddog_endpoint_from_url(DDOG_CHARSLICE_C("http://localhost:8126")); + + ddog_crasht_Config config = { .create_alt_stack = false, - .endpoint = ddog_Endpoint_file(DDOG_CHARSLICE_C("/tmp/crashreports/crashreport.json")), - // Alternatively: - //.endpoint = ddog_prof_Endpoint_agent(DDOG_CHARSLICE_C("http://localhost:8126")), - .resolve_frames = DDOG_PROF_STACKTRACE_COLLECTION_ENABLED_WITH_INPROCESS_SYMBOLS, + .endpoint = endpoint, + .resolve_frames = DDOG_CRASHT_STACKTRACE_COLLECTION_ENABLED_WITH_INPROCESS_SYMBOLS, }; - ddog_prof_CrashtrackerMetadata metadata = { + ddog_crasht_Metadata metadata = { .profiling_library_name = 
DDOG_CHARSLICE_C("crashtracking-test"), .profiling_library_version = DDOG_CHARSLICE_C("12.34.56"), .family = DDOG_CHARSLICE_C("crashtracking-test"), .tags = NULL, }; - handle_result(ddog_prof_Crashtracker_init_with_receiver(config, receiver_config, metadata)); - handle_result( - ddog_prof_Crashtracker_begin_profiling_op(DDOG_PROF_PROFILING_OP_TYPES_SERIALIZING)); - handle_uintptr_t_result(ddog_prof_Crashtracker_insert_span_id(0, 42)); - handle_uintptr_t_result(ddog_prof_Crashtracker_insert_trace_id(1, 1)); + handle_result(ddog_crasht_init_with_receiver(config, receiver_config, metadata)); + ddog_endpoint_drop(endpoint); + + handle_result(ddog_crasht_begin_profiling_op(DDOG_CRASHT_PROFILING_OP_TYPES_SERIALIZING)); + handle_uintptr_t_result(ddog_crasht_insert_span_id(0, 42)); + handle_uintptr_t_result(ddog_crasht_insert_trace_id(1, 1)); #ifdef EXPLICIT_RAISE_SEGV // Test raising SEGV explicitly, to ensure chaining works diff --git a/ipc/Cargo.toml b/ipc/Cargo.toml index bbd866554..b702645a3 100644 --- a/ipc/Cargo.toml +++ b/ipc/Cargo.toml @@ -59,3 +59,6 @@ bench = false harness = false name = "ipc" path = "benches/ipc.rs" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(polyfill_glibc_memfd)'] } diff --git a/ipc/src/platform/mem_handle.rs b/ipc/src/platform/mem_handle.rs index 0cec84127..94bd62fe6 100644 --- a/ipc/src/platform/mem_handle.rs +++ b/ipc/src/platform/mem_handle.rs @@ -15,6 +15,7 @@ pub struct ShmHandle { } #[derive(Debug)] +#[allow(dead_code)] pub struct AnonHandle { pub(crate) size: usize, } diff --git a/ipc/tarpc/tarpc/src/client.rs b/ipc/tarpc/tarpc/src/client.rs index eedb2596e..a9c3c2d41 100644 --- a/ipc/tarpc/tarpc/src/client.rs +++ b/ipc/tarpc/tarpc/src/client.rs @@ -133,7 +133,7 @@ impl Channel { ); ctx.trace_context.new_child() }); - span.record("rpc.trace_id", &tracing::field::display(ctx.trace_id())); + span.record("rpc.trace_id", tracing::field::display(ctx.trace_id())); let (response_completion, mut response) = oneshot::channel(); let request_id = u64::try_from(self.next_request_id.fetch_add(1, Ordering::Relaxed)).unwrap(); diff --git a/ipc/tarpc/tarpc/src/serde_transport.rs b/ipc/tarpc/tarpc/src/serde_transport.rs index 1773e846b..d94bfe1ff 100644 --- a/ipc/tarpc/tarpc/src/serde_transport.rs +++ b/ipc/tarpc/tarpc/src/serde_transport.rs @@ -631,10 +631,11 @@ mod tests { ); } - #[cfg(tcp)] + #[cfg(feature = "tcp")] #[tokio::test] async fn tcp() -> io::Result<()> { use super::tcp; + use futures::{SinkExt, StreamExt}; let mut listener = tcp::listen("0.0.0.0:0", SymmetricalJson::::default).await?; let addr = listener.local_addr(); diff --git a/profiling-ffi/Cargo.toml b/profiling-ffi/Cargo.toml index 535527f0f..62515d60f 100644 --- a/profiling-ffi/Cargo.toml +++ b/profiling-ffi/Cargo.toml @@ -20,18 +20,18 @@ cbindgen = ["build_common/cbindgen", "ddcommon-ffi/cbindgen"] ddtelemetry-ffi = ["dep:ddtelemetry-ffi"] symbolizer = ["symbolizer-ffi"] data-pipeline-ffi = ["dep:data-pipeline-ffi"] +crashtracker-ffi = ["dep:datadog-crashtracker-ffi"] # Enables the in-process collection of crash-info -crashtracker-collector = ["datadog-crashtracker/collector"] +crashtracker-collector = ["crashtracker-ffi", "datadog-crashtracker-ffi/collector"] # Enables the use of this library to receiver crash-info from a suitable collector -crashtracker-receiver = ["datadog-crashtracker/receiver"] +crashtracker-receiver = ["crashtracker-ffi", "datadog-crashtracker-ffi/receiver"] [build-dependencies] build_common = { path = "../build-common" } [dependencies] anyhow = "1.0" 
-chrono = {version = "0.4", default-features = false } -datadog-crashtracker = { path = "../crashtracker" } +datadog-crashtracker-ffi = { path = "../crashtracker-ffi", default-features = false, optional = true} datadog-profiling = { path = "../profiling" } hyper = { version = "0.14", default-features = false } ddcommon = { path = "../ddcommon"} diff --git a/profiling-ffi/cbindgen.toml b/profiling-ffi/cbindgen.toml index be9db9fcd..4c3c485dd 100644 --- a/profiling-ffi/cbindgen.toml +++ b/profiling-ffi/cbindgen.toml @@ -23,6 +23,7 @@ renaming_overrides_prefixing = true "ByteSlice" = "ddog_ByteSlice" "CancellationToken" = "ddog_CancellationToken" "CharSlice" = "ddog_CharSlice" +"Endpoint" = "ddog_Endpoint" "Error" = "ddog_Error" "HttpStatus" = "ddog_HttpStatus" "Slice_CChar" = "ddog_Slice_CChar" diff --git a/profiling-ffi/src/exporter.rs b/profiling-ffi/src/exporter.rs index 7d00acbb9..515ede7ff 100644 --- a/profiling-ffi/src/exporter.rs +++ b/profiling-ffi/src/exporter.rs @@ -4,13 +4,12 @@ #![allow(renamed_and_removed_lints)] #![allow(clippy::box_vec)] -use crate::Timespec; use datadog_profiling::exporter; use datadog_profiling::exporter::{ProfileExporter, Request}; use datadog_profiling::internal::ProfiledEndpointsStats; use ddcommon::tag::Tag; use ddcommon_ffi::slice::{AsBytes, ByteSlice, CharSlice, Slice}; -use ddcommon_ffi::{Error, MaybeError}; +use ddcommon_ffi::{Error, MaybeError, Timespec}; use std::borrow::Cow; use std::ptr::NonNull; use std::str::FromStr; diff --git a/profiling-ffi/src/lib.rs b/profiling-ffi/src/lib.rs index 9f5c260c2..f25481447 100644 --- a/profiling-ffi/src/lib.rs +++ b/profiling-ffi/src/lib.rs @@ -4,16 +4,13 @@ #[cfg(all(feature = "symbolizer", not(target_os = "windows")))] pub use symbolizer_ffi::*; -use std::fmt::Debug; -use std::time::SystemTime; - -use chrono::{DateTime, TimeZone, Utc}; - -mod crashtracker; mod exporter; mod profiles; -pub use crashtracker::*; +// re-export crashtracker ffi +#[cfg(feature = "crashtracker-ffi")] +pub use datadog_crashtracker_ffi::*; + // re-export telemetry ffi #[cfg(feature = "ddtelemetry-ffi")] pub use ddtelemetry_ffi::*; @@ -21,50 +18,3 @@ pub use ddtelemetry_ffi::*; #[cfg(feature = "data-pipeline-ffi")] #[allow(unused_imports)] pub use data_pipeline_ffi::*; - -/// Represents time since the Unix Epoch in seconds plus nanoseconds. -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub struct Timespec { - pub seconds: i64, - pub nanoseconds: u32, -} - -impl From for DateTime { - fn from(value: Timespec) -> Self { - Utc.timestamp_opt(value.seconds, value.nanoseconds).unwrap() - } -} - -impl From for SystemTime { - fn from(value: Timespec) -> Self { - // The DateTime API is more convenient, so let's delegate. - let datetime: DateTime = value.into(); - SystemTime::from(datetime) - } -} - -impl<'a> From<&'a Timespec> for SystemTime { - fn from(value: &'a Timespec) -> Self { - // The DateTime API is more convenient, so let's delegate. - let datetime: DateTime = (*value).into(); - SystemTime::from(datetime) - } -} - -impl From> for Timespec { - fn from(value: DateTime) -> Self { - Self { - seconds: value.timestamp(), - nanoseconds: value.timestamp_subsec_nanos(), - } - } -} - -impl From for Timespec { - fn from(value: SystemTime) -> Self { - // The DateTime API is more convenient, so let's delegate again. 
- let datetime: DateTime = value.into(); - Self::from(datetime) - } -} diff --git a/profiling-ffi/src/profiles.rs b/profiling-ffi/src/profiles.rs index 5ec993237..6f3b4716b 100644 --- a/profiling-ffi/src/profiles.rs +++ b/profiling-ffi/src/profiles.rs @@ -1,13 +1,12 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::Timespec; use anyhow::Context; use datadog_profiling::api; use datadog_profiling::internal; use datadog_profiling::internal::ProfiledEndpointsStats; use ddcommon_ffi::slice::{AsBytes, CharSlice, Slice}; -use ddcommon_ffi::Error; +use ddcommon_ffi::{Error, Timespec}; use std::num::NonZeroI64; use std::str::Utf8Error; use std::time::{Duration, SystemTime}; diff --git a/profiling/src/collections/string_table/mod.rs b/profiling/src/collections/string_table/mod.rs index 0c785417e..ccdf8224e 100644 --- a/profiling/src/collections/string_table/mod.rs +++ b/profiling/src/collections/string_table/mod.rs @@ -236,11 +236,12 @@ mod tests { /// This is a fuzz test for the allocation optimized `StringTable`. /// It checks both safety (lack of crashes / sanitizer failures), /// as well as functional correctness (the table should behave like an - /// ordered set). + /// ordered set). /// Limitations: /// - The crate used here to generate Strings internally has a default range for the length of /// a string, (0..=64) We should experiment with longer strings to see what happens. https://github.com/camshaft/bolero/blob/f401669697ffcbe7f34cbfd09fd57b93d5df734c/lib/bolero-generator/src/alloc/mod.rs#L17 /// - Since iterating is destructive, can only check the string values once. + /// /// `cargo +nightly bolero test collections::string_table::tests::fuzz_string_table -T 1min` #[test] fn fuzz_string_table() { diff --git a/profiling/src/collections/string_table/wordpress_test_data.rs b/profiling/src/collections/string_table/wordpress_test_data.rs index dfa5ae582..34cd36184 100644 --- a/profiling/src/collections/string_table/wordpress_test_data.rs +++ b/profiling/src/collections/string_table/wordpress_test_data.rs @@ -5,6 +5,7 @@ /// profiler version generated it. It was made from setting up a WordPress /// demo app of some kind. It's extracted from this file (relative to root): /// * `profiling/tests/wordpress.pprof.lz4` +/// /// For various tests such as using MIRI, it's too slow to decompress, open, /// parse, and extract the strings on-demand. pub const WORDPRESS_STRINGS: [&str; 1059] = [ diff --git a/profiling/src/internal/profile/mod.rs b/profiling/src/internal/profile/mod.rs index 31a15f803..8363de3a6 100644 --- a/profiling/src/internal/profile/mod.rs +++ b/profiling/src/internal/profile/mod.rs @@ -142,6 +142,7 @@ impl Profile { /// - "" (the empty string) /// - "local root span id" /// - "trace endpoint" + /// /// All other fields are default. #[inline] pub fn new( diff --git a/profiling/src/pprof/sliced_proto.rs b/profiling/src/pprof/sliced_proto.rs index 4dc067a9f..a741d5c63 100644 --- a/profiling/src/pprof/sliced_proto.rs +++ b/profiling/src/pprof/sliced_proto.rs @@ -40,6 +40,7 @@ //! 1. repeatedly emitting a sliced message with a "required" field, //! 2. repeatedly emitting a sliced message using a "repeated" field, //! 3. Emitting once the message with the repeated field containing all values. +//! //! In other words, we get the same bytes from "required" as "repeated", but //! with fewer allocations (since we don't need a `Vec` for the elements). //! 
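Reviewer note on the `Timespec` relocation above: the conversions deleted from `profiling-ffi/src/lib.rs` now come from `ddcommon-ffi` (both `exporter.rs` and `profiles.rs` switch to `ddcommon_ffi::Timespec`). For reference, a minimal sketch of those conversions as they appear in the removed block, assuming the relocated type keeps the same shape and chrono 0.4 semantics:

```rust
// Sketch of the Timespec conversions moved from profiling-ffi into
// ddcommon-ffi; assumes the relocated type keeps the same fields
// (seconds: i64, nanoseconds: u32) as the removed block above.
use chrono::{DateTime, TimeZone, Utc};
use std::time::SystemTime;

/// Represents time since the Unix Epoch in seconds plus nanoseconds.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct Timespec {
    pub seconds: i64,
    pub nanoseconds: u32,
}

impl From<Timespec> for DateTime<Utc> {
    fn from(value: Timespec) -> Self {
        Utc.timestamp_opt(value.seconds, value.nanoseconds).unwrap()
    }
}

impl From<Timespec> for SystemTime {
    fn from(value: Timespec) -> Self {
        // The DateTime API is more convenient, so delegate to it.
        let datetime: DateTime<Utc> = value.into();
        SystemTime::from(datetime)
    }
}

impl From<DateTime<Utc>> for Timespec {
    fn from(value: DateTime<Utc>) -> Self {
        Self {
            seconds: value.timestamp(),
            nanoseconds: value.timestamp_subsec_nanos(),
        }
    }
}

impl From<SystemTime> for Timespec {
    fn from(value: SystemTime) -> Self {
        // Delegate through DateTime again.
        let datetime: DateTime<Utc> = value.into();
        Self::from(datetime)
    }
}
```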
diff --git a/serverless/Cargo.toml b/serverless/Cargo.toml index 102539afa..bf8ad4410 100644 --- a/serverless/Cargo.toml +++ b/serverless/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "datadog-serverless-trace-mini-agent" -version = "0.5.0" +version = "0.6.0" edition = "2021" [dependencies] diff --git a/serverless/src/main.rs b/serverless/src/main.rs index 483370a47..b0b5acfe8 100644 --- a/serverless/src/main.rs +++ b/serverless/src/main.rs @@ -3,7 +3,7 @@ use env_logger::{Builder, Env, Target}; use log::{error, info}; -use std::sync::Arc; +use std::{env, sync::Arc}; use datadog_trace_mini_agent::{ config, env_verifier, mini_agent, stats_flusher, stats_processor, trace_flusher, @@ -16,6 +16,9 @@ pub fn main() { info!("Starting serverless trace mini agent"); + let mini_agent_version = env!("CARGO_PKG_VERSION").to_string(); + env::set_var("DD_MINI_AGENT_VERSION", mini_agent_version); + let env_verifier = Arc::new(env_verifier::ServerlessEnvVerifier::default()); let trace_flusher = Arc::new(trace_flusher::ServerlessTraceFlusher {}); diff --git a/sidecar-ffi/Cargo.toml b/sidecar-ffi/Cargo.toml index 90ea5b0d9..163deb527 100644 --- a/sidecar-ffi/Cargo.toml +++ b/sidecar-ffi/Cargo.toml @@ -23,4 +23,10 @@ libc = "0.2" [dev-dependencies] hyper = { version = "0.14", default-features = false } -tempfile = {version = "3.3"} +tempfile = { version = "3.3" } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ + "cfg(coverage_nightly)", + 'cfg(feature, values("prefer_dynamic"))', +] } diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index d323d470d..7ca427256 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -44,7 +44,13 @@ rmp-serde = "1.1.1" spawn_worker = { path = "../spawn_worker" } zwohash = "0.1.2" sys-info = { version = "0.9.0" } -tokio = { version = "1.35.1", features = ["fs", "sync", "io-util", "signal", "rt-multi-thread"] } +tokio = { version = "1.35.1", features = [ + "fs", + "sync", + "io-util", + "signal", + "rt-multi-thread", +] } tokio-util = { version = "0.7", features = ["codec"] } prctl = "1.0.0" @@ -71,7 +77,7 @@ features = [ "Win32_Foundation", "Wdk_Storage_FileSystem", "Win32_System_IO", - "Win32_System_WindowsProgramming" + "Win32_System_WindowsProgramming", ] version = "0.51.0" @@ -83,8 +89,7 @@ nix = { version = "0.26.2", features = ["socket", "mman"] } sendfd = { version = "0.4", features = ["tokio"] } [target.'cfg(windows)'.dependencies] -winapi = { version = "=0.2.8" } -kernel32-sys = "0.2.2" +winapi = { version = "0.3.9", features = ["securitybaseapi", "sddl"] } [target.'cfg(windows_seh_wrapper)'.dependencies] microseh = "0.1.1" @@ -95,3 +100,8 @@ tempfile = { version = "3.3" } httpmock = "0.7.0" datadog-trace-utils = { path = "../trace-utils", features = ["test-utils"] } +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(tokio_taskdump,windows_seh_wrapper)', + 'cfg(feature, values("logging"))', +] } diff --git a/sidecar/src/service/tracing/trace_flusher.rs b/sidecar/src/service/tracing/trace_flusher.rs index cdd8f82fe..36f1e2b69 100644 --- a/sidecar/src/service/tracing/trace_flusher.rs +++ b/sidecar/src/service/tracing/trace_flusher.rs @@ -146,6 +146,7 @@ impl TraceFlusher { /// /// * A `Result` which is `Ok` if the flusher task successfully joins, or `Err` if the flusher /// task panics. + /// /// If the flusher task is not running, it returns `Ok`. 
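Reviewer note on the mini agent version plumbing: the `serverless/src/main.rs` hunk above and the `trace-utils` hunk further down replace the `mini_agent_version` config field with an environment-variable handoff that ends up on spans as `_dd.mini_agent_version`. A minimal sketch of that handoff, mirroring the structs in this diff (the `main` wrapper here is illustrative only):

```rust
// The serverless entry point publishes its crate version through
// DD_MINI_AGENT_VERSION; MiniAgentMetadata::default() in trace-utils
// reads it back so spans can be enriched later.
use std::env;

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MiniAgentMetadata {
    pub gcp_project_id: Option<String>,
    pub gcp_region: Option<String>,
    pub version: Option<String>,
}

impl Default for MiniAgentMetadata {
    fn default() -> Self {
        MiniAgentMetadata {
            gcp_project_id: None,
            gcp_region: None,
            // Set by the serverless entry point before the agent starts.
            version: env::var("DD_MINI_AGENT_VERSION").ok(),
        }
    }
}

fn main() {
    // What serverless/src/main.rs now does at startup.
    env::set_var("DD_MINI_AGENT_VERSION", env!("CARGO_PKG_VERSION"));

    let metadata = MiniAgentMetadata::default();
    assert_eq!(
        metadata.version.as_deref(),
        Some(env!("CARGO_PKG_VERSION"))
    );
}
```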
pub(crate) async fn join(&self) -> anyhow::Result<(), JoinError> { let flusher = { diff --git a/sidecar/src/setup/windows.rs b/sidecar/src/setup/windows.rs index 7461e2fbe..9e252b9be 100644 --- a/sidecar/src/setup/windows.rs +++ b/sidecar/src/setup/windows.rs @@ -3,28 +3,37 @@ use crate::one_way_shared_memory::open_named_shm; use crate::primary_sidecar_identifier; +use crate::setup::Liaison; use arrayref::array_ref; use datadog_ipc::platform::metadata::ProcessHandle; use datadog_ipc::platform::{Channel, PIPE_PATH}; -use kernel32::{CreateFileA, CreateNamedPipeA}; use libc::getpid; use std::error::Error; use std::ffi::CString; -use std::os::windows::io::{FromRawHandle, OwnedHandle}; +use std::os::windows::io::{FromRawHandle, OwnedHandle, RawHandle}; use std::ptr::null_mut; use std::time::{Duration, Instant}; use std::{env, io, mem}; use tokio::net::windows::named_pipe::NamedPipeServer; use tracing::warn; use winapi::{ - DWORD, ERROR_ACCESS_DENIED, ERROR_PIPE_BUSY, FILE_FLAG_FIRST_PIPE_INSTANCE, - FILE_FLAG_OVERLAPPED, GENERIC_READ, GENERIC_WRITE, INVALID_HANDLE_VALUE, LPSECURITY_ATTRIBUTES, - OPEN_EXISTING, PIPE_ACCESS_INBOUND, PIPE_ACCESS_OUTBOUND, PIPE_READMODE_BYTE, PIPE_TYPE_BYTE, - PIPE_UNLIMITED_INSTANCES, SECURITY_ATTRIBUTES, + shared::{ + minwindef::DWORD, + winerror::{ERROR_ACCESS_DENIED, ERROR_PIPE_BUSY}, + }, + um::{ + fileapi::{CreateFileA, OPEN_EXISTING}, + handleapi::INVALID_HANDLE_VALUE, + minwinbase::SECURITY_ATTRIBUTES, + winbase::{ + CreateNamedPipeA, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, + PIPE_ACCESS_INBOUND, PIPE_ACCESS_OUTBOUND, PIPE_READMODE_BYTE, PIPE_TYPE_BYTE, + PIPE_UNLIMITED_INSTANCES, + }, + winnt::{GENERIC_READ, GENERIC_WRITE}, + }, }; -use crate::setup::Liaison; - pub type IpcClient = NamedPipeServer; pub type IpcServer = OwnedHandle; @@ -70,7 +79,7 @@ impl Liaison for NamedPipeLiaison { // Have a ProcessHandle::Getter() so that we don't immediately block in case the sidecar is // still starting up, but only the first time we want to submit shared memory Ok(Channel::from_client_handle_and_pid( - unsafe { OwnedHandle::from_raw_handle(pipe) }, + unsafe { OwnedHandle::from_raw_handle(pipe as RawHandle) }, ProcessHandle::Getter(Box::new(move || { // Await the shared memory handle which will contain the pid of the sidecar // As it may not be immediately available during startup @@ -129,7 +138,7 @@ impl Liaison for NamedPipeLiaison { 65536, 65536, 0, - &mut sec_attributes as LPSECURITY_ATTRIBUTES, + &mut sec_attributes, ) } { INVALID_HANDLE_VALUE => { @@ -143,7 +152,9 @@ impl Liaison for NamedPipeLiaison { Err(error) } } - h => Ok(Some(unsafe { OwnedHandle::from_raw_handle(h) })), + h => Ok(Some(unsafe { + OwnedHandle::from_raw_handle(h as RawHandle) + })), } } @@ -188,16 +199,15 @@ pub type DefaultLiason = NamedPipeLiaison; #[cfg(test)] mod tests { + use super::Liaison; use futures::future; - use kernel32::CloseHandle; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use std::io::Write; use std::os::windows::io::IntoRawHandle; use tokio::io::AsyncReadExt; use tokio::net::windows::named_pipe::NamedPipeServer; - - use super::Liaison; + use winapi::um::{handleapi::CloseHandle, winnt::HANDLE}; #[tokio::test] async fn test_shared_dir_can_connect_to_socket() -> anyhow::Result<()> { @@ -232,7 +242,7 @@ mod tests { // for this test: Somehow, NamedPipeServer remains tangled with the event-loop and won't // free itself in time - unsafe { CloseHandle(raw_handle) }; + unsafe { CloseHandle(raw_handle as HANDLE) }; std::mem::forget(srv); liaison 
diff --git a/sidecar/src/windows.rs b/sidecar/src/windows.rs index 151ac09b3..654841305 100644 --- a/sidecar/src/windows.rs +++ b/sidecar/src/windows.rs @@ -1,20 +1,39 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 + use crate::enter_listener_loop; use crate::setup::pid_shm_path; use datadog_ipc::platform::{ named_pipe_name_from_raw_handle, FileBackedHandle, MappedMem, NamedShmHandle, }; use futures::FutureExt; -use kernel32::WTSGetActiveConsoleSessionId; +use lazy_static::lazy_static; use manual_future::ManualFuture; use spawn_worker::{SpawnWorker, Stdio}; -use std::io; +use std::ffi::CStr; +use std::io::{self, Error}; use std::os::windows::io::{AsRawHandle, IntoRawHandle, OwnedHandle}; +use std::ptr::null_mut; use std::sync::{Arc, Mutex}; use std::time::Instant; use tokio::net::windows::named_pipe::{NamedPipeServer, ServerOptions}; use tokio::select; +use tracing::{error, info}; +use winapi::{ + shared::{ + sddl::ConvertSidToStringSidA, + winerror::{ERROR_INSUFFICIENT_BUFFER, ERROR_NO_TOKEN}, + }, + um::{ + handleapi::CloseHandle, + processthreadsapi::{ + GetCurrentProcess, GetCurrentThread, OpenProcessToken, OpenThreadToken, + }, + securitybaseapi::GetTokenInformation, + winbase::LocalFree, + winnt::{TokenUser, HANDLE, TOKEN_QUERY, TOKEN_USER}, + }, +}; #[no_mangle] pub extern "C" fn ddog_daemon_entry_point() { @@ -33,13 +52,13 @@ pub extern "C" fn ddog_daemon_entry_point() { { Ok(ok) => ok, Err(err) => { - tracing::error!("Couldn't store pid to shared memory: {err}"); + error!("Couldn't store pid to shared memory: {err}"); return; } }; shm.as_slice_mut().copy_from_slice(&pid.to_ne_bytes()); - tracing::info!("Starting sidecar, pid: {}", pid); + info!("Starting sidecar, pid: {}", pid); let acquire_listener = move || unsafe { let (closed_future, close_completer) = ManualFuture::new(); @@ -61,11 +80,11 @@ pub extern "C" fn ddog_daemon_entry_point() { }; if let Err(err) = enter_listener_loop(acquire_listener) { - tracing::error!("Error: {err}") + error!("Error: {err}") } } - tracing::info!( + info!( "shutting down sidecar, pid: {}, total runtime: {:.3}s", pid, now.elapsed().as_secs_f64() @@ -109,6 +128,81 @@ pub fn setup_daemon_process(listener: OwnedHandle, spawn_cfg: &mut SpawnWorker) Ok(()) } -pub fn primary_sidecar_identifier() -> u32 { - unsafe { WTSGetActiveConsoleSessionId() } +lazy_static! 
{ + static ref SIDECAR_IDENTIFIER: String = fetch_sidecar_identifier(); +} + +fn fetch_sidecar_identifier() -> String { + unsafe { + let mut access_token = null_mut(); + + 'token: { + if OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, 1, &mut access_token) != 0 { + break 'token; + } + let mut err = Error::last_os_error(); + if err.raw_os_error() == Some(ERROR_NO_TOKEN as i32) { + if OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &mut access_token) != 0 { + break 'token; + } + err = Error::last_os_error(); + } + error!("Failed fetching thread token: {:?}", err); + return "".to_string(); + } + + let mut info_buffer_size = 0; + if GetTokenInformation( + access_token, + TokenUser, + null_mut(), + 0, + &mut info_buffer_size, + ) == 0 + { + let err = Error::last_os_error(); + if err.raw_os_error() != Some(ERROR_INSUFFICIENT_BUFFER as i32) { + error!("Failed fetching thread token: {:?}", err); + CloseHandle(access_token); + return "".to_string(); + } + } + + let user_token_mem = Vec::::with_capacity(info_buffer_size as usize); + let user_token = user_token_mem.as_ptr() as *const TOKEN_USER; + if GetTokenInformation( + access_token, + TokenUser, + user_token as *mut _, + info_buffer_size, + &mut info_buffer_size, + ) == 0 + { + error!("Failed fetching thread token: {:?}", Error::last_os_error()); + CloseHandle(access_token); + return "".to_string(); + } + + let mut string_sid = null_mut(); + let success = ConvertSidToStringSidA((*user_token).User.Sid, &mut string_sid); + CloseHandle(access_token); + + if success == 0 { + error!("Failed stringifying SID: {:?}", Error::last_os_error()); + return "".to_string(); + } + + let str = String::from_utf8_lossy(CStr::from_ptr(string_sid).to_bytes()).to_string(); + LocalFree(string_sid as HANDLE); + str + } +} + +pub fn primary_sidecar_identifier() -> &'static str { + SIDECAR_IDENTIFIER.as_str() +} + +#[test] +fn test_fetch_identifier() { + assert!(primary_sidecar_identifier().starts_with("S-")); } diff --git a/spawn_worker/Cargo.toml b/spawn_worker/Cargo.toml index 848f2a1fd..c6b74422a 100644 --- a/spawn_worker/Cargo.toml +++ b/spawn_worker/Cargo.toml @@ -39,3 +39,7 @@ tempfile = { version = "3.3" } [target.'cfg(not(windows))'.dev-dependencies] rlimit = {version = "0.9"} + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage,coverage_nightly)'] } + diff --git a/tests/spawn_from_lib/tests/spawn.rs b/tests/spawn_from_lib/tests/spawn.rs index 95f566be3..1315e5561 100644 --- a/tests/spawn_from_lib/tests/spawn.rs +++ b/tests/spawn_from_lib/tests/spawn.rs @@ -3,10 +3,10 @@ // #![cfg(feature = "prefer-dynamic")] // use test_spawn_from_lib::spawn_self; -#[cfg(feature = "prefer_dynamic")] +#[cfg(feature = "prefer-dynamic")] use std::io::{Read, Seek}; -#[cfg(feature = "prefer_dynamic")] +#[cfg(feature = "prefer-dynamic")] fn rewind_and_read(file: &mut std::fs::File) -> anyhow::Result { file.rewind()?; let mut buf = String::new(); @@ -19,7 +19,7 @@ fn rewind_and_read(file: &mut std::fs::File) -> anyhow::Result { /// prefer-dynamic -- --ignored #[test] #[ignore = "requires -C prefer-dynamic"] -#[cfg(feature = "prefer_dynamic")] +#[cfg(feature = "prefer-dynamic")] fn test_spawning_trampoline_worker() { let mut stdout = tempfile::tempfile().unwrap(); let mut stderr = tempfile::tempfile().unwrap(); diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index 9db3aab71..955ff42a1 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -77,6 +77,7 @@ COPY [ "Cargo.lock", "Cargo.toml", "./"] COPY 
"alloc/Cargo.toml" "alloc/" COPY "build-common/Cargo.toml" "build-common/" COPY "crashtracker/Cargo.toml" "crashtracker/" +COPY "crashtracker-ffi/Cargo.toml" "crashtracker-ffi/" COPY "ddcommon/Cargo.toml" "ddcommon/" COPY "ddcommon-ffi/Cargo.toml" "ddcommon-ffi/" COPY "ddtelemetry/Cargo.toml" "ddtelemetry/" diff --git a/trace-mini-agent/Cargo.toml b/trace-mini-agent/Cargo.toml index dc94b626d..1ffc2bb1c 100644 --- a/trace-mini-agent/Cargo.toml +++ b/trace-mini-agent/Cargo.toml @@ -1,8 +1,10 @@ [package] name = "datadog-trace-mini-agent" -description = "A subset of the trace agent that is shipped alongside tracers in a few serverless use cases (Google Cloud Functions and Azure Functions)" -version = "0.5.0" -edition = "2021" +description = "A subset of the trace agent that is shipped alongside tracers in a few serverless use cases (Google Cloud Functions, Azure Functions, and Azure Spring Apps)" +edition.workspace = true +version.workspace = true +rust-version.workspace = true +license.workspace = true autobenches = false [lib] diff --git a/trace-mini-agent/src/config.rs b/trace-mini-agent/src/config.rs index 647bea804..418c6495a 100644 --- a/trace-mini-agent/src/config.rs +++ b/trace-mini-agent/src/config.rs @@ -19,7 +19,6 @@ pub struct Config { pub env_type: trace_utils::EnvironmentType, pub function_name: Option, pub max_request_content_length: usize, - pub mini_agent_version: String, pub obfuscation_config: obfuscation_config::ObfuscationConfig, pub os: String, /// how often to flush stats, in seconds @@ -62,8 +61,6 @@ impl Config { ) })?; - let mini_agent_version: String = env!("CARGO_PKG_VERSION").to_string(); - Ok(Config { function_name: Some(function_name), env_type, @@ -84,7 +81,6 @@ impl Config { ..Default::default() }, obfuscation_config, - mini_agent_version, }) } } diff --git a/trace-mini-agent/src/env_verifier.rs b/trace-mini-agent/src/env_verifier.rs index 8363d224f..3400006aa 100644 --- a/trace-mini-agent/src/env_verifier.rs +++ b/trace-mini-agent/src/env_verifier.rs @@ -117,6 +117,7 @@ impl ServerlessEnvVerifier { gcp_region: Some(get_region_from_gcp_region_string( gcp_metadata.instance.region, )), + version: trace_utils::MiniAgentMetadata::default().version, } } } @@ -139,6 +140,9 @@ impl EnvVerifier for ServerlessEnvVerifier { .verify_gcp_environment_or_exit(verify_env_timeout) .await; } + trace_utils::EnvironmentType::AzureSpringApp => { + trace_utils::MiniAgentMetadata::default() + } trace_utils::EnvironmentType::LambdaFunction => { trace_utils::MiniAgentMetadata::default() } @@ -466,6 +470,7 @@ mod tests { trace_utils::MiniAgentMetadata { gcp_project_id: Some("unknown".to_string()), gcp_region: Some("unknown".to_string()), + version: None } ); } diff --git a/trace-mini-agent/src/http_utils.rs b/trace-mini-agent/src/http_utils.rs index 701af8d2f..523736d63 100644 --- a/trace-mini-agent/src/http_utils.rs +++ b/trace-mini-agent/src/http_utils.rs @@ -11,8 +11,7 @@ use serde_json::json; /// Does two things: /// 1. Logs the given message. A success status code (within 200-299) will cause an info log to be -/// written, -/// otherwise error will be written. +/// written, otherwise error will be written. /// 2. Returns the given message in the body of JSON response with the given status code. /// /// Response body format: @@ -32,6 +31,26 @@ pub fn log_and_create_http_response( Response::builder().status(status).body(Body::from(body)) } +/// Does two things: +/// 1. Logs the given message +/// 2. 
Returns the rate_by_service map to use to set the sampling priority in the body of JSON +/// response with the given status code. +/// +/// Response body format: +/// { +/// "rate_by_service": { +/// "service:,env:":1 +/// } +/// } +pub fn log_and_create_traces_success_http_response( + message: &str, + status: StatusCode, +) -> http::Result> { + info!("{message}"); + let body = json!({"rate_by_service":{"service:,env:":1}}).to_string(); + Response::builder().status(status).body(Body::from(body)) +} + /// Takes a request's header map, and verifies that the "content-length" header is present, valid, /// and less than the given max_content_length. /// diff --git a/trace-mini-agent/src/mini_agent.rs b/trace-mini-agent/src/mini_agent.rs index e42103bd1..e62b53c30 100644 --- a/trace-mini-agent/src/mini_agent.rs +++ b/trace-mini-agent/src/mini_agent.rs @@ -192,6 +192,9 @@ impl MiniAgent { INFO_ENDPOINT_PATH ], "client_drop_p0s": true, + "config": { + "statsd_port": MINI_AGENT_PORT + } } ); Response::builder() diff --git a/trace-mini-agent/src/trace_processor.rs b/trace-mini-agent/src/trace_processor.rs index 5ec036480..be1fafb2b 100644 --- a/trace-mini-agent/src/trace_processor.rs +++ b/trace-mini-agent/src/trace_processor.rs @@ -17,7 +17,7 @@ use datadog_trace_utils::tracer_payload::TraceEncoding; use crate::{ config::Config, - http_utils::{self, log_and_create_http_response}, + http_utils::{self, log_and_create_http_response, log_and_create_traces_success_http_response}, }; #[async_trait] @@ -47,10 +47,7 @@ impl TraceChunkProcessor for ChunkProcessor { ); for span in chunk.spans.iter_mut() { trace_utils::enrich_span_with_mini_agent_metadata(span, &self.mini_agent_metadata); - trace_utils::enrich_span_with_azure_metadata( - span, - self.config.mini_agent_version.as_str(), - ); + trace_utils::enrich_span_with_azure_metadata(span); obfuscate_span(span, &self.config.obfuscation_config); } } @@ -108,9 +105,9 @@ impl TraceProcessor for ServerlessTraceProcessor { // send trace payload to our trace flusher match tx.send(send_data).await { Ok(_) => { - return log_and_create_http_response( + return log_and_create_traces_success_http_response( "Successfully buffered traces to be flushed.", - StatusCode::ACCEPTED, + StatusCode::OK, ); } Err(err) => { @@ -174,7 +171,6 @@ mod tests { env_type: trace_utils::EnvironmentType::CloudFunction, os: "linux".to_string(), obfuscation_config: ObfuscationConfig::new().unwrap(), - mini_agent_version: "0.1.0".to_string(), } } diff --git a/trace-normalization/src/normalizer.rs b/trace-normalization/src/normalizer.rs index f6a97ddb6..c18b0c5a7 100644 --- a/trace-normalization/src/normalizer.rs +++ b/trace-normalization/src/normalizer.rs @@ -73,9 +73,8 @@ pub fn normalize_trace(trace: &mut [pb::Span]) -> anyhow::Result<()> { /// normalize_chunk takes a trace chunk and /// * populates origin field if it wasn't populated -/// * populates priority field if it wasn't populated -/// the root span is used to populate these fields, and it's index in TraceChunk spans vec must be -/// passed. +/// * populates priority field if it wasn't populated the root span is used to populate these +/// fields, and it's index in TraceChunk spans vec must be passed. 
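For the reworded `normalize_chunk` contract above, a hypothetical usage sketch; it assumes the prost-generated `pb` types derive `Default` and that the function is reachable at the module path shown in this diff:

```rust
// Hypothetical caller of normalize_chunk: the root span's index into
// `chunk.spans` is passed explicitly, and origin/priority are filled in
// from that root span when they were not already populated.
use datadog_trace_normalization::normalizer;
use datadog_trace_protobuf::pb;

fn normalize_example() -> anyhow::Result<()> {
    let mut chunk = pb::TraceChunk {
        spans: vec![pb::Span {
            trace_id: 1,
            span_id: 1,
            service: "my-service".to_string(),
            ..Default::default()
        }],
        ..Default::default()
    };
    // Index 0 is the root span in this single-span chunk.
    normalizer::normalize_chunk(&mut chunk, 0)?;
    Ok(())
}
```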
pub fn normalize_chunk(chunk: &mut pb::TraceChunk, root_span_index: usize) -> anyhow::Result<()> { // check if priority is not populated let root_span = match chunk.spans.get(root_span_index) { diff --git a/trace-utils/src/config_utils.rs b/trace-utils/src/config_utils.rs index 3095e5800..e071072f1 100644 --- a/trace-utils/src/config_utils.rs +++ b/trace-utils/src/config_utils.rs @@ -25,6 +25,10 @@ pub fn read_cloud_env() -> Option<(String, trace_utils::EnvironmentType)> { // Set by Azure Functions return Some((res, trace_utils::EnvironmentType::AzureFunction)); } + if let Ok(res) = env::var("ASCSVCRT_SPRING__APPLICATION__NAME") { + // Set by Azure Spring Apps + return Some((res, trace_utils::EnvironmentType::AzureSpringApp)); + } None } diff --git a/trace-utils/src/trace_utils.rs b/trace-utils/src/trace_utils.rs index 14806c33a..72130cb2d 100644 --- a/trace-utils/src/trace_utils.rs +++ b/trace-utils/src/trace_utils.rs @@ -10,6 +10,7 @@ use rmpv::decode::read_value; use rmpv::{Integer, Value}; use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; +use std::env; pub use crate::send_data::send_data_result::SendDataResult; pub use crate::send_data::SendData; @@ -428,6 +429,7 @@ pub fn set_serverless_root_span_tags( let origin_tag = match env_type { EnvironmentType::CloudFunction => "cloudfunction", EnvironmentType::AzureFunction => "azurefunction", + EnvironmentType::AzureSpringApp => "azurespringapp", EnvironmentType::LambdaFunction => "lambda", // historical reasons }; span.meta @@ -450,13 +452,25 @@ fn update_tracer_top_level(span: &mut pb::Span) { pub enum EnvironmentType { CloudFunction, AzureFunction, + AzureSpringApp, LambdaFunction, } -#[derive(Clone, Default, Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq)] pub struct MiniAgentMetadata { pub gcp_project_id: Option, pub gcp_region: Option, + pub version: Option, +} + +impl Default for MiniAgentMetadata { + fn default() -> Self { + MiniAgentMetadata { + gcp_project_id: Default::default(), + gcp_region: Default::default(), + version: env::var("DD_MINI_AGENT_VERSION").ok(), + } + } } pub fn enrich_span_with_mini_agent_metadata( @@ -471,9 +485,15 @@ pub fn enrich_span_with_mini_agent_metadata( span.meta .insert("location".to_string(), gcp_region.to_string()); } + if let Some(mini_agent_version) = &mini_agent_metadata.version { + span.meta.insert( + "_dd.mini_agent_version".to_string(), + mini_agent_version.to_string(), + ); + } } -pub fn enrich_span_with_azure_metadata(span: &mut pb::Span, mini_agent_version: &str) { +pub fn enrich_span_with_azure_metadata(span: &mut pb::Span) { if let Some(aas_metadata) = azure_app_services::get_function_metadata() { let aas_tags = [ ("aas.resource.id", aas_metadata.get_resource_id()), @@ -486,7 +506,6 @@ pub fn enrich_span_with_azure_metadata(span: &mut pb::Span, mini_agent_version: aas_metadata.get_instance_name(), ), ("aas.subscription.id", aas_metadata.get_subscription_id()), - ("aas.environment.mini_agent_version", mini_agent_version), ("aas.environment.os", aas_metadata.get_operating_system()), ("aas.environment.runtime", aas_metadata.get_runtime()), (