Commit 80f9ef8

Merge remote-tracking branch 'origin/master' into ref/server-state-cleanup

jjbayer committed Jul 11, 2024
2 parents 946939b + 6dc184f
Showing 45 changed files with 1,264 additions and 1,155 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -18,6 +18,10 @@
- Support extrapolation of metrics extracted from sampled data, as long as the sample rate is set in the DynamicSamplingContext. ([#3753](https://github.com/getsentry/relay/pull/3753))
- Extract thread ID and name in spans. ([#3771](https://github.com/getsentry/relay/pull/3771))
- Compute metrics summary on the extracted custom metrics. ([#3769](https://github.com/getsentry/relay/pull/3769))
+- Add support for `all` and `any` `RuleCondition`(s). ([#3791](https://github.com/getsentry/relay/pull/3791))
+- Copy root span data from `contexts.trace.data` when converting transaction events into raw spans. ([#3790](https://github.com/getsentry/relay/pull/3790))
+- Remove experimental double-write from spans to transactions. ([#3801](https://github.com/getsentry/relay/pull/3801))
+- Add feature flag to disable replay-video events. ([#3803](https://github.com/getsentry/relay/pull/3803))

## 24.6.0

4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -130,7 +130,7 @@ rand_pcg = "0.3.1"
rdkafka = "0.29.0"
rdkafka-sys = "4.3.0"
# Git revision until https://github.com/redis-rs/redis-rs/pull/1097 (merged) and https://github.com/redis-rs/redis-rs/pull/1253 are released.
-redis = { git = "https://github.com/redis-rs/redis-rs.git", rev = "939e5df6f9cc976b0a53987f6eb3f76b2c398bd6", default-features = false }
+redis = { git = "https://github.com/getsentry/redis-rs.git", rev = "939e5df6f9cc976b0a53987f6eb3f76b2c398bd6", default-features = false }
regex = "1.10.2"
reqwest = "0.11.1"
rmp-serde = "1.1.1"
2 changes: 1 addition & 1 deletion py/CHANGELOG.md
@@ -1,6 +1,6 @@
# Changelog

-## Unreleased
+## 0.9.0

- Build wheels with manylinux_2_28 and alma linux 8. [#3787](https://github.com/getsentry/relay/pull/3787)

2 changes: 1 addition & 1 deletion relay-cabi/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "relay-cabi"
version = "0.8.67"
version = "0.9.0"
authors = ["Sentry <oss@sentry.io>"]
homepage = "https://getsentry.github.io/relay/"
repository = "https://github.com/getsentry/relay"
115 changes: 115 additions & 0 deletions relay-config/src/aggregator.rs
@@ -0,0 +1,115 @@
//! Metrics aggregator configuration.

use relay_metrics::aggregator::AggregatorConfig;
use relay_metrics::MetricNamespace;
use serde::{Deserialize, Serialize};

/// Parameters used for metric aggregation.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(default)]
pub struct AggregatorServiceConfig {
    /// The config used by the internal aggregator.
    #[serde(flatten)]
    pub aggregator: AggregatorConfig,

    /// Maximum amount of bytes used for metrics aggregation.
    ///
    /// When aggregating metrics, Relay keeps track of how many bytes a metric takes in memory.
    /// This is only an approximation and does not take into account things such as pre-allocation
    /// in hashmaps.
    ///
    /// Defaults to `None`, i.e. no limit.
    pub max_total_bucket_bytes: Option<usize>,

    /// The approximate maximum number of bytes submitted within one flush cycle.
    ///
    /// This controls how big flushed batches of buckets get, depending on the number of buckets,
    /// the cumulative length of their keys, and the number of raw values. Since final serialization
    /// adds some additional overhead, this number is approximate and some safety margin should be
    /// left to hard limits.
    pub max_flush_bytes: usize,

    /// The flushing interval in milliseconds that determines how often the aggregator is polled for
    /// flushing new buckets.
    ///
    /// Defaults to `100` milliseconds.
    pub flush_interval_ms: u64,
}

impl Default for AggregatorServiceConfig {
    fn default() -> Self {
        Self {
            aggregator: AggregatorConfig::default(),
            max_total_bucket_bytes: None,
            max_flush_bytes: 5_000_000, // 5 MB
            flush_interval_ms: 100,     // 100 milliseconds
        }
    }
}

/// Contains an [`AggregatorServiceConfig`] for a specific scope.
///
/// For now, the only way to scope an aggregator is by [`MetricNamespace`].
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ScopedAggregatorConfig {
    /// Name of the aggregator, used to tag statsd metrics.
    pub name: String,
    /// Condition that needs to be met for a metric or bucket to be routed to a
    /// secondary aggregator.
    pub condition: Condition,
    /// The configuration of the secondary aggregator.
    pub config: AggregatorServiceConfig,
}

/// Condition that needs to be met for a metric or bucket to be routed to a
/// secondary aggregator.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "op", rename_all = "lowercase")]
pub enum Condition {
    /// Checks for equality on a specific field.
    Eq(FieldCondition),
    /// Matches if all conditions are true.
    And {
        /// Inner rules to combine.
        inner: Vec<Condition>,
    },
    /// Matches if any condition is true.
    Or {
        /// Inner rules to combine.
        inner: Vec<Condition>,
    },
    /// Inverts the condition.
    Not {
        /// Inner rule to negate.
        inner: Box<Condition>,
    },
}

impl Condition {
    /// Checks if the condition matches the given namespace.
    pub fn matches(&self, namespace: Option<MetricNamespace>) -> bool {
        match self {
            Condition::Eq(field) => field.matches(namespace),
            Condition::And { inner } => inner.iter().all(|cond| cond.matches(namespace)),
            Condition::Or { inner } => inner.iter().any(|cond| cond.matches(namespace)),
            Condition::Not { inner } => !inner.matches(namespace),
        }
    }
}

/// Defines a field and a field value to compare to when a [`Condition`] is evaluated.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "field", content = "value", rename_all = "lowercase")]
pub enum FieldCondition {
    /// Field that allows comparison to a metric or bucket's namespace.
    Namespace(MetricNamespace),
}

impl FieldCondition {
    fn matches(&self, namespace: Option<MetricNamespace>) -> bool {
        match (self, namespace) {
            (FieldCondition::Namespace(expected), Some(actual)) => expected == &actual,
            _ => false,
        }
    }
}
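Serialization-wise, `Condition` is internally tagged by `op`, and the `FieldCondition` tag/content pair flattens into the same JSON object. A minimal round-trip sketch (hypothetical usage, assuming a `serde_json` dependency and the lowercase string form of `MetricNamespace`):

use relay_config::aggregator::{AggregatorServiceConfig, Condition};
use relay_metrics::MetricNamespace;

fn main() -> Result<(), serde_json::Error> {
    // Defaults as defined above: 5 MB flush batches, 100 ms flush interval.
    let defaults = AggregatorServiceConfig::default();
    assert_eq!(defaults.max_flush_bytes, 5_000_000);
    assert_eq!(defaults.flush_interval_ms, 100);

    // `op` picks the Condition variant; `field`/`value` come from FieldCondition.
    let condition: Condition = serde_json::from_str(
        r#"{"op": "not", "inner": {"op": "eq", "field": "namespace", "value": "custom"}}"#,
    )?;
    assert!(!condition.matches(Some(MetricNamespace::Custom)));
    assert!(condition.matches(Some(MetricNamespace::Spans)));
    Ok(())
}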
7 changes: 4 additions & 3 deletions relay-config/src/config.rs
@@ -15,12 +15,13 @@ use relay_kafka::{
TopicAssignments,
};
use relay_metrics::aggregator::{AggregatorConfig, FlushBatching};
-use relay_metrics::{AggregatorServiceConfig, MetricNamespace, ScopedAggregatorConfig};
+use relay_metrics::MetricNamespace;
use relay_redis::RedisConfig;
use serde::de::{DeserializeOwned, Unexpected, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use uuid::Uuid;

+use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
use crate::byte_size::ByteSize;
use crate::upstream::UpstreamDescriptor;

@@ -2401,12 +2402,12 @@ impl Config {
}
}

-    /// Returns configuration for the default metrics [aggregator](relay_metrics::Aggregator).
+    /// Returns configuration for the default metrics aggregator.
pub fn default_aggregator_config(&self) -> &AggregatorServiceConfig {
&self.values.aggregator
}

-    /// Returns configuration for non-default metrics [aggregators](relay_metrics::Aggregator).
+    /// Returns configuration for non-default metrics aggregators.
pub fn secondary_aggregator_configs(&self) -> &Vec<ScopedAggregatorConfig> {
&self.values.secondary_aggregators
}
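Together with `ScopedAggregatorConfig::condition` from the new `aggregator` module, these two accessors are enough to pick an aggregator config per namespace. A hedged sketch of that lookup (illustrative only; Relay's actual routing lives in its aggregator service):

use relay_config::{AggregatorServiceConfig, Config};
use relay_metrics::MetricNamespace;

/// Returns the first secondary aggregator whose condition matches the
/// namespace, falling back to the default aggregator config.
fn aggregator_config_for(
    config: &Config,
    namespace: Option<MetricNamespace>,
) -> &AggregatorServiceConfig {
    config
        .secondary_aggregator_configs()
        .iter()
        .find(|scoped| scoped.condition.matches(namespace))
        .map(|scoped| &scoped.config)
        .unwrap_or_else(|| config.default_aggregator_config())
}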
2 changes: 2 additions & 0 deletions relay-config/src/lib.rs
@@ -6,10 +6,12 @@
)]
#![allow(clippy::derive_partial_eq_without_eq)]

+pub mod aggregator;
mod byte_size;
mod config;
mod upstream;

+pub use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
pub use crate::byte_size::*;
pub use crate::config::*;
pub use crate::upstream::*;
17 changes: 5 additions & 12 deletions relay-dynamic-config/src/defaults.rs
@@ -101,18 +101,11 @@ pub fn add_span_metrics(project_config: &mut ProjectConfig) {
.is_enabled = true;
}

-    // Enable transaction metrics for span (score.total), but only if double-write to transactions
-    // is disabled.
-    if !project_config
-        .features
-        .has(Feature::ExtractTransactionFromSegmentSpan)
-    {
-        let span_metrics_tx = config
-            .global_groups
-            .entry(GroupKey::SpanMetricsTx)
-            .or_default();
-        span_metrics_tx.is_enabled = true;
-    }
+    let span_metrics_tx = config
+        .global_groups
+        .entry(GroupKey::SpanMetricsTx)
+        .or_default();
+    span_metrics_tx.is_enabled = true;

config._span_metrics_extended = true;
if config.version == 0 {
14 changes: 5 additions & 9 deletions relay-dynamic-config/src/feature.rs
@@ -21,6 +21,11 @@ pub enum Feature {
/// Serialized as `organizations:session-replay-combined-envelope-items`.
#[serde(rename = "organizations:session-replay-combined-envelope-items")]
SessionReplayCombinedEnvelopeItems,
+    /// Disables select organizations from processing mobile replay events.
+    ///
+    /// Serialized as `organizations:session-replay-video-disabled`.
+    #[serde(rename = "organizations:session-replay-video-disabled")]
+    SessionReplayVideoDisabled,
/// Enables new User Feedback ingest.
///
/// TODO(jferg): rename to UserFeedbackIngest once old UserReport logic is deprecated.
@@ -76,15 +81,6 @@ pub enum Feature {
#[serde(rename = "organizations:continuous-profiling")]
ContinuousProfiling,

-    /// When enabled, every standalone segment span will be duplicated as a transaction.
-    ///
-    /// This allows support of product features that rely on transactions for SDKs that only
-    /// send spans.
-    ///
-    /// Serialized as `projects:extract-transaction-from-segment-span`.
-    #[serde(rename = "projects:extract-transaction-from-segment-span")]
-    ExtractTransactionFromSegmentSpan,
-
/// Enables metric extraction from spans for common modules.
///
/// Serialized as `projects:span-metrics-extraction`.
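The new `SessionReplayVideoDisabled` variant round-trips through its `organizations:…` string like the neighboring flags. A small sketch, assuming `serde_json` and that `Feature` derives `Deserialize` and `PartialEq` like the rest of the enum suggests:

use relay_dynamic_config::Feature;

fn main() {
    // The serde rename maps the feature-flag string onto the enum variant.
    let feature: Feature =
        serde_json::from_str(r#""organizations:session-replay-video-disabled""#).unwrap();
    assert_eq!(feature, Feature::SessionReplayVideoDisabled);
}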
8 changes: 0 additions & 8 deletions relay-dynamic-config/src/global.rs
@@ -214,14 +214,6 @@ pub struct Options {
)]
pub span_extraction_sample_rate: Option<f32>,

-    /// Overall sampling of metrics summaries computation.
-    #[serde(
-        rename = "relay.compute-metrics-summaries.sample-rate",
-        deserialize_with = "default_on_error",
-        skip_serializing_if = "is_default"
-    )]
-    pub compute_metrics_summaries_sample_rate: Option<f32>,
-
/// The maximum duplication factor used to extrapolate distribution metrics from sampled data.
///
/// This applies as long as Relay duplicates distribution values to extrapolate. The default is
15 changes: 4 additions & 11 deletions relay-event-normalization/src/normalize/span/tag_extraction.rs
@@ -610,16 +610,14 @@
if let Some(destination) = span
.data
.value()
-        .and_then(|data| data.messaging_destination_name.value())
-        .and_then(|value| value.as_str())
+        .and_then(|data| data.messaging_destination_name.as_str())
{
span_tags.insert(SpanTagKey::MessagingDestinationName, destination.into());
}
if let Some(message_id) = span
.data
.value()
-        .and_then(|data| data.messaging_message_id.value())
-        .and_then(|value| value.as_str())
+        .and_then(|data| data.messaging_message_id.as_str())
{
span_tags.insert(SpanTagKey::MessagingMessageId, message_id.into());
}
@@ -737,12 +735,7 @@
}

if is_mobile {
-        if let Some(thread_name) = span
-            .data
-            .value()
-            .and_then(|data| data.thread_name.value())
-            .and_then(|value| value.as_str())
-        {
+        if let Some(thread_name) = span.data.value().and_then(|data| data.thread_name.as_str()) {
if thread_name == MAIN_THREAD_NAME {
span_tags.insert(SpanTagKey::MainThread, "true".to_owned());
}
@@ -784,7 +777,7 @@
span_tags.insert(SpanTagKey::ThreadId, thread_id.to_string());
}

-        if let Some(thread_name) = data.thread_name.value().and_then(|name| name.as_str()) {
+        if let Some(thread_name) = data.thread_name.as_str() {
span_tags.insert(SpanTagKey::ThreadName, thread_name.into());
}
}
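All three hunks are the same simplification: the `as_str` helper on `Annotated<Value>` collapses the old `.value()` + `.as_str()` two-step. A minimal sketch of the equivalence these call sites rely on (assuming `relay_protocol`'s `Annotated` and `Value` types):

use relay_protocol::{Annotated, Value};

fn main() {
    let thread_name = Annotated::new(Value::String("main".to_owned()));

    // Refactored form: one call on the Annotated field...
    let direct = thread_name.as_str();
    // ...equivalent to the old chain through Option<&Value>.
    let chained = thread_name.value().and_then(|value| value.as_str());

    assert_eq!(direct, Some("main"));
    assert_eq!(direct, chained);
}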