diff --git a/relay-base-schema/src/metrics/mri.rs b/relay-base-schema/src/metrics/mri.rs index f8eddf555c2..2a1dc241966 100644 --- a/relay-base-schema/src/metrics/mri.rs +++ b/relay-base-schema/src/metrics/mri.rs @@ -112,12 +112,6 @@ pub enum MetricNamespace { Transactions, /// Metrics extracted from spans. Spans, - /// User-defined metrics directly sent by SDKs and applications. - Custom, - /// Metric stats. - /// - /// Metrics about metrics. - Stats, /// An unknown and unsupported metric. /// /// Metrics that Relay either doesn't know or recognize the namespace of will be dropped before @@ -132,30 +126,21 @@ pub enum MetricNamespace { impl MetricNamespace { /// Returns all namespaces/variants of this enum. - pub fn all() -> [Self; 6] { + pub fn all() -> [Self; 4] { [ Self::Sessions, Self::Transactions, Self::Spans, - Self::Custom, - Self::Stats, Self::Unsupported, ] } - /// Returns `true` if metric stats are enabled for this namespace. - pub fn has_metric_stats(&self) -> bool { - matches!(self, Self::Custom) - } - /// Returns the string representation for this metric type. 
pub fn as_str(&self) -> &'static str { match self { Self::Sessions => "sessions", Self::Transactions => "transactions", Self::Spans => "spans", - Self::Custom => "custom", - Self::Stats => "metric_stats", Self::Unsupported => "unsupported", } } @@ -169,8 +154,6 @@ impl std::str::FromStr for MetricNamespace { "sessions" => Ok(Self::Sessions), "transactions" => Ok(Self::Transactions), "spans" => Ok(Self::Spans), - "custom" => Ok(Self::Custom), - "metric_stats" => Ok(Self::Stats), _ => Ok(Self::Unsupported), } } @@ -269,7 +252,7 @@ impl<'a> MetricResourceIdentifier<'a> { let (namespace, name) = match name_and_namespace.split_once('/') { Some((raw_namespace, name)) => (raw_namespace.parse()?, name), - None => (MetricNamespace::Custom, name_and_namespace), + None => return Err(ParseMetricError), }; let name = crate::metrics::try_normalize_metric_name(name).ok_or(ParseMetricError)?; @@ -346,7 +329,7 @@ fn parse_name_unit(string: &str) -> Option<(&str, MetricUnit)> { #[cfg(test)] mod tests { - use crate::metrics::{CustomUnit, DurationUnit}; + use crate::metrics::DurationUnit; use super::*; @@ -368,38 +351,24 @@ mod tests { #[test] fn test_parse_mri_lenient() { + assert!(MetricResourceIdentifier::parse("c:foo@none").is_err(),); + assert!(MetricResourceIdentifier::parse("c:foo@something").is_err()); + assert!(MetricResourceIdentifier::parse("foo").is_err()); + assert_eq!( - MetricResourceIdentifier::parse("c:foo@none").unwrap(), - MetricResourceIdentifier { - ty: MetricType::Counter, - namespace: MetricNamespace::Custom, - name: "foo".into(), - unit: MetricUnit::None, - }, - ); - assert_eq!( - MetricResourceIdentifier::parse("c:foo").unwrap(), - MetricResourceIdentifier { - ty: MetricType::Counter, - namespace: MetricNamespace::Custom, - name: "foo".into(), - unit: MetricUnit::None, - }, - ); - assert_eq!( - MetricResourceIdentifier::parse("c:custom/foo").unwrap(), + MetricResourceIdentifier::parse("c:transactions/foo").unwrap(), MetricResourceIdentifier { ty: 
MetricType::Counter, - namespace: MetricNamespace::Custom, + namespace: MetricNamespace::Transactions, name: "foo".into(), unit: MetricUnit::None, }, ); assert_eq!( - MetricResourceIdentifier::parse("c:custom/foo@millisecond").unwrap(), + MetricResourceIdentifier::parse("c:transactions/foo@millisecond").unwrap(), MetricResourceIdentifier { ty: MetricType::Counter, - namespace: MetricNamespace::Custom, + namespace: MetricNamespace::Transactions, name: "foo".into(), unit: MetricUnit::Duration(DurationUnit::MilliSecond), }, @@ -413,32 +382,10 @@ mod tests { unit: MetricUnit::None, }, ); - assert_eq!( - MetricResourceIdentifier::parse("c:foo@something").unwrap(), - MetricResourceIdentifier { - ty: MetricType::Counter, - namespace: MetricNamespace::Custom, - name: "foo".into(), - unit: MetricUnit::Custom(CustomUnit::parse("something").unwrap()), - }, - ); - assert!(MetricResourceIdentifier::parse("foo").is_err()); } #[test] fn test_invalid_names_should_normalize() { - assert_eq!( - MetricResourceIdentifier::parse("c:f?o").unwrap().name, - "f_o" - ); - assert_eq!( - MetricResourceIdentifier::parse("c:f??o").unwrap().name, - "f_o" - ); - assert_eq!( - MetricResourceIdentifier::parse("c:föo").unwrap().name, - "f_o" - ); assert_eq!( MetricResourceIdentifier::parse("c:custom/f?o") .unwrap() @@ -487,10 +434,10 @@ mod tests { #[test] fn test_normalize_dash_to_underscore() { assert_eq!( - MetricResourceIdentifier::parse("d:foo.bar.blob-size@second").unwrap(), + MetricResourceIdentifier::parse("d:transactions/foo.bar.blob-size@second").unwrap(), MetricResourceIdentifier { ty: MetricType::Distribution, - namespace: MetricNamespace::Custom, + namespace: MetricNamespace::Transactions, name: "foo.bar.blob_size".into(), unit: MetricUnit::Duration(DurationUnit::Second), }, @@ -506,7 +453,7 @@ mod tests { .unwrap(), MetricResourceIdentifier { ty: MetricType::Counter, - namespace: MetricNamespace::Custom, + namespace: MetricNamespace::Unsupported, name: "foo".into(), unit: 
MetricUnit::Duration(DurationUnit::MilliSecond), }, @@ -518,12 +465,12 @@ mod tests { assert_eq!( serde_json::to_string(&MetricResourceIdentifier { ty: MetricType::Counter, - namespace: MetricNamespace::Custom, + namespace: MetricNamespace::Transactions, name: "foo".into(), unit: MetricUnit::Duration(DurationUnit::MilliSecond), }) .unwrap(), - "\"c:custom/foo@millisecond\"".to_owned(), + "\"c:transactions/foo@millisecond\"".to_owned(), ); } } diff --git a/relay-base-schema/src/metrics/name.rs b/relay-base-schema/src/metrics/name.rs index 17f894ed123..89f0fe94f5b 100644 --- a/relay-base-schema/src/metrics/name.rs +++ b/relay-base-schema/src/metrics/name.rs @@ -26,13 +26,13 @@ impl MetricName { /// /// let name = MetricName::from("cfoo"); /// assert!(name.try_type().is_none()); - /// let name = MetricName::from("c:custom/foo@none"); + /// let name = MetricName::from("c:spans/foo@none"); /// assert_eq!(name.try_type(), Some(MetricType::Counter)); - /// let name = MetricName::from("d:custom/foo@none"); + /// let name = MetricName::from("d:spans/foo@none"); /// assert_eq!(name.try_type(), Some(MetricType::Distribution)); - /// let name = MetricName::from("s:custom/foo@none"); + /// let name = MetricName::from("s:spans/foo@none"); /// assert_eq!(name.try_type(), Some(MetricType::Set)); - /// let name = MetricName::from("g:custom/foo@none"); + /// let name = MetricName::from("g:spans/foo@none"); /// assert_eq!(name.try_type(), Some(MetricType::Gauge)); /// ``` pub fn try_type(&self) -> Option { @@ -56,11 +56,11 @@ impl MetricName { /// /// let name = MetricName::from("foo"); /// assert_eq!(name.namespace(), MetricNamespace::Unsupported); - /// let name = MetricName::from("c:custom_oops/foo@none"); + /// let name = MetricName::from("c:spans_oops/foo@none"); /// assert_eq!(name.namespace(), MetricNamespace::Unsupported); /// - /// let name = MetricName::from("c:custom/foo@none"); - /// assert_eq!(name.namespace(), MetricNamespace::Custom); + /// let name = 
MetricName::from("c:spans/foo@none"); + /// assert_eq!(name.namespace(), MetricNamespace::Spans); /// ``` pub fn namespace(&self) -> MetricNamespace { self.try_namespace().unwrap_or(MetricNamespace::Unsupported) @@ -77,11 +77,11 @@ impl MetricName { /// /// let name = MetricName::from("foo"); /// assert!(name.try_namespace().is_none()); - /// let name = MetricName::from("c:custom_oops/foo@none"); + /// let name = MetricName::from("c:spans_oops/foo@none"); /// assert!(name.try_namespace().is_none()); /// - /// let name = MetricName::from("c:custom/foo@none"); - /// assert_eq!(name.try_namespace(), Some(MetricNamespace::Custom)); + /// let name = MetricName::from("c:spans/foo@none"); + /// assert_eq!(name.try_namespace(), Some(MetricNamespace::Spans)); /// ``` pub fn try_namespace(&self) -> Option { // A well formed MRI is always in the format `:/[@]`, diff --git a/relay-cardinality/benches/redis_impl.rs b/relay-cardinality/benches/redis_impl.rs index 9892a7aba19..04650b734f6 100644 --- a/relay-cardinality/benches/redis_impl.rs +++ b/relay-cardinality/benches/redis_impl.rs @@ -106,7 +106,7 @@ impl Params { .map(|i| { Entry::new( EntryId(i), - MetricNamespace::Custom, + MetricNamespace::Spans, &self.names[i % self.names.len()], u32::MAX - (i as u32), ) @@ -128,7 +128,7 @@ impl Params { .map(|i| { Entry::new( EntryId(i), - MetricNamespace::Custom, + MetricNamespace::Spans, &self.names[i % self.names.len()], hash.fetch_sub(1, Ordering::SeqCst), ) @@ -142,7 +142,7 @@ impl Params { fn never_entry(&self) -> Entry<'_> { Entry::new( EntryId(usize::MAX), - MetricNamespace::Custom, + MetricNamespace::Spans, &self.names[0], 0, ) @@ -154,7 +154,7 @@ impl Params { .map(|i| { Entry::new( EntryId(usize::MAX - i), - MetricNamespace::Custom, + MetricNamespace::Spans, &self.names[i % self.names.len()], i as u32, ) diff --git a/relay-cardinality/src/config.rs b/relay-cardinality/src/config.rs index 25f740adb3b..dfbb74fbd1d 100644 --- a/relay-cardinality/src/config.rs +++ 
b/relay-cardinality/src/config.rs @@ -106,7 +106,7 @@ mod tests { }, limit: 1337, scope: CardinalityScope::Organization, - namespace: Some(MetricNamespace::Custom), + namespace: Some(MetricNamespace::Sessions), }; let j = serde_json::to_string(&limit).unwrap(); @@ -117,7 +117,7 @@ mod tests { "window":{"windowSeconds":3600,"granularitySeconds":200}, "limit":1337, "scope":"organization", - "namespace":"custom" + "namespace":"sessions" }"#; assert_eq!(serde_json::from_str::(j).unwrap(), limit); } diff --git a/relay-cardinality/src/limiter.rs b/relay-cardinality/src/limiter.rs index 094bc207a41..b97af8880d7 100644 --- a/relay-cardinality/src/limiter.rs +++ b/relay-cardinality/src/limiter.rs @@ -589,8 +589,8 @@ mod tests { Item::new(0, MetricNamespace::Sessions), Item::new(1, MetricNamespace::Transactions), Item::new(2, MetricNamespace::Spans), - Item::new(3, MetricNamespace::Custom), - Item::new(4, MetricNamespace::Custom), + Item::new(3, MetricNamespace::Unsupported), + Item::new(4, MetricNamespace::Unsupported), Item::new(5, MetricNamespace::Transactions), Item::new(6, MetricNamespace::Spans), ]; @@ -605,7 +605,7 @@ mod tests { vec![ (Item::new(0, MetricNamespace::Sessions), &limits[0]), (Item::new(2, MetricNamespace::Spans), &limits[0]), - (Item::new(4, MetricNamespace::Custom), &limits[0]), + (Item::new(4, MetricNamespace::Unsupported), &limits[0]), (Item::new(6, MetricNamespace::Spans), &limits[0]), ] ); @@ -613,7 +613,7 @@ mod tests { split.accepted, vec![ Item::new(1, MetricNamespace::Transactions), - Item::new(3, MetricNamespace::Custom), + Item::new(3, MetricNamespace::Unsupported), Item::new(5, MetricNamespace::Transactions), ] ); @@ -671,12 +671,12 @@ mod tests { ]; let items = vec![ - Item::new(0, MetricNamespace::Custom), - Item::new(1, MetricNamespace::Custom), - Item::new(2, MetricNamespace::Custom), - Item::new(3, MetricNamespace::Custom), - Item::new(4, MetricNamespace::Custom), - Item::new(5, MetricNamespace::Custom), + Item::new(0, 
MetricNamespace::Spans), + Item::new(1, MetricNamespace::Spans), + Item::new(2, MetricNamespace::Spans), + Item::new(3, MetricNamespace::Spans), + Item::new(4, MetricNamespace::Spans), + Item::new(5, MetricNamespace::Spans), ]; let limited = limiter .check_cardinality_limits(build_scoping(), limits, items) @@ -689,17 +689,17 @@ mod tests { assert_eq!( split.rejected, vec![ - (Item::new(0, MetricNamespace::Custom), &limits[0]), - (Item::new(2, MetricNamespace::Custom), &limits[0]), - (Item::new(4, MetricNamespace::Custom), &limits[0]), + (Item::new(0, MetricNamespace::Spans), &limits[0]), + (Item::new(2, MetricNamespace::Spans), &limits[0]), + (Item::new(4, MetricNamespace::Spans), &limits[0]), ] ); assert_eq!( split.accepted, vec![ - Item::new(1, MetricNamespace::Custom), - Item::new(3, MetricNamespace::Custom), - Item::new(5, MetricNamespace::Custom), + Item::new(1, MetricNamespace::Spans), + Item::new(3, MetricNamespace::Spans), + Item::new(5, MetricNamespace::Spans), ] ); } @@ -795,7 +795,7 @@ mod tests { }, ]; let scoping = build_scoping(); - let items = vec![Item::new(0, MetricNamespace::Custom)]; + let items = vec![Item::new(0, MetricNamespace::Spans)]; let limiter = CardinalityLimiter::new(CreateReports); let limited = limiter diff --git a/relay-cardinality/src/redis/limiter.rs b/relay-cardinality/src/redis/limiter.rs index 6e80886684c..fd6cab9d6b0 100644 --- a/relay-cardinality/src/redis/limiter.rs +++ b/relay-cardinality/src/redis/limiter.rs @@ -430,12 +430,12 @@ mod tests { let m5 = MetricName::from("f"); let entries = [ - Entry::new(EntryId(0), Custom, &m0, 0), - Entry::new(EntryId(1), Custom, &m1, 1), - Entry::new(EntryId(2), Custom, &m2, 2), - Entry::new(EntryId(3), Custom, &m3, 3), - Entry::new(EntryId(4), Custom, &m4, 4), - Entry::new(EntryId(5), Custom, &m5, 5), + Entry::new(EntryId(0), Sessions, &m0, 0), + Entry::new(EntryId(1), Sessions, &m1, 1), + Entry::new(EntryId(2), Sessions, &m2, 2), + Entry::new(EntryId(3), Sessions, &m3, 3), + 
Entry::new(EntryId(4), Sessions, &m4, 4), + Entry::new(EntryId(5), Sessions, &m5, 5), ]; let scoping = new_scoping(&limiter); @@ -449,7 +449,7 @@ mod tests { }, limit: 5, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }; // 6 items, limit is 5 -> 1 rejection. @@ -476,12 +476,12 @@ mod tests { let m1 = MetricName::from("b"); let entries = [ - Entry::new(EntryId(0), Custom, &m0, 0), - Entry::new(EntryId(1), Custom, &m0, 1), - Entry::new(EntryId(2), Custom, &m0, 2), - Entry::new(EntryId(3), Custom, &m1, 3), - Entry::new(EntryId(4), Custom, &m1, 4), - Entry::new(EntryId(5), Custom, &m1, 5), + Entry::new(EntryId(0), Sessions, &m0, 0), + Entry::new(EntryId(1), Sessions, &m0, 1), + Entry::new(EntryId(2), Sessions, &m0, 2), + Entry::new(EntryId(3), Sessions, &m1, 3), + Entry::new(EntryId(4), Sessions, &m1, 4), + Entry::new(EntryId(5), Sessions, &m1, 5), ]; let scoping = new_scoping(&limiter); @@ -495,7 +495,7 @@ mod tests { }, limit: 2, scope: CardinalityScope::Name, - namespace: Some(Custom), + namespace: Some(Sessions), }; let rejected = limiter.test_limits(scoping, &[limit.clone()], entries); @@ -537,12 +537,12 @@ mod tests { let m2 = MetricName::from("d:custom/foo@none"); let entries = [ - Entry::new(EntryId(0), Custom, &m0, 0), - Entry::new(EntryId(1), Custom, &m0, 1), - Entry::new(EntryId(2), Custom, &m1, 2), - Entry::new(EntryId(3), Custom, &m2, 3), - Entry::new(EntryId(4), Custom, &m2, 4), - Entry::new(EntryId(5), Custom, &m2, 5), + Entry::new(EntryId(0), Sessions, &m0, 0), + Entry::new(EntryId(1), Sessions, &m0, 1), + Entry::new(EntryId(2), Sessions, &m1, 2), + Entry::new(EntryId(3), Sessions, &m2, 3), + Entry::new(EntryId(4), Sessions, &m2, 4), + Entry::new(EntryId(5), Sessions, &m2, 5), ]; let scoping = new_scoping(&limiter); @@ -556,7 +556,7 @@ mod tests { }, limit: 2, scope: CardinalityScope::Type, - namespace: Some(Custom), + namespace: Some(Sessions), }; let rejected = limiter.test_limits(scoping, 
&[limit.clone()], entries); @@ -615,17 +615,17 @@ mod tests { }, limit: 1, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }]; let m = MetricName::from("a"); - let entries1 = [Entry::new(EntryId(0), Custom, &m, 0)]; + let entries1 = [Entry::new(EntryId(0), Sessions, &m, 0)]; assert!(limiter.test_limits(scoping1, limits, entries1).is_empty()); assert!(limiter.test_limits(scoping2, limits, entries1).is_empty()); // Make sure `entries2` is not accepted. - let entries2 = [Entry::new(EntryId(1), Custom, &m, 1)]; + let entries2 = [Entry::new(EntryId(1), Sessions, &m, 1)]; assert_eq!(limiter.test_limits(scoping1, limits, entries2).len(), 1); assert_eq!(limiter.test_limits(scoping2, limits, entries2).len(), 1); @@ -679,19 +679,19 @@ mod tests { }, limit: 10_000, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }]; let m = MetricName::from("a"); let entries = (0..50) - .map(|i| Entry::new(EntryId(i as usize), Custom, &m, i)) + .map(|i| Entry::new(EntryId(i as usize), Sessions, &m, i)) .collect::>(); let rejected = limiter.test_limits(scoping, limits, entries); assert_eq!(rejected.len(), 0); let entries = (100..150) - .map(|i| Entry::new(EntryId(i as usize), Custom, &m, i)) + .map(|i| Entry::new(EntryId(i as usize), Sessions, &m, i)) .collect::>(); let rejected = limiter.test_limits(scoping, limits, entries); assert_eq!(rejected.len(), 0); @@ -712,13 +712,13 @@ mod tests { }, limit: 80_000, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }]; let m = MetricName::from("a"); let entries = (0..100_000) - .map(|i| Entry::new(EntryId(i as usize), Custom, &m, i)) + .map(|i| Entry::new(EntryId(i as usize), Sessions, &m, i)) .collect::>(); let rejected = limiter.test_limits(scoping, limits, entries); @@ -741,14 +741,14 @@ mod tests { window, limit: 1, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), 
}]; let m0 = MetricName::from("a"); let m1 = MetricName::from("b"); - let entries1 = [Entry::new(EntryId(0), Custom, &m0, 0)]; - let entries2 = [Entry::new(EntryId(1), Custom, &m1, 1)]; + let entries1 = [Entry::new(EntryId(0), Sessions, &m0, 0)]; + let entries2 = [Entry::new(EntryId(1), Sessions, &m1, 1)]; // 1 item and limit is 1 -> No rejections. let rejected = limiter.test_limits(scoping, limits, entries1); @@ -798,7 +798,7 @@ mod tests { let m1 = MetricName::from("b"); let m2 = MetricName::from("c"); - let entries1 = [Entry::new(EntryId(0), Custom, &m0, 0)]; + let entries1 = [Entry::new(EntryId(0), Sessions, &m0, 0)]; let entries2 = [Entry::new(EntryId(0), Spans, &m1, 1)]; let entries3 = [Entry::new(EntryId(0), Transactions, &m2, 2)]; @@ -828,7 +828,7 @@ mod tests { }, limit: 1, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }, CardinalityLimit { id: "limit2".to_owned(), @@ -840,7 +840,7 @@ mod tests { }, limit: 1, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }, CardinalityLimit { id: "limit3".to_owned(), @@ -876,8 +876,8 @@ mod tests { let m5 = MetricName::from("f"); let entries = [ - Entry::new(EntryId(0), Custom, &m0, 0), - Entry::new(EntryId(1), Custom, &m1, 1), + Entry::new(EntryId(0), Sessions, &m0, 0), + Entry::new(EntryId(1), Sessions, &m1, 1), Entry::new(EntryId(2), Spans, &m2, 2), Entry::new(EntryId(3), Spans, &m3, 3), Entry::new(EntryId(4), Transactions, &m4, 4), @@ -968,8 +968,8 @@ mod tests { let m1 = MetricName::from("a"); let m2 = MetricName::from("b"); - let entries1 = [Entry::new(EntryId(0), Custom, &m1, 0)]; - let entries2 = [Entry::new(EntryId(0), Custom, &m2, 1)]; + let entries1 = [Entry::new(EntryId(0), Sessions, &m1, 0)]; + let entries2 = [Entry::new(EntryId(0), Sessions, &m2, 1)]; // Accept different entries for different scopes. 
let rejected = limiter.test_limits(scoping1, limits, entries1); @@ -1000,14 +1000,14 @@ mod tests { window, limit: 100, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }]; let m = MetricName::from("foo"); macro_rules! test { ($r:expr) => {{ let entries = $r - .map(|i| Entry::new(EntryId(i as usize), Custom, &m, i)) + .map(|i| Entry::new(EntryId(i as usize), Sessions, &m, i)) .collect::>(); limiter.test_limits(scoping, limits, entries) @@ -1099,14 +1099,14 @@ mod tests { window, limit: 100, scope: CardinalityScope::Organization, - namespace: Some(Custom), + namespace: Some(Sessions), }]; let m = MetricName::from("foo"); macro_rules! test { ($r:expr) => {{ let entries = $r - .map(|i| Entry::new(EntryId(i as usize), Custom, &m, i)) + .map(|i| Entry::new(EntryId(i as usize), Sessions, &m, i)) .collect::>(); limiter.test_limits(scoping, limits, entries) diff --git a/relay-cogs/src/lib.rs b/relay-cogs/src/lib.rs index b5f217c8f46..5e285134339 100644 --- a/relay-cogs/src/lib.rs +++ b/relay-cogs/src/lib.rs @@ -176,10 +176,6 @@ pub enum AppFeature { MetricsSpans, /// Metrics in the sessions namespace. MetricsSessions, - /// Metrics in the custom namespace. - MetricsCustom, - /// Metrics in the `metric_stats` namespace. - MetricsStats, /// Metrics in the unsupported namespace. 
/// /// This is usually not emitted, since metrics in the unsupported @@ -205,8 +201,6 @@ impl AppFeature { Self::MetricsTransactions => "metrics_transactions", Self::MetricsSpans => "metrics_spans", Self::MetricsSessions => "metrics_sessions", - Self::MetricsCustom => "metrics_custom", - Self::MetricsStats => "metrics_metric_stats", Self::MetricsUnsupported => "metrics_unsupported", Self::Profiles => "profiles", } diff --git a/relay-config/src/aggregator.rs b/relay-config/src/aggregator.rs index dd52c7fae39..118479caa57 100644 --- a/relay-config/src/aggregator.rs +++ b/relay-config/src/aggregator.rs @@ -158,13 +158,13 @@ mod tests { "op": "or", "inner": [ {"op": "eq", "field": "namespace", "value": "spans"}, - {"op": "eq", "field": "namespace", "value": "custom"} + {"op": "eq", "field": "namespace", "value": "sessions"} ] }); let condition = serde_json::from_value::(json).unwrap(); assert!(condition.matches(Some(MetricNamespace::Spans))); - assert!(condition.matches(Some(MetricNamespace::Custom))); + assert!(condition.matches(Some(MetricNamespace::Sessions))); assert!(!condition.matches(Some(MetricNamespace::Transactions))); } } diff --git a/relay-config/src/config.rs b/relay-config/src/config.rs index 058f3b31bf5..7ba7383f097 100644 --- a/relay-config/src/config.rs +++ b/relay-config/src/config.rs @@ -567,23 +567,6 @@ impl Default for Metrics { } } -/// Controls processing of Sentry metrics and metric metadata. -#[derive(Serialize, Deserialize, Debug, Default)] -#[serde(default)] -pub struct SentryMetrics { - /// Whether metric stats are collected and emitted. - /// - /// Metric stats are always collected and emitted when processing - /// is enabled. - /// - /// This option is required for running multiple trusted Relays in a chain - /// and you want the metric stats to be collected and forwarded from - /// the first Relay in the chain. - /// - /// Defaults to `false`. 
- pub metric_stats_enabled: bool, -} - /// Controls various limits #[derive(Serialize, Deserialize, Debug)] #[serde(default)] @@ -1536,8 +1519,6 @@ struct ConfigValues { #[serde(default)] metrics: Metrics, #[serde(default)] - sentry_metrics: SentryMetrics, - #[serde(default)] sentry: relay_log::SentryConfig, #[serde(default)] processing: Processing, @@ -2258,14 +2239,6 @@ impl Config { self.values.limits.max_metric_buckets_size.as_bytes() } - /// Whether metric stats are collected and emitted. - /// - /// Metric stats are always collected and emitted when processing - /// is enabled. - pub fn metric_stats_enabled(&self) -> bool { - self.values.sentry_metrics.metric_stats_enabled || self.values.processing.enabled - } - /// Returns the maximum payload size for general API requests. pub fn max_api_payload_size(&self) -> usize { self.values.limits.max_api_payload_size.as_bytes() diff --git a/relay-dynamic-config/src/feature.rs b/relay-dynamic-config/src/feature.rs index 2210a1b1e7a..f4264ffe32d 100644 --- a/relay-dynamic-config/src/feature.rs +++ b/relay-dynamic-config/src/feature.rs @@ -41,11 +41,6 @@ pub enum Feature { /// Serialized as `organizations:device-class-synthesis`. #[serde(rename = "organizations:device-class-synthesis")] DeviceClassSynthesis, - /// Allow ingestion of metrics in the "custom" namespace. - /// - /// Serialized as `organizations:custom-metrics`. - #[serde(rename = "organizations:custom-metrics")] - CustomMetrics, /// Enable processing profiles. /// /// Serialized as `organizations:profiling`. diff --git a/relay-dynamic-config/src/global.rs b/relay-dynamic-config/src/global.rs index a58f081e922..da8a8051eea 100644 --- a/relay-dynamic-config/src/global.rs +++ b/relay-dynamic-config/src/global.rs @@ -147,17 +147,6 @@ pub struct Options { )] pub metric_bucket_dist_encodings: BucketEncodings, - /// Rollout rate for metric stats. - /// - /// Rate needs to be between `0.0` and `1.0`. 
- /// If set to `1.0` all organizations will have metric stats enabled. - #[serde( - rename = "relay.metric-stats.rollout-rate", - deserialize_with = "default_on_error", - skip_serializing_if = "is_default" - )] - pub metric_stats_rollout_rate: f32, - /// Overall sampling of span extraction. /// /// This number represents the fraction of transactions for which @@ -253,8 +242,6 @@ pub struct BucketEncodings { transactions: BucketEncoding, spans: BucketEncoding, profiles: BucketEncoding, - custom: BucketEncoding, - metric_stats: BucketEncoding, } impl BucketEncodings { @@ -263,8 +250,6 @@ impl BucketEncodings { match namespace { MetricNamespace::Transactions => self.transactions, MetricNamespace::Spans => self.spans, - MetricNamespace::Custom => self.custom, - MetricNamespace::Stats => self.metric_stats, // Always force the legacy encoding for sessions, // sessions are not part of the generic metrics platform with different // consumer which are not (yet) updated to support the new data. @@ -299,8 +284,6 @@ where transactions: encoding, spans: encoding, profiles: encoding, - custom: encoding, - metric_stats: encoding, }) } @@ -506,8 +489,6 @@ mod tests { transactions: BucketEncoding::Legacy, spans: BucketEncoding::Legacy, profiles: BucketEncoding::Legacy, - custom: BucketEncoding::Legacy, - metric_stats: BucketEncoding::Legacy, } ); assert_eq!( @@ -516,8 +497,6 @@ mod tests { transactions: BucketEncoding::Zstd, spans: BucketEncoding::Zstd, profiles: BucketEncoding::Zstd, - custom: BucketEncoding::Zstd, - metric_stats: BucketEncoding::Zstd, } ); } @@ -528,8 +507,6 @@ mod tests { transactions: BucketEncoding::Base64, spans: BucketEncoding::Zstd, profiles: BucketEncoding::Base64, - custom: BucketEncoding::Zstd, - metric_stats: BucketEncoding::Base64, }; let s = serde_json::to_string(&original).unwrap(); let s = format!( diff --git a/relay-dynamic-config/src/metrics.rs b/relay-dynamic-config/src/metrics.rs index 86bbc0c239b..818357dbfec 100644 --- 
a/relay-dynamic-config/src/metrics.rs +++ b/relay-dynamic-config/src/metrics.rs @@ -9,7 +9,6 @@ use relay_base_schema::data_category::DataCategory; use relay_cardinality::CardinalityLimit; use relay_common::glob2::LazyGlob; use relay_common::impl_str_serde; -use relay_pattern::{Patterns, TypedPatterns}; use relay_protocol::RuleCondition; use serde::{Deserialize, Serialize}; @@ -31,18 +30,6 @@ impl Metrics { } } -/// Configuration for removing tags matching the `tag` pattern on metrics whose name matches the `name` pattern. -#[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq)] -#[serde(default)] -pub struct TagBlock { - /// Name of metric of which we want to remove certain tags. - #[serde(skip_serializing_if = "Patterns::is_empty")] - pub name: TypedPatterns, - /// Pattern to match keys of tags that we want to remove. - #[serde(skip_serializing_if = "Patterns::is_empty")] - pub tags: TypedPatterns, -} - /// Rule defining when a target tag should be set on a metric. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] diff --git a/relay-metrics/src/cogs.rs b/relay-metrics/src/cogs.rs index c8df9d5701e..f9acab89b39 100644 --- a/relay-metrics/src/cogs.rs +++ b/relay-metrics/src/cogs.rs @@ -40,8 +40,6 @@ fn to_app_feature(ns: MetricNamespace) -> AppFeature { MetricNamespace::Sessions => AppFeature::MetricsSessions, MetricNamespace::Transactions => AppFeature::MetricsTransactions, MetricNamespace::Spans => AppFeature::MetricsSpans, - MetricNamespace::Custom => AppFeature::MetricsCustom, - MetricNamespace::Stats => AppFeature::MetricsStats, MetricNamespace::Unsupported => AppFeature::MetricsUnsupported, } } diff --git a/relay-metrics/src/utils.rs b/relay-metrics/src/utils.rs index f5cebd84997..4e011289a14 100644 --- a/relay-metrics/src/utils.rs +++ b/relay-metrics/src/utils.rs @@ -21,10 +21,6 @@ pub struct ByNamespace { pub transactions: T, /// Value for the [`MetricNamespace::Spans`] namespace. 
pub spans: T, - /// Value for the [`MetricNamespace::Custom`] namespace. - pub custom: T, - /// Value for the [`MetricNamespace::Stats`] namespace. - pub stats: T, /// Value for the [`MetricNamespace::Unsupported`] namespace. pub unsupported: T, } @@ -36,8 +32,6 @@ impl ByNamespace { MetricNamespace::Sessions => &self.sessions, MetricNamespace::Transactions => &self.transactions, MetricNamespace::Spans => &self.spans, - MetricNamespace::Custom => &self.custom, - MetricNamespace::Stats => &self.stats, MetricNamespace::Unsupported => &self.unsupported, } } @@ -48,8 +42,6 @@ impl ByNamespace { MetricNamespace::Sessions => &mut self.sessions, MetricNamespace::Transactions => &mut self.transactions, MetricNamespace::Spans => &mut self.spans, - MetricNamespace::Custom => &mut self.custom, - MetricNamespace::Stats => &mut self.stats, MetricNamespace::Unsupported => &mut self.unsupported, } } @@ -57,15 +49,13 @@ impl ByNamespace { impl IntoIterator for ByNamespace { type Item = (MetricNamespace, T); - type IntoIter = std::array::IntoIter<(MetricNamespace, T), 6>; + type IntoIter = std::array::IntoIter<(MetricNamespace, T), 4>; fn into_iter(self) -> Self::IntoIter { let Self { sessions, transactions, spans, - custom, - stats, unsupported, } = self; @@ -73,8 +63,6 @@ impl IntoIterator for ByNamespace { (MetricNamespace::Sessions, sessions), (MetricNamespace::Transactions, transactions), (MetricNamespace::Spans, spans), - (MetricNamespace::Custom, custom), - (MetricNamespace::Stats, stats), (MetricNamespace::Unsupported, unsupported), ] .into_iter() @@ -117,16 +105,12 @@ macro_rules! 
impl_op { sessions, transactions, spans, - custom, - stats, unsupported, } = self; $op::$opfn(sessions, rhs.sessions); $op::$opfn(transactions, rhs.transactions); $op::$opfn(spans, rhs.spans); - $op::$opfn(custom, rhs.custom); - $op::$opfn(stats, rhs.stats); $op::$opfn(unsupported, rhs.unsupported); } } diff --git a/relay-quotas/src/rate_limit.rs b/relay-quotas/src/rate_limit.rs index 13a80d955f1..19a309adc4f 100644 --- a/relay-quotas/src/rate_limit.rs +++ b/relay-quotas/src/rate_limit.rs @@ -601,7 +601,7 @@ mod tests { scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), - namespaces: smallvec![MetricNamespace::Custom], + namespaces: smallvec![MetricNamespace::Transactions], }; let scoping = Scoping { @@ -614,7 +614,7 @@ mod tests { assert!(rate_limit.matches(ItemScoping { category: DataCategory::MetricBucket, scoping: &scoping, - namespace: MetricNamespaceScoping::Some(MetricNamespace::Custom), + namespace: MetricNamespaceScoping::Some(MetricNamespace::Transactions), })); assert!(!rate_limit.matches(ItemScoping { @@ -832,7 +832,7 @@ mod tests { scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), - namespaces: smallvec![MetricNamespace::Custom], + namespaces: smallvec![MetricNamespace::Transactions], }); // Same category but different namespaces @@ -855,7 +855,7 @@ mod tests { reason_code: None, retry_after: RetryAfter(1), namespaces: [ - "custom", + "transactions", ], ), RateLimit( diff --git a/relay-server/src/metrics/metric_stats.rs b/relay-server/src/metrics/metric_stats.rs deleted file mode 100644 index 8ebc389852f..00000000000 --- a/relay-server/src/metrics/metric_stats.rs +++ /dev/null @@ -1,475 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::{Arc, OnceLock}; - -use relay_base_schema::organization::OrganizationId; -#[cfg(feature = "processing")] -use relay_cardinality::{CardinalityLimit, CardinalityReport}; -use 
relay_config::Config; -#[cfg(feature = "processing")] -use relay_metrics::GaugeValue; -use relay_metrics::{Bucket, BucketValue, MetricName, UnixTimestamp}; -use relay_quotas::Scoping; -use relay_system::Addr; - -use crate::metrics::TrackableBucket; -use crate::services::global_config::GlobalConfigHandle; -use crate::services::metrics::{Aggregator, MergeBuckets}; -use crate::services::outcome::Outcome; -use crate::utils::is_rolled_out; - -fn volume_metric_mri() -> MetricName { - static VOLUME_METRIC_MRI: OnceLock<MetricName> = OnceLock::new(); - - VOLUME_METRIC_MRI - .get_or_init(|| "c:metric_stats/volume@none".into()) - .clone() -} - -#[cfg(feature = "processing")] -fn cardinality_metric_mri() -> MetricName { - static CARDINALITY_METRIC_MRI: OnceLock<MetricName> = OnceLock::new(); - - CARDINALITY_METRIC_MRI - .get_or_init(|| "g:metric_stats/cardinality@none".into()) - .clone() -} - -/// Tracks stats about metrics. -/// -/// Metric stats are similar to outcomes for envelopes, they record -/// the final "fate" of a metric and its properties. -/// -/// Unlike outcomes metric stats are tracked on a per MRI basis -/// and contain additional metadata, like the cardinality of a metric. -#[derive(Clone, Debug)] -pub struct MetricStats { - config: Arc<Config>, - global_config: GlobalConfigHandle, - aggregator: Addr<Aggregator>, -} - -impl MetricStats { - /// Creates a new [`MetricStats`] instance. - pub fn new( - config: Arc<Config>, - global_config: GlobalConfigHandle, - aggregator: Addr<Aggregator>, - ) -> Self { - Self { - config, - global_config, - aggregator, - } - } - - /// Tracks the metric volume and outcome for the bucket.
- pub fn track_metric(&self, scoping: Scoping, bucket: impl TrackableBucket, outcome: &Outcome) { - if !self.is_enabled(scoping) { - return; - } - - let Some(volume) = self.to_volume_metric(&bucket, outcome) else { - return; - }; - - relay_log::trace!( - "Tracking volume of {} for mri '{}': {}", - bucket.metadata().merges, - bucket.name(), - outcome - ); - self.aggregator - .send(MergeBuckets::new(scoping.project_key, vec![volume])); - } - - /// Tracks the cardinality of a metric. - #[cfg(feature = "processing")] - pub fn track_cardinality( - &self, - scoping: Scoping, - limit: &CardinalityLimit, - report: &CardinalityReport, - ) { - if !self.is_enabled(scoping) { - return; - } - - let Some(cardinality) = self.to_cardinality_metric(limit, report) else { - return; - }; - - relay_log::trace!( - "Tracking cardinality '{}' for mri '{}': {}", - limit.id, - report.metric_name.as_deref().unwrap_or("-"), - report.cardinality, - ); - self.aggregator - .send(MergeBuckets::new(scoping.project_key, vec![cardinality])); - } - - fn is_enabled(&self, scoping: Scoping) -> bool { - self.config.metric_stats_enabled() && self.is_rolled_out(scoping.organization_id) - } - - fn is_rolled_out(&self, organization_id: OrganizationId) -> bool { - let rate = self - .global_config - .current() - .options - .metric_stats_rollout_rate; - - is_rolled_out(organization_id.value(), rate) - } - - fn to_volume_metric(&self, bucket: impl TrackableBucket, outcome: &Outcome) -> Option { - let volume = bucket.metadata().merges; - if volume == 0 { - return None; - } - - let namespace = bucket.name().namespace(); - if !namespace.has_metric_stats() { - return None; - } - - let mut tags = BTreeMap::from([ - ("mri".to_owned(), bucket.name().to_string()), - ("mri.type".to_owned(), bucket.ty().to_string()), - ("mri.namespace".to_owned(), namespace.to_string()), - ( - "outcome.id".to_owned(), - outcome.to_outcome_id().as_u8().to_string(), - ), - ]); - - if let Some(reason) = outcome.to_reason() { - 
tags.insert("outcome.reason".to_owned(), reason.into_owned()); - } - - Some(Bucket { - timestamp: UnixTimestamp::now(), - width: 0, - name: volume_metric_mri(), - value: BucketValue::Counter(volume.into()), - tags, - metadata: Default::default(), - }) - } - - #[cfg(feature = "processing")] - fn to_cardinality_metric( - &self, - limit: &CardinalityLimit, - report: &CardinalityReport, - ) -> Option { - let cardinality = report.cardinality; - if cardinality == 0 { - return None; - } - - let mut tags = BTreeMap::from([ - ("cardinality.limit".to_owned(), limit.id.clone()), - ( - "cardinality.scope".to_owned(), - limit.scope.as_str().to_owned(), - ), - ( - "cardinality.window".to_owned(), - limit.window.window_seconds.to_string(), - ), - ]); - - if let Some(ref name) = report.metric_name { - tags.insert("mri".to_owned(), name.to_string()); - tags.insert("mri.namespace".to_owned(), name.namespace().to_string()); - if let Some(t) = name.try_type() { - tags.insert("mri.type".to_owned(), t.to_string()); - } - } else { - if let Some(namespace) = limit.namespace { - tags.insert("mri.namespace".to_owned(), namespace.to_string()); - } - if let Some(t) = report.metric_type { - tags.insert("mri.type".to_owned(), t.to_string()); - } - } - - Some(Bucket { - timestamp: report.timestamp, - width: 0, - name: cardinality_metric_mri(), - value: BucketValue::Gauge(GaugeValue::single(cardinality.into())), - tags, - metadata: Default::default(), - }) - } -} - -#[cfg(test)] -mod tests { - use relay_base_schema::project::{ProjectId, ProjectKey}; - #[cfg(feature = "processing")] - use relay_cardinality::{CardinalityScope, SlidingWindow}; - use relay_dynamic_config::GlobalConfig; - #[cfg(feature = "processing")] - use relay_metrics::{MetricNamespace, MetricType}; - use relay_quotas::ReasonCode; - use tokio::sync::mpsc::UnboundedReceiver; - - use super::*; - - impl MetricStats { - pub fn test() -> (Self, UnboundedReceiver) { - create_metric_stats(1.0) - } - } - - fn 
create_metric_stats(rollout_rate: f32) -> (MetricStats, UnboundedReceiver) { - let config = Config::from_json_value(serde_json::json!({ - "processing": { - "enabled": true, - "kafka_config": [], - } - })) - .unwrap(); - - let mut global_config = GlobalConfig::default(); - global_config.options.metric_stats_rollout_rate = rollout_rate; - let global_config = GlobalConfigHandle::fixed(global_config); - - let (addr, receiver) = Addr::custom(); - let ms = MetricStats::new(Arc::new(config), global_config, addr); - - (ms, receiver) - } - - fn scoping() -> Scoping { - Scoping { - organization_id: OrganizationId::new(42), - project_id: ProjectId::new(21), - project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), - key_id: Some(17), - } - } - - macro_rules! tags { - ($(($key:expr, $value:expr),)*) => { - BTreeMap::from([ - $(($key.to_owned(), $value.to_owned())),* - ]) - } - } - - #[test] - fn test_metric_stats_volume() { - let (ms, mut receiver) = create_metric_stats(1.0); - - let scoping = scoping(); - let mut bucket = Bucket::parse(b"rt@millisecond:57|d", UnixTimestamp::now()).unwrap(); - - ms.track_metric(scoping, &bucket, &Outcome::Accepted); - - bucket.metadata.merges = bucket.metadata.merges.saturating_add(41); - ms.track_metric( - scoping, - &bucket, - &Outcome::RateLimited(Some(ReasonCode::new("foobar"))), - ); - - drop(ms); - - let Aggregator::MergeBuckets(mut mb) = receiver.blocking_recv().unwrap(); - assert_eq!(mb.project_key, scoping.project_key); - - assert_eq!(mb.buckets.len(), 1); - let bucket = mb.buckets.pop().unwrap(); - - assert_eq!(&*bucket.name, "c:metric_stats/volume@none"); - assert_eq!(bucket.value, BucketValue::Counter(1.into())); - assert_eq!( - bucket.tags, - tags!( - ("mri", "d:custom/rt@millisecond"), - ("mri.type", "d"), - ("mri.namespace", "custom"), - ("outcome.id", "0"), - ) - ); - - let Aggregator::MergeBuckets(mut mb) = receiver.blocking_recv().unwrap(); - assert_eq!(mb.project_key, scoping.project_key); - - 
assert_eq!(mb.buckets.len(), 1); - let bucket = mb.buckets.pop().unwrap(); - - assert_eq!(&*bucket.name, "c:metric_stats/volume@none"); - assert_eq!(bucket.value, BucketValue::Counter(42.into())); - assert_eq!( - bucket.tags, - tags!( - ("mri", "d:custom/rt@millisecond"), - ("mri.type", "d"), - ("mri.namespace", "custom"), - ("outcome.id", "2"), - ("outcome.reason", "foobar"), - ) - ); - - assert!(receiver.blocking_recv().is_none()); - } - - #[test] - #[cfg(feature = "processing")] - fn test_metric_stats_cardinality_name() { - let (ms, mut receiver) = create_metric_stats(1.0); - - let scoping = scoping(); - let limit = CardinalityLimit { - id: "test".to_owned(), - passive: false, - report: true, - window: SlidingWindow { - window_seconds: 246, - granularity_seconds: 123, - }, - limit: 99, - scope: CardinalityScope::Name, - namespace: None, - }; - let report = CardinalityReport { - timestamp: UnixTimestamp::from_secs(3333), - organization_id: Some(scoping.organization_id), - project_id: Some(scoping.project_id), - metric_type: None, - metric_name: Some(MetricName::from("d:custom/rt@millisecond")), - cardinality: 12, - }; - - ms.track_cardinality(scoping, &limit, &report); - - drop(ms); - - let Aggregator::MergeBuckets(mut mb) = receiver.blocking_recv().unwrap(); - assert_eq!(mb.project_key, scoping.project_key); - - assert_eq!(mb.buckets.len(), 1); - let bucket = mb.buckets.pop().unwrap(); - - assert_eq!(&*bucket.name, "g:metric_stats/cardinality@none"); - assert_eq!(bucket.timestamp, UnixTimestamp::from_secs(3333)); - assert_eq!( - bucket.value, - BucketValue::Gauge(GaugeValue { - last: 12.into(), - min: 12.into(), - max: 12.into(), - sum: 12.into(), - count: 1, - }) - ); - assert_eq!( - bucket.tags, - tags!( - ("mri", "d:custom/rt@millisecond"), - ("mri.type", "d"), - ("mri.namespace", "custom"), - ("cardinality.limit", "test"), - ("cardinality.scope", "name"), - ("cardinality.window", "246"), - ) - ); - - assert!(receiver.blocking_recv().is_none()); - } - - 
#[test] - #[cfg(feature = "processing")] - fn test_metric_stats_cardinality_type() { - let (ms, mut receiver) = create_metric_stats(1.0); - - let scoping = scoping(); - let limit = CardinalityLimit { - id: "test".to_owned(), - passive: false, - report: true, - window: SlidingWindow { - window_seconds: 246, - granularity_seconds: 123, - }, - limit: 99, - scope: CardinalityScope::Type, - namespace: Some(MetricNamespace::Spans), - }; - let report = CardinalityReport { - timestamp: UnixTimestamp::from_secs(2222), - organization_id: Some(scoping.organization_id), - project_id: Some(scoping.project_id), - metric_type: Some(MetricType::Distribution), - metric_name: None, - cardinality: 12, - }; - - ms.track_cardinality(scoping, &limit, &report); - - drop(ms); - - let Aggregator::MergeBuckets(mut mb) = receiver.blocking_recv().unwrap(); - assert_eq!(mb.project_key, scoping.project_key); - - assert_eq!(mb.buckets.len(), 1); - let bucket = mb.buckets.pop().unwrap(); - - assert_eq!(&*bucket.name, "g:metric_stats/cardinality@none"); - assert_eq!(bucket.timestamp, UnixTimestamp::from_secs(2222)); - assert_eq!( - bucket.value, - BucketValue::Gauge(GaugeValue { - last: 12.into(), - min: 12.into(), - max: 12.into(), - sum: 12.into(), - count: 1, - }) - ); - assert_eq!( - bucket.tags, - tags!( - ("mri.type", "d"), - ("mri.namespace", "spans"), - ("cardinality.limit", "test"), - ("cardinality.scope", "type"), - ("cardinality.window", "246"), - ) - ); - - assert!(receiver.blocking_recv().is_none()); - } - - #[test] - fn test_metric_stats_rollout_rate_disabled() { - let (ms, mut receiver) = create_metric_stats(0.0); - - let scoping = scoping(); - let bucket = Bucket::parse(b"rt@millisecond:57|d", UnixTimestamp::now()).unwrap(); - ms.track_metric(scoping, &bucket, &Outcome::Accepted); - - drop(ms); - - assert!(receiver.blocking_recv().is_none()); - } - - #[test] - fn test_metric_stats_disabled_namespace() { - let (ms, mut receiver) = create_metric_stats(1.0); - - let scoping = 
scoping(); - let bucket = - Bucket::parse(b"transactions/rt@millisecond:57|d", UnixTimestamp::now()).unwrap(); - ms.track_metric(scoping, &bucket, &Outcome::Accepted); - - drop(ms); - - assert!(receiver.blocking_recv().is_none()); - } -} diff --git a/relay-server/src/metrics/mod.rs b/relay-server/src/metrics/mod.rs index 2576a7b385a..a3c531cd95d 100644 --- a/relay-server/src/metrics/mod.rs +++ b/relay-server/src/metrics/mod.rs @@ -1,13 +1,11 @@ #[cfg(feature = "processing")] mod bucket_encoding; -mod metric_stats; mod minimal; mod outcomes; mod rate_limits; #[cfg(feature = "processing")] pub use self::bucket_encoding::*; -pub use self::metric_stats::*; pub use self::minimal::*; pub use self::outcomes::*; pub use self::rate_limits::*; diff --git a/relay-server/src/metrics/outcomes.rs b/relay-server/src/metrics/outcomes.rs index 5d0e2ff40f3..d7b612ac031 100644 --- a/relay-server/src/metrics/outcomes.rs +++ b/relay-server/src/metrics/outcomes.rs @@ -7,7 +7,6 @@ use relay_quotas::{DataCategory, Scoping}; use relay_system::Addr; use crate::envelope::SourceQuantities; -use crate::metrics::MetricStats; use crate::services::outcome::{Outcome, TrackOutcome}; #[cfg(feature = "processing")] use relay_cardinality::{CardinalityLimit, CardinalityReport}; @@ -19,17 +18,13 @@ use relay_cardinality::{CardinalityLimit, CardinalityReport}; /// like custom. #[derive(Debug, Clone)] pub struct MetricOutcomes { - metric_stats: MetricStats, outcomes: Addr, } impl MetricOutcomes { /// Creates a new [`MetricOutcomes`]. - pub fn new(metric_stats: MetricStats, outcomes: Addr) -> Self { - Self { - metric_stats, - outcomes, - } + pub fn new(outcomes: Addr) -> Self { + Self { outcomes } } /// Tracks an outcome for a list of buckets and generates the necessary outcomes. 
@@ -67,28 +62,17 @@ impl MetricOutcomes { } } } - - // When rejecting metrics, we need to make sure that the number of merges is correctly handled - // for buckets views, since if we have a bucket which has 5 merges, and it's split into 2 - // bucket views, we will emit the volume of the rejection as 5 + 5 merges since we still read - // the underlying metadata for each view, and it points to the same bucket reference. - // Possible solutions to this problem include emitting the merges only if the bucket view is - // the first of view or distributing uniformly the metadata between split views. - for bucket in buckets { - relay_log::trace!("{:<50} -> {outcome}", bucket.name()); - self.metric_stats.track_metric(scoping, bucket, &outcome) - } } /// Tracks the cardinality of a metric. #[cfg(feature = "processing")] pub fn cardinality( &self, - scoping: Scoping, - limit: &CardinalityLimit, - report: &CardinalityReport, + _scoping: Scoping, + _limit: &CardinalityLimit, + _report: &CardinalityReport, ) { - self.metric_stats.track_cardinality(scoping, limit, report) + // Future entrypoint if it ever becomes necessary again to track cardinality. 
} } diff --git a/relay-server/src/metrics/rate_limits.rs b/relay-server/src/metrics/rate_limits.rs index 993008ae8bb..5216763ab90 100644 --- a/relay-server/src/metrics/rate_limits.rs +++ b/relay-server/src/metrics/rate_limits.rs @@ -220,8 +220,6 @@ mod tests { use relay_system::Addr; use smallvec::smallvec; - use crate::metrics::MetricStats; - use super::*; fn deny(category: DataCategory) -> Vec { @@ -243,7 +241,7 @@ mod tests { quotas: Vec, ) -> (Vec, Vec<(Outcome, DataCategory, u32)>) { let (outcome_sink, mut rx) = Addr::custom(); - let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_sink.clone()); + let metric_outcomes = MetricOutcomes::new(outcome_sink.clone()); let mut limiter = MetricsLimiter::create( metrics, diff --git a/relay-server/src/service.rs b/relay-server/src/service.rs index d098eb486b4..124718177ed 100644 --- a/relay-server/src/service.rs +++ b/relay-server/src/service.rs @@ -2,7 +2,7 @@ use std::convert::Infallible; use std::sync::Arc; use std::time::Duration; -use crate::metrics::{MetricOutcomes, MetricStats}; +use crate::metrics::MetricOutcomes; use crate::services::autoscaling::{AutoscalingMetricService, AutoscalingMetrics}; use crate::services::buffer::{ ObservableEnvelopeBuffer, PartitionedEnvelopeBuffer, ProjectKeyPair, @@ -219,13 +219,7 @@ impl ServiceState { let aggregator_handle = aggregator.handle(); let aggregator = services.start(aggregator); - let metric_stats = MetricStats::new( - config.clone(), - global_config_handle.clone(), - aggregator.clone(), - ); - - let metric_outcomes = MetricOutcomes::new(metric_stats, outcome_aggregator.clone()); + let metric_outcomes = MetricOutcomes::new(outcome_aggregator.clone()); #[cfg(feature = "processing")] let store_pool = create_store_pool(&config)?; diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 4970b255cf6..892eefe5336 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ 
-2447,7 +2447,7 @@ impl EnvelopeProcessorService { return false; } - if !self::metrics::is_valid_namespace(bucket, source) { + if !self::metrics::is_valid_namespace(bucket) { return false; } diff --git a/relay-server/src/services/processor/metrics.rs b/relay-server/src/services/processor/metrics.rs index ff084a2f848..fd72e0ddeb5 100644 --- a/relay-server/src/services/processor/metrics.rs +++ b/relay-server/src/services/processor/metrics.rs @@ -1,25 +1,20 @@ -use relay_dynamic_config::Feature; use relay_filter::FilterStatKey; use relay_metrics::{Bucket, MetricNamespace}; use relay_quotas::Scoping; use crate::metrics::MetricOutcomes; use crate::services::outcome::Outcome; -use crate::services::processor::BucketSource; use crate::services::projects::project::ProjectInfo; /// Checks if the namespace of the passed bucket is valid. /// /// This is returns `true` for most namespaces except: /// - [`MetricNamespace::Unsupported`]: Equal to invalid/unknown namespaces. -/// - [`MetricNamespace::Stats`]: Metric stats are only allowed if the `source` is [`BucketSource::Internal`]. 
-pub fn is_valid_namespace(bucket: &Bucket, source: BucketSource) -> bool { +pub fn is_valid_namespace(bucket: &Bucket) -> bool { match bucket.name.namespace() { MetricNamespace::Sessions => true, MetricNamespace::Transactions => true, MetricNamespace::Spans => true, - MetricNamespace::Custom => true, - MetricNamespace::Stats => source == BucketSource::Internal, MetricNamespace::Unsupported => false, } } @@ -61,8 +56,6 @@ fn is_metric_namespace_valid(state: &ProjectInfo, namespace: MetricNamespace) -> MetricNamespace::Sessions => true, MetricNamespace::Transactions => true, MetricNamespace::Spans => state.config.features.produces_spans(), - MetricNamespace::Custom => state.has_feature(Feature::CustomMetrics), - MetricNamespace::Stats => true, MetricNamespace::Unsupported => false, } } @@ -74,8 +67,7 @@ mod tests { use relay_metrics::{BucketValue, UnixTimestamp}; use relay_system::Addr; - use crate::metrics::MetricStats; - use crate::services::metrics::Aggregator; + use crate::services::outcome::TrackOutcome; use super::*; @@ -92,9 +84,8 @@ mod tests { #[test] fn test_apply_project_info_with_disabled_custom_namespace() { - let (outcome_aggregator, _) = Addr::custom(); - let (metric_stats, mut metric_stats_rx) = MetricStats::test(); - let metric_outcomes = MetricOutcomes::new(metric_stats, outcome_aggregator); + let (outcomes, mut outcomes_rx) = Addr::custom(); + let metric_outcomes = MetricOutcomes::new(outcomes); let b1 = create_custom_bucket_with_name("cpu_time".into()); let b2 = create_custom_bucket_with_name("memory_usage".into()); @@ -114,15 +105,12 @@ mod tests { assert!(buckets.is_empty()); - // We assert that two metrics are emitted by metric stats. 
for _ in 0..2 { - let value = metric_stats_rx.blocking_recv().unwrap(); - let Aggregator::MergeBuckets(merge_buckets) = value; - assert_eq!(merge_buckets.buckets.len(), 1); - let BucketValue::Counter(value) = merge_buckets.buckets[0].value else { - panic!(); - }; - assert_eq!(value.to_f64(), 1.0); + let value: TrackOutcome = outcomes_rx.blocking_recv().unwrap(); + assert_eq!( + value.outcome, + Outcome::Filtered(FilterStatKey::DisabledNamespace) + ); } } } diff --git a/relay-server/src/services/store.rs b/relay-server/src/services/store.rs index 41994745336..78e636d840c 100644 --- a/relay-server/src/services/store.rs +++ b/relay-server/src/services/store.rs @@ -1480,8 +1480,6 @@ impl Message for KafkaMessage<'_> { MetricNamespace::Sessions => "metric_sessions", MetricNamespace::Transactions => "metric_transactions", MetricNamespace::Spans => "metric_spans", - MetricNamespace::Custom => "metric_custom", - MetricNamespace::Stats => "metric_metric_stats", MetricNamespace::Unsupported => "metric_unsupported", }, KafkaMessage::Profile(_) => "profile", diff --git a/relay-server/src/testutils.rs b/relay-server/src/testutils.rs index 6dd616fcfd6..77f51f1149a 100644 --- a/relay-server/src/testutils.rs +++ b/relay-server/src/testutils.rs @@ -13,7 +13,7 @@ use relay_system::Addr; use relay_test::mock_service; use crate::envelope::{Envelope, Item, ItemType}; -use crate::metrics::{MetricOutcomes, MetricStats}; +use crate::metrics::MetricOutcomes; #[cfg(feature = "processing")] use crate::service::create_redis_pools; use crate::services::global_config::GlobalConfigHandle; @@ -117,7 +117,7 @@ pub async fn create_test_processor(config: Config) -> EnvelopeProcessorService { .transpose() .unwrap(); - let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_aggregator.clone()); + let metric_outcomes = MetricOutcomes::new(outcome_aggregator.clone()); let config = Arc::new(config); EnvelopeProcessorService::new( @@ -151,8 +151,7 @@ pub async fn 
create_test_processor_with_addrs( } .transpose() .unwrap(); - let metric_outcomes = - MetricOutcomes::new(MetricStats::test().0, addrs.outcome_aggregator.clone()); + let metric_outcomes = MetricOutcomes::new(addrs.outcome_aggregator.clone()); let config = Arc::new(config); EnvelopeProcessorService::new( diff --git a/relay-server/src/utils/pick.rs b/relay-server/src/utils/pick.rs index 021a9b44577..847971282e8 100644 --- a/relay-server/src/utils/pick.rs +++ b/relay-server/src/utils/pick.rs @@ -2,6 +2,7 @@ /// /// Deterministically makes a rollout decision for an id, usually organization id, /// and rate. +#[cfg_attr(not(test), expect(unused, reason = "currently no rollouts defined"))] pub fn is_rolled_out(id: u64, rate: f32) -> bool { ((id % 100000) as f32 / 100000.0f32) < rate } diff --git a/relay-server/src/utils/rate_limits.rs b/relay-server/src/utils/rate_limits.rs index 54347e7ccf5..d215f2e830c 100644 --- a/relay-server/src/utils/rate_limits.rs +++ b/relay-server/src/utils/rate_limits.rs @@ -929,7 +929,7 @@ mod tests { scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("my_limit")), retry_after: RetryAfter::from_secs(42), - namespaces: smallvec![MetricNamespace::Custom, MetricNamespace::Spans], + namespaces: smallvec![MetricNamespace::Transactions, MetricNamespace::Spans], }); // Rate limit without reason code. 
@@ -943,7 +943,7 @@ mod tests { let formatted = format_rate_limits(&rate_limits); let expected = - "42:metric_bucket:organization:my_limit:custom;spans, 42:metric_bucket:organization::spans"; + "42:metric_bucket:organization:my_limit:transactions;spans, 42:metric_bucket:organization::spans"; assert_eq!(formatted, expected); } @@ -1013,7 +1013,7 @@ mod tests { key_id: Some(17), }; - let formatted = "42:metric_bucket:organization::custom;spans"; + let formatted = "42:metric_bucket:organization::transactions;spans"; let rate_limits: Vec = parse_rate_limits(&scoping, formatted).into_iter().collect(); @@ -1024,7 +1024,7 @@ mod tests { scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: rate_limits[0].retry_after, - namespaces: smallvec![MetricNamespace::Custom, MetricNamespace::Spans], + namespaces: smallvec![MetricNamespace::Transactions, MetricNamespace::Spans], }] ); } diff --git a/tests/integration/test_cardinality_limiter.py b/tests/integration/test_cardinality_limiter.py index 13e85f8e996..0a5046d1946 100644 --- a/tests/integration/test_cardinality_limiter.py +++ b/tests/integration/test_cardinality_limiter.py @@ -24,7 +24,6 @@ def metrics_by_namespace(metrics_consumer, count, timeout=None): def add_project_config(mini_sentry, project_id, cardinality_limits=None): project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["features"] = ["organizations:custom-metrics"] project_config["config"]["metrics"] = { "cardinalityLimits": cardinality_limits or [] } @@ -42,14 +41,7 @@ def test_cardinality_limits(mini_sentry, relay_with_processing, metrics_consumer "limit": 1, "scope": "project", "namespace": "transactions", - }, - { - "id": "custom", - "window": {"windowSeconds": 3600, "granularitySeconds": 600}, - "limit": 2, - "scope": "project", - "namespace": "custom", - }, + } ] add_project_config(mini_sentry, project_id, cardinality_limits) @@ -59,15 +51,15 @@ def 
test_cardinality_limits(mini_sentry, relay_with_processing, metrics_consumer "transactions/foo@second:12|c", "transactions/bar@second:23|c", "sessions/foo@second:12|c", - "foo@second:12|c", - "bar@second:23|c", - "baz@second:17|c", + "spans/foo@second:12|c", + "spans/bar@second:23|c", + "spans/baz@second:17|c", ] ) relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_namespace(metrics_consumer, 4) - assert len(metrics["custom"]) == 2 + assert len(metrics["spans"]) == 2 assert len(metrics["sessions"]) == 1 assert len(metrics["transactions"]) == 1 @@ -130,11 +122,11 @@ def test_cardinality_limits_passive_limit( "namespace": "transactions", }, { - "id": "custom", + "id": "spans", "window": {"windowSeconds": 3600, "granularitySeconds": 600}, "limit": 2, "scope": "project", - "namespace": "custom", + "namespace": "spans", }, ] @@ -147,15 +139,15 @@ def test_cardinality_limits_passive_limit( "transactions/baz@second:33|c", "transactions/lol@second:55|c", "sessions/foo@second:12|c", - "foo@second:12|c", - "bar@second:23|c", - "baz@second:17|c", + "spans/foo@second:12|c", + "spans/bar@second:23|c", + "spans/baz@second:17|c", ] ) relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_namespace(metrics_consumer, 6) - assert len(metrics["custom"]) == 2 + assert len(metrics["spans"]) == 2 assert len(metrics["sessions"]) == 1 # The passive limit should be ignored, the non-passive limit still needs to be enforced. 
assert len(metrics["transactions"]) == 3 diff --git a/tests/integration/test_metric_stats.py b/tests/integration/test_metric_stats.py deleted file mode 100644 index a6795d77667..00000000000 --- a/tests/integration/test_metric_stats.py +++ /dev/null @@ -1,304 +0,0 @@ -import copy -from typing import Any -import pytest -import time -from dataclasses import dataclass -from .test_metrics import TEST_CONFIG - - -@dataclass -class MetricStatsByMri: - volume: dict[str, Any] - cardinality: dict[str, Any] - other: list[Any] - - -def metric_stats_by_mri(metrics_consumer, count, timeout=5): - volume = dict() - cardinality = dict() - other = list() - - for i in range(count): - try: - metric, _ = metrics_consumer.get_metric(timeout) - if metric["name"] == "c:metric_stats/volume@none": - volume[metric["tags"]["mri"]] = metric - elif metric["name"] == "g:metric_stats/cardinality@none": - cardinality[metric["tags"]["mri"]] = metric - else: - other.append(metric) - except AssertionError: - pytest.fail(f"Message {i + 1} not found.") - - metrics_consumer.assert_empty() - return MetricStatsByMri(volume=volume, cardinality=cardinality, other=other) - - -@pytest.mark.parametrize("mode", ["default", "chain"]) -def test_metric_stats_simple( - mini_sentry, relay, relay_with_processing, relay_credentials, metrics_consumer, mode -): - mini_sentry.global_config["options"]["relay.metric-stats.rollout-rate"] = 1.0 - - metrics_consumer = metrics_consumer() - - if mode == "default": - relay = relay_with_processing(options=TEST_CONFIG) - elif mode == "chain": - credentials = relay_credentials() - static_relays = { - credentials["id"]: { - "public_key": credentials["public_key"], - "internal": True, - }, - } - relay = relay( - relay_with_processing(options=TEST_CONFIG, static_relays=static_relays), - options=TEST_CONFIG, - credentials=credentials, - ) - - project_id = 42 - project_config = mini_sentry.add_basic_project_config(project_id) - project_config["config"]["features"] = [ - 
"organizations:custom-metrics", - "organizations:metric-stats", - ] - project_config["config"]["metrics"] = { - "cardinalityLimits": [ - { - "id": "custom-limit", - "window": {"windowSeconds": 3600, "granularitySeconds": 600}, - "report": True, - "limit": 100, - "scope": "name", - "namespace": "custom", - } - ] - } - - relay.send_metrics( - project_id, "custom/foo:1337|d\ncustom/foo:12|d|#tag:value\ncustom/bar:42|s" - ) - - metrics = metric_stats_by_mri(metrics_consumer, 7) - - assert metrics.volume["d:custom/foo@none"]["org_id"] == 0 - assert metrics.volume["d:custom/foo@none"]["project_id"] == project_id - assert metrics.volume["d:custom/foo@none"]["value"] == 2.0 - assert metrics.volume["d:custom/foo@none"]["tags"] == { - "mri": "d:custom/foo@none", - "mri.type": "d", - "mri.namespace": "custom", - "outcome.id": "0", - } - assert metrics.volume["s:custom/bar@none"]["org_id"] == 0 - assert metrics.volume["s:custom/bar@none"]["project_id"] == project_id - assert metrics.volume["s:custom/bar@none"]["value"] == 1.0 - assert metrics.volume["s:custom/bar@none"]["tags"] == { - "mri": "s:custom/bar@none", - "mri.type": "s", - "mri.namespace": "custom", - "outcome.id": "0", - } - assert len(metrics.volume) == 2 - assert metrics.cardinality["d:custom/foo@none"]["org_id"] == 0 - assert metrics.cardinality["d:custom/foo@none"]["project_id"] == project_id - assert metrics.cardinality["d:custom/foo@none"]["value"] == { - "count": 1, - "last": 2.0, - "max": 2.0, - "min": 2.0, - "sum": 2.0, - } - assert metrics.cardinality["d:custom/foo@none"]["tags"] == { - "mri": "d:custom/foo@none", - "mri.type": "d", - "mri.namespace": "custom", - "cardinality.limit": "custom-limit", - "cardinality.scope": "name", - "cardinality.window": "3600", - } - assert metrics.cardinality["s:custom/bar@none"]["org_id"] == 0 - assert metrics.cardinality["s:custom/bar@none"]["project_id"] == project_id - assert metrics.cardinality["s:custom/bar@none"]["value"] == { - "count": 1, - "last": 1.0, - "max": 
1.0, - "min": 1.0, - "sum": 1.0, - } - assert metrics.cardinality["s:custom/bar@none"]["tags"] == { - "mri": "s:custom/bar@none", - "mri.type": "s", - "mri.namespace": "custom", - "cardinality.limit": "custom-limit", - "cardinality.scope": "name", - "cardinality.window": "3600", - } - assert len(metrics.cardinality) == 2 - assert len(metrics.other) == 3 - - -@pytest.mark.parametrize("mode", ["default", "chain"]) -def test_metric_stats_with_limit_surpassed( - mini_sentry, relay, relay_with_processing, relay_credentials, metrics_consumer, mode -): - mini_sentry.global_config["options"]["relay.metric-stats.rollout-rate"] = 1.0 - - metrics_consumer = metrics_consumer() - - if mode == "default": - relay = relay_with_processing(options=TEST_CONFIG) - elif mode == "chain": - credentials = relay_credentials() - static_relays = { - credentials["id"]: { - "public_key": credentials["public_key"], - "internal": True, - }, - } - relay = relay( - relay_with_processing(options=TEST_CONFIG, static_relays=static_relays), - options=TEST_CONFIG, - credentials=credentials, - ) - - project_id = 42 - project_config = mini_sentry.add_basic_project_config(project_id) - project_config["config"]["features"] = [ - "organizations:custom-metrics", - "organizations:metric-stats", - ] - project_config["config"]["metrics"] = { - "cardinalityLimits": [ - { - "id": "org-limit", - "window": {"windowSeconds": 1, "granularitySeconds": 1}, - "report": True, - "limit": 0, - "scope": "org", - "namespace": "custom", - }, - { - "id": "name-limit", - "window": {"windowSeconds": 1, "granularitySeconds": 1}, - "report": True, - "limit": 0, - "scope": "name", - "namespace": "custom", - }, - ] - } - - relay.send_metrics( - project_id, "custom/foo:1337|d\ncustom/baz:12|d|#tag:value\ncustom/bar:42|s" - ) - - metrics = metric_stats_by_mri(metrics_consumer, 3) - assert metrics.volume["d:custom/foo@none"]["org_id"] == 0 - assert metrics.volume["d:custom/foo@none"]["project_id"] == project_id - assert 
metrics.volume["d:custom/foo@none"]["value"] == 1.0 - assert metrics.volume["d:custom/foo@none"]["tags"] == { - "mri": "d:custom/foo@none", - "mri.type": "d", - "mri.namespace": "custom", - "outcome.id": "6", - "outcome.reason": "name-limit", - } - assert metrics.volume["d:custom/baz@none"]["org_id"] == 0 - assert metrics.volume["d:custom/baz@none"]["project_id"] == project_id - assert metrics.volume["d:custom/baz@none"]["value"] == 1.0 - assert metrics.volume["d:custom/baz@none"]["tags"] == { - "mri": "d:custom/baz@none", - "mri.type": "d", - "mri.namespace": "custom", - "outcome.id": "6", - "outcome.reason": "name-limit", - } - assert metrics.volume["s:custom/bar@none"]["org_id"] == 0 - assert metrics.volume["s:custom/bar@none"]["project_id"] == project_id - assert metrics.volume["s:custom/bar@none"]["value"] == 1.0 - assert metrics.volume["s:custom/bar@none"]["tags"] == { - "mri": "s:custom/bar@none", - "mri.type": "s", - "mri.namespace": "custom", - "outcome.id": "6", - "outcome.reason": "name-limit", - } - assert len(metrics.volume) == 3 - assert len(metrics.cardinality) == 0 - assert len(metrics.other) == 0 - - -def test_metric_stats_max_flush_bytes( - mini_sentry, relay_with_processing, metrics_consumer -): - mini_sentry.global_config["options"]["relay.metric-stats.rollout-rate"] = 1.0 - - metrics_consumer = metrics_consumer() - - relay_config = copy.deepcopy(TEST_CONFIG) - relay_config["aggregator"]["max_flush_bytes"] = 150 - - relay = relay_with_processing(options=relay_config) - - project_id = 42 - project_config = mini_sentry.add_basic_project_config(project_id) - project_config["config"]["features"] = [ - "organizations:custom-metrics", - "organizations:metric-stats", - ] - - # Metric is big enough to be split into multiple smaller metrics when emitting to Kafka, - # make sure the volume counted is still just 1. 
- relay.send_metrics( - project_id, "custom/foo:1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17:18:19:20|d" - ) - - metrics = metric_stats_by_mri(metrics_consumer, 3) - assert metrics.volume["d:custom/foo@none"]["value"] == 1.0 - assert len(metrics.other) == 2 - - -@pytest.mark.parametrize("enabled", [True, False]) -def test_metric_stats_non_processing(mini_sentry, relay, enabled): - mini_sentry.global_config["options"]["relay.metric-stats.rollout-rate"] = 1.0 - - relay = relay( - mini_sentry, - options={ - "sentry_metrics": {"metric_stats_enabled": enabled}, - "http": {"global_metrics": True}, - **TEST_CONFIG, - }, - ) - - project_id = 42 - config = mini_sentry.add_basic_project_config(project_id) - public_key = config["publicKeys"][0]["publicKey"] - - # Custom metrics are disabled -> should log metric stats and drop the metric - relay.send_metrics(project_id, "custom/foo:1337|c") - - if enabled: - metrics = mini_sentry.captured_metrics.get(timeout=3)[public_key] - del metrics[0]["timestamp"] - assert metrics == [ - { - "width": 1, - "name": "c:metric_stats/volume@none", - "type": "c", - "value": 1.0, - "tags": { - "mri": "c:custom/foo@none", - "mri.namespace": "custom", - "mri.type": "c", - "outcome.id": "1", - "outcome.reason": "disabled-namespace", - }, - } - ] - else: - time.sleep(3) - assert mini_sentry.captured_metrics.empty() diff --git a/tests/integration/test_metrics.py b/tests/integration/test_metrics.py index e5046648914..b9713858f4f 100644 --- a/tests/integration/test_metrics.py +++ b/tests/integration/test_metrics.py @@ -293,7 +293,7 @@ def test_metrics_max_batch_size(mini_sentry, relay, max_batch_size, expected_eve mini_sentry.captured_events.get(timeout=1) -@pytest.mark.parametrize("ns", [None, "custom", "transactions"]) +@pytest.mark.parametrize("ns", [None, "spans", "transactions"]) def test_metrics_rate_limits_namespace(mini_sentry, relay, ns): relay = relay(mini_sentry, options=TEST_CONFIG) @@ -453,11 +453,10 @@ def 
test_metrics_with_processing(mini_sentry, relay_with_processing, metrics_con metrics_consumer = metrics_consumer() project_id = 42 - project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["features"] = ["organizations:custom-metrics"] + mini_sentry.add_full_project_config(project_id) timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" + metrics_payload = f"transactions/foo:42|c\nspans/bar@second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_name(metrics_consumer, 2) @@ -477,12 +476,12 @@ def test_metrics_with_processing(mini_sentry, relay_with_processing, metrics_con "received_at": time_after(timestamp), } - assert metrics["headers"]["c:custom/bar@second"] == [("namespace", b"custom")] - assert metrics["c:custom/bar@second"] == { + assert metrics["headers"]["c:spans/bar@second"] == [("namespace", b"spans")] + assert metrics["c:spans/bar@second"] == { "org_id": 1, "project_id": project_id, "retention_days": 90, - "name": "c:custom/bar@second", + "name": "c:spans/bar@second", "tags": {}, "value": 17.0, "type": "c", @@ -504,12 +503,11 @@ def test_global_metrics_with_processing( metrics_consumer = metrics_consumer() project_id = 42 - project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["features"] = ["organizations:custom-metrics"] + mini_sentry.add_full_project_config(project_id) timestamp = int(datetime.now(tz=timezone.utc).timestamp()) metrics_payload = ( - f"transactions/foo:42|c|T{timestamp}\nbar@second:17|c|T{timestamp}" + f"transactions/foo:42|c|T{timestamp}\nspans/bar@second:17|c|T{timestamp}" ) relay.send_metrics(project_id, metrics_payload) @@ -530,12 +528,12 @@ def test_global_metrics_with_processing( "received_at": time_after(timestamp), } - assert metrics["headers"]["c:custom/bar@second"] == [("namespace", b"custom")] - assert metrics["c:custom/bar@second"] == 
{ + assert metrics["headers"]["c:spans/bar@second"] == [("namespace", b"spans")] + assert metrics["c:spans/bar@second"] == { "org_id": 1, "project_id": project_id, "retention_days": 90, - "name": "c:custom/bar@second", + "name": "c:spans/bar@second", "tags": {}, "value": 17.0, "type": "c", @@ -1724,24 +1722,6 @@ def test_span_metrics_secondary_aggregator( ] -def test_custom_metrics_disabled(mini_sentry, relay_with_processing, metrics_consumer): - relay = relay_with_processing(options=TEST_CONFIG) - metrics_consumer = metrics_consumer() - - project_id = 42 - mini_sentry.add_full_project_config(project_id) - # NOTE: "organizations:custom-metrics" missing from features - - timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" - relay.send_metrics(project_id, metrics_payload) - - metrics = metrics_by_name(metrics_consumer, 1) - - assert "c:transactions/foo@none" in metrics - assert "c:custom/bar@second" not in metrics - - @pytest.mark.parametrize("is_processing_relay", (False, True)) @pytest.mark.parametrize( "global_generic_filters", @@ -1935,19 +1915,16 @@ def test_metrics_received_at( ) project_id = 42 - project_config = mini_sentry.add_basic_project_config(project_id) - project_config["config"]["features"] = [ - "organizations:custom-metrics", - ] + mini_sentry.add_full_project_config(project_id) timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - relay.send_metrics(project_id, "custom/foo:1337|d") + relay.send_metrics(project_id, "spans/foo:1337|d") metric, _ = metrics_consumer.get_metric() assert metric == { "org_id": 0, - "project_id": 42, - "name": "d:custom/foo@none", + "project_id": project_id, + "name": "d:spans/foo@none", "type": "d", "value": [1337.0], "timestamp": time_after(timestamp), diff --git a/tests/integration/test_metrics_encoding.py b/tests/integration/test_metrics_encoding.py index 5b0595824b4..8e45d8a77b9 100644 --- 
a/tests/integration/test_metrics_encoding.py +++ b/tests/integration/test_metrics_encoding.py @@ -69,7 +69,7 @@ def test_metric_bucket_encoding_legacy( assert metrics["s:transactions/bar@none"]["value"] == [42.0] -@pytest.mark.parametrize("namespace", [None, "spans", "custom"]) +@pytest.mark.parametrize("namespace", ["spans", "transactions"]) @pytest.mark.parametrize("ty", ["set", "distribution"]) def test_metric_bucket_encoding_dynamic_global_config_option( mini_sentry, relay_with_processing, metrics_consumer, namespace, ty @@ -85,19 +85,16 @@ def test_metric_bucket_encoding_dynamic_global_config_option( project_id = 42 project_config = mini_sentry.add_basic_project_config(project_id) project_config["config"]["features"] = [ - "organizations:custom-metrics", "projects:span-metrics-extraction", ] - metrics_payload = ( - f"{namespace or 'custom'}/foo:1337|d\n{namespace or 'custom'}/bar:42|s" - ) + metrics_payload = f"{namespace}/foo:1337|d\n{namespace}/bar:42|s" relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_name(metrics_consumer, 2) - dname = f"d:{namespace or 'custom'}/foo@none" - sname = f"s:{namespace or 'custom'}/bar@none" + dname = f"d:{namespace}/foo@none" + sname = f"s:{namespace}/bar@none" assert dname in metrics assert sname in metrics diff --git a/tests/integration/test_store.py b/tests/integration/test_store.py index 16f6fa5fdec..715a324b42a 100644 --- a/tests/integration/test_store.py +++ b/tests/integration/test_store.py @@ -501,7 +501,7 @@ def transform(e): assert event["logentry"]["formatted"] == f"otherkey{i}" -@pytest.mark.parametrize("namespace", ["transactions", "custom"]) +@pytest.mark.parametrize("namespace", ["transactions", "spans"]) def test_sends_metric_bucket_outcome( mini_sentry, relay_with_processing, outcomes_consumer, namespace ): @@ -524,7 +524,6 @@ def test_sends_metric_bucket_outcome( projectconfig = mini_sentry.add_full_project_config(project_id) mini_sentry.add_dsn_key_to_project(project_id) - 
projectconfig["config"]["features"] = ["organizations:custom-metrics"] projectconfig["config"]["quotas"] = [ { "scope": "organization", @@ -534,7 +533,7 @@ def test_sends_metric_bucket_outcome( ] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" + metrics_payload = f"transactions/foo:42|c\nbar@spans/second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) outcome = outcomes_consumer.get_outcome(timeout=3)