diff --git a/opentelemetry-otlp/tests/integration_test/Cargo.toml b/opentelemetry-otlp/tests/integration_test/Cargo.toml index 413673286e..53abd0bcce 100644 --- a/opentelemetry-otlp/tests/integration_test/Cargo.toml +++ b/opentelemetry-otlp/tests/integration_test/Cargo.toml @@ -7,12 +7,15 @@ publish = false [dependencies] opentelemetry = { path = "../../../opentelemetry", features = ["metrics", "logs"] } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "logs", "testing"] } +opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "logs", "testing", "metrics"] } opentelemetry-proto = { path = "../../../opentelemetry-proto", features = ["gen-tonic-messages", "trace", "logs", "with-serde"] } log = { workspace = true } tokio = { version = "1.0", features = ["full"] } serde_json = "1" testcontainers = "0.15.0" +once_cell.workspace = true +anyhow = "1.0.94" +ctor = "0.2.9" [target.'cfg(unix)'.dependencies] opentelemetry-appender-log = { path = "../../../opentelemetry-appender-log", default-features = false} diff --git a/opentelemetry-otlp/tests/integration_test/actual/.gitignore b/opentelemetry-otlp/tests/integration_test/actual/.gitignore new file mode 100644 index 0000000000..a6c57f5fb2 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/actual/.gitignore @@ -0,0 +1 @@ +*.json diff --git a/opentelemetry-otlp/tests/integration_test/actual/README.md b/opentelemetry-otlp/tests/integration_test/actual/README.md new file mode 100644 index 0000000000..9380bd7807 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/actual/README.md @@ -0,0 +1 @@ +Output from the otel-collector goes here. diff --git a/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json b/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json new file mode 100644 index 0000000000..5b9bcdba0a --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json @@ -0,0 +1,133 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "meter" + }, + "metrics": [ + { + "name": "counter_u64", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey1", + "value": { + "stringValue": "mydifferentval" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" + } + } + ], + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", + "asInt": "15" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + }, + { + "name": "example_histogram", + "histogram": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey3", + "value": { + "stringValue": "myvalue4" + } + } + ], + "startTimeUnixNano": "1734094309366875000", + "timeUnixNano": "1734094317871537000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 + } + ], + "aggregationTemporality": 2 + } + }, + { + "name": "example_up_down_counter", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey5", + "value": { + "stringValue": "myvalue5" + } + } + ], + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", + "asInt": 
"-1" + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics.json b/opentelemetry-otlp/tests/integration_test/expected/metrics.json index fa713b8ea3..f1711d889e 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/metrics.json +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics.json @@ -6,7 +6,7 @@ { "key": "service.name", "value": { - "stringValue": "my.service" + "stringValue": "metrics-integration-test" } } ] @@ -14,106 +14,120 @@ "scopeMetrics": [ { "scope": { - "name": "my.library", - "version": "1.0.0", - "attributes": [ - { - "key": "my.scope.attribute", - "value": { - "stringValue": "some scope attribute" - } - } - ] + "name": "meter" }, "metrics": [ { - "name": "my.counter", - "unit": "1", - "description": "I am a Counter", - "metadata": [], + "name": "counter_u64", "sum": { - "aggregationTemporality": 1, - "isMonotonic": true, "dataPoints": [ { - "asDouble": 5, - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", "attributes": [ { - "key": "my.counter.attr", + "key": "mykey1", "value": { - "stringValue": "some value" + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", + "asInt": "10" } - ] + ], + "aggregationTemporality": 2, + "isMonotonic": true } }, { - "name": "my.gauge", - "unit": "1", - "description": "I am a Gauge", - "metadata": [], - "gauge": { + "name": "example_histogram", + "histogram": { "dataPoints": [ { - "asDouble": 10, - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", "attributes": [ { - "key": "my.gauge.attr", + "key": "mykey3", "value": { - "stringValue": "some value" + "stringValue": "myvalue4" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366875000", + "timeUnixNano": "1734094317871537000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 } - ] + ], + "aggregationTemporality": 2 } }, { - "name": "my.histogram", - "unit": "1", - "description": "I am a Histogram", - "metadata": [], - "histogram": { - "aggregationTemporality": 1, + "name": "example_up_down_counter", + "sum": { "dataPoints": [ { - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "count": 2, - "sum": 2, - "bucketCounts": [1,1], - "explicitBounds": [1], - "min": 0, - "max": 2, "attributes": [ { - "key": "my.histogram.attr", + "key": "mykey5", "value": { - "stringValue": "some value" + "stringValue": "myvalue5" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", + "asInt": "-1" } - ] + ], + "aggregationTemporality": 2 } } - ], - "schemaUrl": "whatever" + ] } - ], - "schemaUrl": "whatever" + ] } ] -} \ No newline at end of file +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json new file mode 100644 index 0000000000..9ca8a5a49e --- /dev/null +++ 
b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json @@ -0,0 +1,84 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_histogram_meter" + }, + "metrics": [ + { + "name": "example_histogram", + "histogram": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey3", + "value": { + "stringValue": "myvalue4" + } + } + ], + "startTimeUnixNano": "1734259947902842000", + "timeUnixNano": "1734259949551023000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json new file mode 100644 index 0000000000..aeb3da7b20 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json @@ -0,0 +1,53 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_u64_counter_meter" + }, + "metrics": [ + { + "name": "counter_u64", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey1", + "value": { + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" + } + } + ], + "startTimeUnixNano": "1734255506254812000", + "timeUnixNano": "1734255533415552000", + "asInt": "10" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json new file mode 100755 index 0000000000..a82cd63acf --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json @@ -0,0 +1,46 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_up_down_meter" + }, + "metrics": [ + { + "name": "example_up_down_counter", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey5", + "value": { + "stringValue": "myvalue5" + } + } + ], + "startTimeUnixNano": "1734259947902844000", + "timeUnixNano": "1734259952816822000", + "asInt": "-1" + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json b/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json index 4910e128a2..de13fb3cbf 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json +++ b/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json @@ -6,7 +6,7 @@ { "key": "service.name", "value": { - "stringValue": "my.service" + "stringValue": "metrics-integration-test" } } ], @@ -15,112 +15,81 @@ "scopeMetrics": [ { "scope": { - 
"name": "my.library", - "version": "1.0.0", - "attributes": [ - { - "key": "my.scope.attribute", - "value": { - "stringValue": "some scope attribute" - } - } - ], + "name": "meter", + "version": "", + "attributes": [], "droppedAttributesCount": 0 }, "metrics": [ { - "name": "my.counter", - "description": "I am a Counter", - "unit": "1", + "name": "counter_u64", + "description": "", + "unit": "", "metadata": [], "sum": { "dataPoints": [ { "attributes": [ { - "key": "my.counter.attr", + "key": "mykey1", "value": { - "stringValue": "some value" + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" } } ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", "exemplars": [], - "flags": 0, - "asDouble": 5.0 + "flags": 0 } ], - "aggregationTemporality": 1, + "aggregationTemporality": 2, "isMonotonic": true } }, { - "name": "my.gauge", - "description": "I am a Gauge", - "unit": "1", - "metadata": [], - "gauge": { - "dataPoints": [ - { - "attributes": [ - { - "key": "my.gauge.attr", - "value": { - "stringValue": "some value" - } - } - ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "exemplars": [], - "flags": 0, - "asDouble": 10.0 - } - ] - } + "name": "example_histogram", + "description": "", + "unit": "", + "metadata": [] }, { - "name": "my.histogram", - "description": "I am a Histogram", - "unit": "1", + "name": "example_up_down_counter", + "description": "", + "unit": "", "metadata": [], - "histogram": { + "sum": { "dataPoints": [ { "attributes": [ { - "key": "my.histogram.attr", + "key": "mykey5", "value": { - "stringValue": "some value" + "stringValue": "myvalue5" } } ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "count": 2, - "sum": 2.0, - "bucketCounts": [ - 1, - 1 - ], - "explicitBounds": [ - 1.0 - ], + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", "exemplars": [], - "flags": 0, - "min": 0.0, - "max": 2.0 + "flags": 0 } ], - "aggregationTemporality": 1 + "aggregationTemporality": 2, + "isMonotonic": false } } ], - "schemaUrl": "whatever" + "schemaUrl": "" } ], - "schemaUrl": "whatever" + "schemaUrl": "" } ] } \ No newline at end of file diff --git a/opentelemetry-otlp/tests/integration_test/otel-collector-config-2.yaml b/opentelemetry-otlp/tests/integration_test/otel-collector-config-2.yaml new file mode 100644 index 0000000000..26177a5870 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/otel-collector-config-2.yaml @@ -0,0 +1,28 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +exporters: + file/traces: + path: /testresults/traces.json + file/logs: + path: /testresults/logs.json + file/metrics: + path: /testresults/metrics.json + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [file/traces] + logs: + receivers: [otlp] + exporters: [file/logs] + metrics: + receivers: [otlp] + exporters: [file/metrics] + diff --git a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml b/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml deleted file mode 100644 index 7cd19bbfee..0000000000 --- a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml +++ /dev/null @@ -1,20 +0,0 @@ -receivers: - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:4317 - http: - endpoint: 0.0.0.0:4318 - 
-exporters: - file: - path: /testresults/result.json - -service: - pipelines: - traces: - receivers: [otlp] - exporters: [file] - logs: - receivers: [otlp] - exporters: [file] diff --git a/opentelemetry-otlp/tests/integration_test/src/images.rs b/opentelemetry-otlp/tests/integration_test/src/images.rs index 37a9c1b38b..7c09d697c8 100644 --- a/opentelemetry-otlp/tests/integration_test/src/images.rs +++ b/opentelemetry-otlp/tests/integration_test/src/images.rs @@ -36,10 +36,21 @@ impl Image for Collector { impl Default for Collector { fn default() -> Self { Collector { - volumes: HashMap::from([( - "./otel-collector-config.yaml".into(), - "/etc/otelcol/config.yaml".into(), - )]), + volumes: HashMap::from([ + ( + "./otel-collector-config-2.yaml".into(), + "/etc/otelcol/config.yaml".into(), + ), + ( + "./actual/traces.json".into(), + "/testresults/traces.json".into(), + ), + ("./actual/logs.json".into(), "/testresults/logs.json".into()), + ( + "./actual/metrics.json".into(), + "/testresults/metrics.json".into(), + ), + ]), } } } diff --git a/opentelemetry-otlp/tests/integration_test/src/lib.rs b/opentelemetry-otlp/tests/integration_test/src/lib.rs index e6bc88c742..3b6903633e 100644 --- a/opentelemetry-otlp/tests/integration_test/src/lib.rs +++ b/opentelemetry-otlp/tests/integration_test/src/lib.rs @@ -1,4 +1,7 @@ +use ctor::dtor; + pub mod images; pub mod logs_asserter; pub mod metrics_asserter; +pub mod test_utils; pub mod trace_asserter; diff --git a/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs index 4845270999..f370df8a62 100644 --- a/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs +++ b/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs @@ -1,40 +1,64 @@ +use anyhow::Result; +use serde_json::Value; use std::fs::File; +use std::io::{BufReader, Read}; -use opentelemetry_proto::tonic::metrics::v1::{MetricsData, ResourceMetrics}; +pub fn read_metrics_from_json(file: File) -> Result<Value> { + // Create a buffered reader for the file + let mut reader = BufReader::new(file); + let mut contents = String::new(); + + // Read the file contents into a string + reader + .read_to_string(&mut contents) + .expect("Failed to read json file"); + + // Parse the contents into a JSON Value + let metrics_data: Value = serde_json::from_str(&contents)?; + Ok(metrics_data) +} pub struct MetricsAsserter { - results: Vec<ResourceMetrics>, - expected: Vec<ResourceMetrics>, + results: Value, + expected: Value, } impl MetricsAsserter { - pub fn new(results: Vec<ResourceMetrics>, expected: Vec<ResourceMetrics>) -> Self { + pub fn new(results: Value, expected: Value) -> Self { MetricsAsserter { results, expected } } - pub fn assert(self) { - self.assert_resource_metrics_eq(&self.results, &self.expected); + pub fn assert(mut self) { + // Normalize JSON by cleaning out timestamps + Self::zero_out_timestamps(&mut self.results); + Self::zero_out_timestamps(&mut self.expected); + + // Perform the assertion + assert_eq!( + self.results, self.expected, + "Metrics did not match. 
Results: {:#?}, Expected: {:#?}", + self.results, self.expected + ); } - fn assert_resource_metrics_eq( - &self, - results: &[ResourceMetrics], - expected: &[ResourceMetrics], - ) { - assert_eq!(results.len(), expected.len()); - for i in 0..results.len() { - let result_resource_metrics = &results[i]; - let expected_resource_metrics = &expected[i]; - assert_eq!(result_resource_metrics, expected_resource_metrics); + /// Recursively removes or zeros out timestamp fields in the JSON + fn zero_out_timestamps(value: &mut Value) { + match value { + Value::Object(map) => { + for (key, val) in map.iter_mut() { + if key == "startTimeUnixNano" || key == "timeUnixNano" { + *val = Value::String("0".to_string()); + } else { + Self::zero_out_timestamps(val); + } + } + } + Value::Array(array) => { + for item in array.iter_mut() { + Self::zero_out_timestamps(item); + } + } + _ => {} } } } - -// read a file contains ResourceMetrics in json format -pub fn read_metrics_from_json(file: File) -> Vec<ResourceMetrics> { - let reader = std::io::BufReader::new(file); - - let metrics_data: MetricsData = - serde_json::from_reader(reader).expect("Failed to read json file"); - metrics_data.resource_metrics -} diff --git a/opentelemetry-otlp/tests/integration_test/src/test_utils.rs b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs new file mode 100644 index 0000000000..77151a947d --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs @@ -0,0 +1,78 @@ +#![cfg(unix)] + +use crate::images::Collector; +use ctor::dtor; +use std::fs; +use std::fs::File; +use std::os::unix::fs::PermissionsExt; +use std::sync::{Arc, Mutex, OnceLock}; +use testcontainers::clients::Cli; +use testcontainers::core::Port; +use testcontainers::{Container, RunnableImage}; + +// Static references for container management +static COLLECTOR_ARC: OnceLock<Mutex<Option<Arc<Container<'static, Collector>>>>> = OnceLock::new(); +static DOCKER_CLIENT: OnceLock<Cli> = OnceLock::new(); + +fn init_docker_client() -> Cli { + Cli::default() +} + +pub static METRICS_FILE: &str = "./actual/metrics.json"; +pub static LOGS_FILE: &str = "./actual/logs.json"; +pub static TRACES_FILE: &str = "./actual/traces.json"; + +pub async fn start_collector_container() -> Arc<Container<'static, Collector>> { + let docker = DOCKER_CLIENT.get_or_init(init_docker_client); + let mut arc_guard = COLLECTOR_ARC + .get_or_init(|| Mutex::new(None)) + .lock() + .unwrap(); + + // Check if the container is already running + if let Some(container) = &*arc_guard { + Arc::clone(container) + } else { + // Make sure all our test data is mounted + upsert_empty_file(METRICS_FILE); + upsert_empty_file(TRACES_FILE); + upsert_empty_file(LOGS_FILE); + + // Start a new container + let image = Collector::default(); + let mut runnable_image = RunnableImage::from(image); + + // Map ports + for port in [4317, 4318] { + runnable_image = runnable_image.with_mapped_port(Port { + local: port, + internal: port, + }); + } + + let container = Arc::new(docker.run(runnable_image)); + + // Store the container in COLLECTOR_ARC + *arc_guard = Some(Arc::clone(&container)); + + container + } +} + +fn upsert_empty_file(path: &str) -> File { + let file = File::create(path).unwrap(); + file.set_permissions(std::fs::Permissions::from_mode(0o666)) + .unwrap(); + file +} + +pub fn stop_collector_container() { + println!("Trying to shutdown"); + if let Some(container) = COLLECTOR_ARC + .get() + .and_then(|arc_lock| arc_lock.lock().unwrap().take()) + { + container.stop(); + println!("Collector container stopped."); + } +} diff --git 
a/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs index 00c7c2300d..ce7eec928a 100644 --- a/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs +++ b/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs @@ -1,3 +1,4 @@ +use anyhow::Result; use opentelemetry_proto::tonic::trace::v1::{ResourceSpans, Span, TracesData}; use std::collections::{HashMap, HashSet}; use std::fmt::{Debug, Formatter}; @@ -213,9 +214,9 @@ fn span_eq(left: &Span, right: &Span) -> bool { } // read a file contains ResourceSpans in json format -pub fn read_spans_from_json(file: File) -> Vec { +pub fn read_spans_from_json(file: File) -> Result> { let reader = std::io::BufReader::new(file); - let trace_data: TracesData = serde_json::from_reader(reader).expect("Failed to read json file"); - trace_data.resource_spans + let trace_data: TracesData = serde_json::from_reader(reader)?; + Ok(trace_data.resource_spans) } diff --git a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs b/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs deleted file mode 100644 index 5f5468d0dc..0000000000 --- a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs +++ /dev/null @@ -1,142 +0,0 @@ -#![cfg(unix)] - -use integration_test_runner::images::Collector; -use std::fs::File; -use std::os::unix::fs::PermissionsExt; -use std::time::Duration; -use testcontainers::clients::Cli; -use testcontainers::core::Port; -use testcontainers::RunnableImage; - -mod logs; -mod metrics; -mod traces; - -const COLLECTOR_CONTAINER_NAME: &str = "otel-collector"; -const TEST_RESULT_DIR_IN_CONTAINER: &str = "testresults"; -const EXPECTED_DIR: &str = "./expected"; -const RESULT_FILE_PATH: &str = "./result.json"; - -struct TestSuite { - expected_file_path: &'static str, -} - -impl TestSuite { - fn new(expected_file_path: &'static str) -> Self { - Self { expected_file_path } - } - - pub fn expected_file_path(&self) -> String { - format!("{}/{}", EXPECTED_DIR, self.expected_file_path) - } - - pub fn result_file_path_in_container(&self) -> String { - format!("/{}/{}", TEST_RESULT_DIR_IN_CONTAINER, RESULT_FILE_PATH) - } - - pub fn result_file_path(&self) -> String { - format!("./{}", RESULT_FILE_PATH) - } - - /// Create a empty file on localhost and copy it to container with proper permissions - /// we have to create the file for the container otherwise we will encounter a permission denied error. 
- /// see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/3159 - pub fn create_temporary_result_file(&self) -> File { - let file = File::create(self.result_file_path()).unwrap(); - file.set_permissions(std::fs::Permissions::from_mode(0o666)) - .unwrap(); - file - } -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] // skip when running unit test -async fn integration_tests() { - trace_integration_tests().await; - logs_integration_tests().await; -} - -async fn trace_integration_tests() { - let test_suites = [TestSuite::new("traces.json")]; - let mut collector_image = Collector::default(); - for test in test_suites.as_ref() { - let _ = test.create_temporary_result_file(); - collector_image = collector_image.with_volume( - test.result_file_path().as_str(), - test.result_file_path_in_container().as_str(), - ); - } - - let docker = Cli::default(); - let mut image = - RunnableImage::from(collector_image).with_container_name(COLLECTOR_CONTAINER_NAME); - - for port in [ - 4317, // gRPC port - 4318, // HTTP port - ] { - image = image.with_mapped_port(Port { - local: port, - internal: port, - }) - } - - let collector_container = docker.run(image); - - tokio::time::sleep(Duration::from_secs(5)).await; - traces::traces().await.unwrap(); - - // wait for file to flush to disks - // ideally we should use volume mount but otel collector file exporter doesn't handle permission too well - // bind mount mitigate the issue by set up the permission correctly on host system - tokio::time::sleep(Duration::from_secs(5)).await; - traces::assert_traces_results( - test_suites[0].result_file_path().as_str(), - test_suites[0].expected_file_path().as_str(), - ); - - collector_container.stop(); -} - -async fn logs_integration_tests() { - let test_suites = [TestSuite::new("logs.json")]; - - let mut collector_image = Collector::default(); - for test in test_suites.as_ref() { - let _ = test.create_temporary_result_file(); - collector_image = collector_image.with_volume( - test.result_file_path().as_str(), - test.result_file_path_in_container().as_str(), - ); - } - - let docker = Cli::default(); - let mut image = - RunnableImage::from(collector_image).with_container_name(COLLECTOR_CONTAINER_NAME); - - for port in [ - 4317, // gRPC port - 4318, // HTTP port - ] { - image = image.with_mapped_port(Port { - local: port, - internal: port, - }) - } - - let collector_container = docker.run(image); - - tokio::time::sleep(Duration::from_secs(5)).await; - logs::logs().await.unwrap(); - - // wait for file to flush to disks - // ideally we should use volume mount but otel collector file exporter doesn't handle permission too well - // bind mount mitigate the issue by set up the permission correctly on host system - tokio::time::sleep(Duration::from_secs(5)).await; - logs::assert_logs_results( - test_suites[0].result_file_path().as_str(), - test_suites[0].expected_file_path().as_str(), - ); - - collector_container.stop(); -} diff --git a/opentelemetry-otlp/tests/integration_test/tests/logs.rs b/opentelemetry-otlp/tests/integration_test/tests/logs.rs index 86448e5c51..54e5bb9c41 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/logs.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/logs.rs @@ -1,6 +1,9 @@ #![cfg(unix)] +use anyhow::Result; +use ctor::dtor; use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter}; +use integration_test_runner::test_utils; use log::{info, Level}; use opentelemetry::KeyValue; use 
opentelemetry_appender_log::OpenTelemetryLogBridge; @@ -10,8 +13,9 @@ use opentelemetry_sdk::{logs as sdklogs, runtime, Resource}; use std::error::Error; use std::fs::File; use std::os::unix::fs::MetadataExt; +use std::time::Duration; -fn init_logs() -> Result { +fn init_logs() -> Result { let exporter_builder = LogExporter::builder(); #[cfg(feature = "tonic-client")] let exporter_builder = exporter_builder.with_tonic(); @@ -34,7 +38,11 @@ fn init_logs() -> Result { .build()) } -pub async fn logs() -> Result<(), Box> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +pub async fn test_logs() -> Result<()> { + // Make sure the container is running + test_utils::start_collector_container().await; + let logger_provider = init_logs().unwrap(); let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider); log::set_boxed_logger(Box::new(otel_log_appender))?; @@ -42,6 +50,11 @@ pub async fn logs() -> Result<(), Box> { info!(target: "my-target", "hello from {}. My price is {}.", "banana", 2.99); let _ = logger_provider.shutdown(); + + tokio::time::sleep(Duration::from_secs(1)).await; + + assert_logs_results(test_utils::LOGS_FILE, "expected/logs.json"); + Ok(()) } @@ -67,3 +80,8 @@ pub fn test_assert_logs_eq() { let logs = read_logs_from_json(File::open("./expected/logs.json").unwrap()); LogsAsserter::new(logs.clone(), logs).assert(); } + +#[dtor] +fn shutdown() { + test_utils::stop_collector_container(); +} diff --git a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs index 5395c67d58..d1667f3573 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs @@ -1,23 +1,235 @@ -use std::{fs::File, io::Write}; +#![cfg(unix)] +use anyhow::{Context, Result}; +use ctor::dtor; use integration_test_runner::metrics_asserter::{read_metrics_from_json, MetricsAsserter}; +use integration_test_runner::test_utils; +use integration_test_runner::test_utils::start_collector_container; +use opentelemetry::KeyValue; +use opentelemetry_otlp::MetricExporter; use opentelemetry_proto::tonic::metrics::v1::MetricsData; +use opentelemetry_sdk::metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider}; +use opentelemetry_sdk::Resource; +use serde_json::Value; +use std::fs; +use std::fs::File; +use std::sync::Mutex; +use std::time::Duration; -#[test] -fn test_serde() { - let metrics = read_metrics_from_json(File::open("./expected/metrics.json").unwrap()); +static SETUP_DONE: Mutex = Mutex::new(false); - let json = serde_json::to_string_pretty(&MetricsData { - resource_metrics: metrics, - }) - .expect("Failed to serialize metrics"); +static RESULT_PATH: &str = "actual/metrics.json"; - // Write to file. 
- let mut file = File::create("./expected/serialized_metrics.json").unwrap(); - file.write_all(json.as_bytes()).unwrap(); +/// Initializes the OpenTelemetry metrics pipeline +async fn init_metrics() -> SdkMeterProvider { + let exporter_builder = MetricExporter::builder(); - let left = read_metrics_from_json(File::open("./expected/metrics.json").unwrap()); - let right = read_metrics_from_json(File::open("./expected/serialized_metrics.json").unwrap()); + #[cfg(feature = "tonic-client")] + let exporter_builder = exporter_builder.with_tonic(); + #[cfg(not(feature = "tonic-client"))] + #[cfg(any( + feature = "hyper-client", + feature = "reqwest-client", + feature = "reqwest-blocking-client" + ))] + let exporter_builder = exporter_builder.with_http(); - MetricsAsserter::new(left, right).assert(); + let exporter = exporter_builder + .build() + .expect("Failed to build MetricExporter"); + + let reader = PeriodicReader::builder(exporter) + .with_interval(Duration::from_millis(100)) + .with_timeout(Duration::from_secs(1)) + .build(); + + let meter_provider = MeterProviderBuilder::default() + .with_resource(Resource::new(vec![KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_NAME, + "metrics-integration-test", + )])) + .with_reader(reader) + .build(); + + opentelemetry::global::set_meter_provider(meter_provider.clone()); + + meter_provider +} + +pub fn fetch_latest_metrics_for_scope(scope_name: &str) -> Result<Value> { + // Open the file and fetch the contents + let contents = fs::read_to_string(test_utils::METRICS_FILE)?; + + // Find the last complete metrics line. Work backwards until one parses. + let json_line = contents + .lines() + .rev() + .find_map(|line| serde_json::from_str::<Value>(line).ok()) + .with_context(|| "No valid JSON line found in the metrics file.")?; + + // Parse the JSON and filter metrics strictly by the scope name + let mut filtered_json = json_line; + if let Some(resource_metrics) = filtered_json + .get_mut("resourceMetrics") + .and_then(|v| v.as_array_mut()) + { + resource_metrics.retain_mut(|resource| { + if let Some(scope_metrics) = resource + .get_mut("scopeMetrics") + .and_then(|v| v.as_array_mut()) + { + // Retain only `ScopeMetrics` that match the specified scope_name + scope_metrics.retain(|scope| { + scope + .get("scope") + .and_then(|s| s.get("name")) + .and_then(|name| name.as_str()) + .map_or(false, |n| n == scope_name) + }); + + // Keep the resource only if it has any matching `ScopeMetrics` + !scope_metrics.is_empty() + } else { + false + } + }); + } + + Ok(filtered_json) +} + +/// Performs setup for metrics tests, including environment setup and data seeding. +/// This only needs to be done once for the whole test suite +async fn setup_metrics_test() { + let mut done = SETUP_DONE.lock().unwrap(); + if !*done { + println!("Running setup before any tests..."); + *done = true; // Mark setup as done + + // Make sure the collector container is running + start_collector_container().await; + + // Initialise the metrics subsystem + _ = init_metrics().await; + } + + // Truncate results + _ = File::create(RESULT_PATH).expect("it's good"); +} + +/// +/// Check that the metrics for the given scope match what we expect. This +/// includes zeroing out timestamps, which we reasonably expect not to match. 
+/// +pub fn validate_metrics_against_results(scope_name: &str) -> Result<()> { + // Define the results file path + let results_file_path = format!("./expected/metrics/{}.json", scope_name); + + // Fetch the actual metrics for the given scope + let actual_metrics = fetch_latest_metrics_for_scope(scope_name) + .context(format!("Failed to fetch metrics for scope: {}", scope_name))?; + + // Read the expected metrics from the results file + let expected_metrics = { + let file = File::open(&results_file_path).context(format!( + "Failed to open results file: {}", + results_file_path + ))?; + read_metrics_from_json(file) + }?; + + // Compare the actual metrics with the expected metrics + MetricsAsserter::new(actual_metrics, expected_metrics).assert(); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// + /// JSON doesn't roundtrip through the MetricsData models properly. + /// TODO - this is some other issue to look at that this test demonstrates. + /// + //#[tokio::test] + //#[ignore] + async fn test_roundtrip_example_data() -> Result<()> { + let metrics_in = include_str!("../expected/metrics/test_u64_counter_meter.json"); + let metrics: MetricsData = serde_json::from_str(metrics_in)?; + let metrics_out = serde_json::to_string(&metrics)?; + + println!("{:}", metrics_out); + + let metrics_in_json: Value = serde_json::from_str(metrics_in)?; + let metrics_out_json: Value = serde_json::from_str(&metrics_out)?; + + assert_eq!(metrics_in_json, metrics_out_json); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_u64_counter() -> Result<()> { + let _result_path = setup_metrics_test().await; + const METER_NAME: &str = "test_u64_counter_meter"; + + // Add data to u64_counter + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + + let counter = meter.u64_counter("counter_u64").build(); + counter.add( + 10, + &[ + KeyValue::new("mykey1", "myvalue1"), + KeyValue::new("mykey2", "myvalue2"), + ], + ); + + tokio::time::sleep(Duration::from_secs(2)).await; + + // Validate metrics against results file + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + // #[ignore] // skip when running unit test + async fn test_histogram() -> Result<()> { + _ = setup_metrics_test().await; + const METER_NAME: &str = "test_histogram_meter"; + + // Add data to histogram + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + let histogram = meter.u64_histogram("example_histogram").build(); + histogram.record(42, &[KeyValue::new("mykey3", "myvalue4")]); + tokio::time::sleep(Duration::from_secs(5)).await; + + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + // #[ignore] // skip when running unit test + async fn test_up_down_counter() -> Result<()> { + _ = setup_metrics_test().await; + const METER_NAME: &str = "test_up_down_meter"; + + // Add data to up_down_counter + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + let up_down_counter = meter.i64_up_down_counter("example_up_down_counter").build(); + up_down_counter.add(-1, &[KeyValue::new("mykey5", "myvalue5")]); + tokio::time::sleep(Duration::from_secs(5)).await; + + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } +} + +#[dtor] +fn shutdown() { + test_utils::stop_collector_container(); } diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs 
b/opentelemetry-otlp/tests/integration_test/tests/traces.rs index 28649d1fcc..98aebab7eb 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs @@ -9,12 +9,17 @@ use opentelemetry::{ }; use opentelemetry_otlp::SpanExporter; +use anyhow::Result; +use ctor::dtor; +use integration_test_runner::test_utils; use opentelemetry_proto::tonic::trace::v1::TracesData; use opentelemetry_sdk::{runtime, trace as sdktrace, Resource}; use std::error::Error; use std::fs::File; use std::io::Write; use std::os::unix::fs::MetadataExt; +use std::time::Duration; +use tokio::time::sleep; fn init_tracer_provider() -> Result { let exporter_builder = SpanExporter::builder(); @@ -42,7 +47,10 @@ fn init_tracer_provider() -> Result { const LEMONS_KEY: Key = Key::from_static_str("lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); -pub async fn traces() -> Result<(), Box> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +pub async fn traces() -> Result<()> { + test_utils::start_collector_container().await; + let tracer_provider = init_tracer_provider().expect("Failed to initialize tracer provider."); global::set_tracer_provider(tracer_provider.clone()); @@ -66,42 +74,51 @@ pub async fn traces() -> Result<(), Box> { tracer_provider.shutdown()?; + // Give it a second to flush + sleep(Duration::from_secs(2)).await; + + // Validate results + assert_traces_results(test_utils::TRACES_FILE, "./expected/traces.json")?; + Ok(()) } -pub fn assert_traces_results(result: &str, expected: &str) { - let left = read_spans_from_json(File::open(expected).unwrap()); - let right = read_spans_from_json(File::open(result).unwrap()); +pub fn assert_traces_results(result: &str, expected: &str) -> Result<()> { + let left = read_spans_from_json(File::open(expected)?)?; + let right = read_spans_from_json(File::open(result)?)?; TraceAsserter::new(left, right).assert(); // we cannot read result json file because the timestamp was represents as string instead of u64. 
// need to fix it on json file exporter + assert!(File::open(result)?.metadata()?.size() > 0); - assert!(File::open(result).unwrap().metadata().unwrap().size() > 0) + Ok(()) } #[test] #[should_panic(expected = "left: \"Sub operation...\"")] // we swap the parent spans with child spans in failed_traces.json -pub fn test_assert_span_eq_failure() { - let left = read_spans_from_json(File::open("./expected/traces.json").unwrap()); - let right = read_spans_from_json(File::open("./expected/failed_traces.json").unwrap()); +pub fn test_assert_span_eq_failure() -> () { + let left = read_spans_from_json(File::open("./expected/traces.json").unwrap()).unwrap(); + let right = read_spans_from_json(File::open("./expected/failed_traces.json").unwrap()).unwrap(); TraceAsserter::new(right, left).assert(); } #[test] -pub fn test_assert_span_eq() { - let spans = read_spans_from_json(File::open("./expected/traces.json").unwrap()); +pub fn test_assert_span_eq() -> Result<()> { + let spans = read_spans_from_json(File::open("./expected/traces.json")?)?; TraceAsserter::new(spans.clone(), spans).assert(); + + Ok(()) } #[test] -pub fn test_serde() { +pub fn test_serde() -> Result<()> { let spans = read_spans_from_json( File::open("./expected/traces.json").expect("Failed to read traces.json"), - ); + )?; let json = serde_json::to_string_pretty(&TracesData { resource_spans: spans, }) @@ -113,11 +130,18 @@ pub fn test_serde() { let left = read_spans_from_json( File::open("./expected/traces.json").expect("Failed to read traces.json"), - ); + )?; let right = read_spans_from_json( File::open("./expected/serialized_traces.json") .expect("Failed to read serialized_traces.json"), - ); + )?; TraceAsserter::new(left, right).assert(); + + Ok(()) +} + +#[dtor] +fn shutdown() { + test_utils::stop_collector_container(); } diff --git a/scripts/test.sh b/scripts/test.sh index dfcb925659..467d5f7c4a 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -2,15 +2,19 @@ set -eu +# +# Using '--lib' skips integration tests +# + echo "Running tests for all packages in workspace with --all-features" -cargo test --workspace --all-features +cargo test --workspace --all-features --lib # See https://github.com/rust-lang/cargo/issues/5364 echo "Running tests for opentelemetry package with --no-default-features" -cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features +cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features --lib # Run global tracer provider test in single thread # //TODO: This tests were not running for a while. Need to find out how to run # run them. Using --ignored will run other tests as well, so that cannot be used. # echo "Running global tracer provider for opentelemetry-sdk package with single thread." -# cargo test --manifest-path=opentelemetry-sdk/Cargo.toml --all-features -- --test-threads=1 +# cargo test --manifest-path=opentelemetry-sdk/Cargo.toml --all-features -- --test-threads=1 --lib
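For reference, a minimal sketch (not part of the change set above) of the comparison semantics the reworked `MetricsAsserter` introduces: two payloads that differ only in their timestamp fields compare as equal, because `assert()` zeroes out `startTimeUnixNano`/`timeUnixNano` on both sides before comparing. The `json!` fragments below are made-up stand-ins, not real collector output.

```rust
use integration_test_runner::metrics_asserter::MetricsAsserter;
use serde_json::json;

fn main() {
    // Hypothetical "actual" and "expected" fragments; only the timestamps differ.
    let actual = json!({
        "dataPoints": [
            { "startTimeUnixNano": "1734094309366798000", "timeUnixNano": "1734094317871514000", "asInt": "10" }
        ]
    });
    let expected = json!({
        "dataPoints": [
            { "startTimeUnixNano": "0", "timeUnixNano": "0", "asInt": "10" }
        ]
    });

    // Passes: both sides have their timestamp fields normalized to "0" before the assert_eq! check.
    MetricsAsserter::new(actual, expected).assert();
}
```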