diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index ba229419d4..2a81cb9b1d 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -18,8 +18,8 @@ use criterion::{criterion_group, criterion_main, Criterion}; use opentelemetry::logs::LogResult; use opentelemetry::KeyValue; use opentelemetry_appender_tracing::layer as tracing_layer; -use opentelemetry_sdk::export::logs::{LogData, LogExporter}; -use opentelemetry_sdk::logs::{LogProcessor, LoggerProvider}; +use opentelemetry_sdk::export::logs::LogExporter; +use opentelemetry_sdk::logs::{LogData, LogProcessor, LoggerProvider}; use opentelemetry_sdk::Resource; use pprof::criterion::{Output, PProfProfiler}; use tracing::error; @@ -34,7 +34,13 @@ struct NoopExporter { #[async_trait] impl LogExporter for NoopExporter { - async fn export<'a>(&mut self, _: Vec>) -> LogResult<()> { + async fn export( + &mut self, + _: Vec<( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + )>, + ) -> LogResult<()> { LogResult::Ok(()) } diff --git a/opentelemetry-otlp/src/exporter/http/logs.rs b/opentelemetry-otlp/src/exporter/http/logs.rs index 396dec680d..83f25c1f9f 100644 --- a/opentelemetry-otlp/src/exporter/http/logs.rs +++ b/opentelemetry-otlp/src/exporter/http/logs.rs @@ -3,13 +3,15 @@ use std::sync::Arc; use async_trait::async_trait; use http::{header::CONTENT_TYPE, Method}; use opentelemetry::logs::{LogError, LogResult}; -use opentelemetry_sdk::export::logs::{LogData, LogExporter}; +use opentelemetry::InstrumentationLibrary; +use opentelemetry_sdk::export::logs::LogExporter; +use opentelemetry_sdk::logs::LogRecord; use super::OtlpHttpClient; #[async_trait] impl LogExporter for OtlpHttpClient { - async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { + async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>) -> LogResult<()> { let client = self .client .lock() @@ -19,13 +21,7 @@ impl LogExporter for OtlpHttpClient { _ => Err(LogError::Other("exporter is already shut down".into())), })?; - //TODO: avoid cloning here. - let owned_batch = batch - .into_iter() - .map(|cow_log_data| cow_log_data.into_owned()) // Converts Cow to owned LogData - .collect::>(); - - let (body, content_type) = { self.build_logs_export_body(owned_batch)? }; + let (body, content_type) = { self.build_logs_export_body(batch)? 
}; let mut request = http::Request::builder() .method(Method::POST) .uri(&self.collector_endpoint) diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index 2fa3ff851b..1b60971d76 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -7,16 +7,18 @@ use crate::{ OTEL_EXPORTER_OTLP_TIMEOUT, }; use http::{HeaderName, HeaderValue, Uri}; +#[cfg(feature = "logs")] +use opentelemetry::InstrumentationLibrary; use opentelemetry_http::HttpClient; use opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema; #[cfg(feature = "logs")] use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scope; #[cfg(feature = "trace")] use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope; -#[cfg(feature = "logs")] -use opentelemetry_sdk::export::logs::LogData; #[cfg(feature = "trace")] use opentelemetry_sdk::export::trace::SpanData; +#[cfg(feature = "logs")] +use opentelemetry_sdk::logs::LogRecord; #[cfg(feature = "metrics")] use opentelemetry_sdk::metrics::data::ResourceMetrics; use prost::Message; @@ -328,7 +330,7 @@ impl OtlpHttpClient { #[cfg(feature = "logs")] fn build_logs_export_body( &self, - logs: Vec, + logs: Vec<(&LogRecord, &InstrumentationLibrary)>, ) -> opentelemetry::logs::LogResult<(Vec, &'static str)> { use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest; let resource_logs = group_logs_by_resource_and_scope(logs, &self.resource); diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs index b529eda511..5a8d04b97e 100644 --- a/opentelemetry-otlp/src/exporter/tonic/logs.rs +++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs @@ -4,13 +4,16 @@ use opentelemetry::logs::{LogError, LogResult}; use opentelemetry_proto::tonic::collector::logs::v1::{ logs_service_client::LogsServiceClient, ExportLogsServiceRequest, }; -use opentelemetry_sdk::export::logs::{LogData, LogExporter}; +use opentelemetry_sdk::export::logs::LogExporter; use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request}; use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scope; use super::BoxInterceptor; +use opentelemetry::InstrumentationLibrary; +use opentelemetry_sdk::logs::LogRecord; + pub(crate) struct TonicLogsClient { inner: Option, #[allow(dead_code)] @@ -54,7 +57,7 @@ impl TonicLogsClient { #[async_trait] impl LogExporter for TonicLogsClient { - async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { + async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>) -> LogResult<()> { let (mut client, metadata, extensions) = match &mut self.inner { Some(inner) => { let (m, e, _) = inner @@ -67,13 +70,7 @@ impl LogExporter for TonicLogsClient { None => return Err(LogError::Other("exporter is already shut down".into())), }; - //TODO: avoid cloning here. 
- let owned_batch = batch - .into_iter() - .map(|cow_log_data| cow_log_data.into_owned()) // Converts Cow to owned LogData - .collect::>(); - - let resource_logs = group_logs_by_resource_and_scope(owned_batch, &self.resource); + let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource); client .export(Request::from_parts( diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 3f21697fb0..9a67bf66d9 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -14,7 +14,9 @@ use std::fmt::Debug; use opentelemetry::logs::LogError; -use opentelemetry_sdk::{export::logs::LogData, runtime::RuntimeChannel, Resource}; +use opentelemetry::InstrumentationLibrary; +use opentelemetry_sdk::logs::LogRecord; +use opentelemetry_sdk::{runtime::RuntimeChannel, Resource}; /// Compression algorithm to use, defaults to none. pub const OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"; @@ -98,9 +100,9 @@ impl LogExporter { #[async_trait] impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { - async fn export<'a>( + async fn export( &mut self, - batch: Vec>, + batch: Vec<(&LogRecord, &InstrumentationLibrary)>, ) -> opentelemetry::logs::LogResult<()> { self.client.export(batch).await } diff --git a/opentelemetry-proto/src/transform/logs.rs b/opentelemetry-proto/src/transform/logs.rs index dfd845c5d8..9ab688bac2 100644 --- a/opentelemetry-proto/src/transform/logs.rs +++ b/opentelemetry-proto/src/transform/logs.rs @@ -54,8 +54,8 @@ pub mod tonic { } } - impl From for LogRecord { - fn from(log_record: opentelemetry_sdk::logs::LogRecord) -> Self { + impl From<&opentelemetry_sdk::logs::LogRecord> for LogRecord { + fn from(log_record: &opentelemetry_sdk::logs::LogRecord) -> Self { let trace_context = log_record.trace_context.as_ref(); let severity_number = match log_record.severity_number { Some(Severity::Trace) => SeverityNumber::Trace, @@ -118,7 +118,7 @@ pub mod tonic { }, severity_number: severity_number.into(), severity_text: log_record.severity_text.map(Into::into).unwrap_or_default(), - body: log_record.body.map(Into::into), + body: log_record.body.clone().map(Into::into), dropped_attributes_count: 0, flags: trace_context .map(|ctx| { @@ -139,17 +139,23 @@ pub mod tonic { impl From<( - opentelemetry_sdk::export::logs::LogData, + ( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + ), &ResourceAttributesWithSchema, )> for ResourceLogs { fn from( data: ( - opentelemetry_sdk::export::logs::LogData, + ( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + ), &ResourceAttributesWithSchema, ), ) -> Self { - let (log_data, resource) = data; + let ((log_record, instrumentation), resource) = data; ResourceLogs { resource: Some(Resource { @@ -158,21 +164,23 @@ pub mod tonic { }), schema_url: resource.schema_url.clone().unwrap_or_default(), scope_logs: vec![ScopeLogs { - schema_url: log_data - .instrumentation + schema_url: instrumentation .schema_url .clone() .map(Into::into) .unwrap_or_default(), - scope: Some((log_data.instrumentation, log_data.record.target.clone()).into()), - log_records: vec![log_data.record.into()], + scope: Some((instrumentation, log_record.target.clone()).into()), + log_records: vec![log_record.into()], }], } } } pub fn group_logs_by_resource_and_scope( - logs: Vec, + logs: Vec<( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + )>, resource: &ResourceAttributesWithSchema, ) -> Vec { // 
Group logs by target or instrumentation name @@ -180,15 +188,20 @@ pub mod tonic { HashMap::new(), |mut scope_map: HashMap< Cow<'static, str>, - Vec<&opentelemetry_sdk::export::logs::LogData>, + Vec<( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + )>, >, - log| { - let key = log - .record + (log_record, instrumentation)| { + let key = log_record .target .clone() - .unwrap_or_else(|| log.instrumentation.name.clone()); - scope_map.entry(key).or_default().push(log); + .unwrap_or_else(|| Cow::Owned(instrumentation.name.clone().into_owned())); + scope_map + .entry(key) + .or_default() + .push((log_record, instrumentation)); scope_map }, ); @@ -197,13 +210,13 @@ pub mod tonic { .into_iter() .map(|(key, log_data)| ScopeLogs { scope: Some(InstrumentationScope::from(( - &log_data.first().unwrap().instrumentation, - Some(key), + log_data.first().unwrap().1, + Some(key.into_owned().into()), ))), schema_url: resource.schema_url.clone().unwrap_or_default(), log_records: log_data .into_iter() - .map(|log_data| log_data.record.clone().into()) + .map(|(log_record, _)| log_record.into()) .collect(), }) .collect(); @@ -223,30 +236,29 @@ pub mod tonic { mod tests { use crate::transform::common::tonic::ResourceAttributesWithSchema; use opentelemetry::logs::LogRecord as _; - use opentelemetry_sdk::export::logs::LogData; + use opentelemetry::InstrumentationLibrary; use opentelemetry_sdk::{logs::LogRecord, Resource}; use std::time::SystemTime; - fn create_test_log_data(instrumentation_name: &str, _message: &str) -> LogData { + fn create_test_log_data( + instrumentation_name: &str, + _message: &str, + ) -> (LogRecord, InstrumentationLibrary) { let mut logrecord = LogRecord::default(); logrecord.set_timestamp(SystemTime::now()); logrecord.set_observed_timestamp(SystemTime::now()); - LogData { - instrumentation: opentelemetry_sdk::InstrumentationLibrary::builder( - instrumentation_name.to_string(), - ) - .build(), - record: logrecord, - } + let instrumentation = + InstrumentationLibrary::builder(instrumentation_name.to_string()).build(); + (logrecord, instrumentation) } #[test] fn test_group_logs_by_resource_and_scope_single_scope() { let resource = Resource::default(); - let log1 = create_test_log_data("test-lib", "Log 1"); - let log2 = create_test_log_data("test-lib", "Log 2"); + let log_data1 = create_test_log_data("test-lib", "Log 1"); + let log_data2 = create_test_log_data("test-lib", "Log 2"); - let logs = vec![log1, log2]; + let logs = vec![(&log_data1.0, &log_data1.1), (&log_data2.0, &log_data2.1)]; let resource: ResourceAttributesWithSchema = (&resource).into(); // Convert Resource to ResourceAttributesWithSchema let grouped_logs = @@ -263,10 +275,10 @@ mod tests { #[test] fn test_group_logs_by_resource_and_scope_multiple_scopes() { let resource = Resource::default(); - let log1 = create_test_log_data("lib1", "Log 1"); - let log2 = create_test_log_data("lib2", "Log 2"); + let log_data1 = create_test_log_data("lib1", "Log 1"); + let log_data2 = create_test_log_data("lib2", "Log 2"); - let logs = vec![log1, log2]; + let logs = vec![(&log_data1.0, &log_data1.1), (&log_data2.0, &log_data2.1)]; let resource: ResourceAttributesWithSchema = (&resource).into(); // Convert Resource to ResourceAttributesWithSchema let grouped_logs = crate::transform::logs::tonic::group_logs_by_resource_and_scope(logs, &resource); diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 496cc3543f..c674857dd2 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ 
b/opentelemetry-sdk/CHANGELOG.md
@@ -25,6 +25,44 @@
   [#2021](https://github.com/open-telemetry/opentelemetry-rust/pull/2021)
 - Provide default implementation for `event_enabled` method in `LogProcessor` trait that returns `true` always.
+- **Breaking** [#2035](https://github.com/open-telemetry/opentelemetry-rust/pull/2035)
+  - The `LogExporter::export()` interface is modified as below:
+    Previous signature:
+    ```rust
+    async fn export<'a>(&mut self, batch: Vec<Cow<'a, LogData>>) -> LogResult<()>;
+    ```
+
+    Updated signature:
+    ```rust
+    async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>) -> LogResult<()>;
+    ```
+    This change simplifies the processing required by exporters. Exporters no longer need to determine whether the `LogData` is borrowed or owned, as they now work directly with references. As a result, exporters must explicitly copy the `LogRecord` and/or `InstrumentationLibrary` when they need to keep them, since the new interface only provides references to these structures.
+
+  - The `LogData` struct is no longer part of the export interface and has been moved from the `opentelemetry_sdk::export::logs` namespace to `opentelemetry_sdk::logs`. Custom `LogProcessor` implementations need to update their imports accordingly.
+
+  - The `LogData` struct has been changed as below:
+    Previous definition:
+    ```rust
+    #[derive(Clone, Debug)]
+    pub struct LogData {
+        /// Log record
+        pub record: LogRecord,
+        /// Instrumentation details for the emitter who produced this `LogEvent`.
+        pub instrumentation: InstrumentationLibrary,
+    }
+    ```
+
+    Updated definition:
+    ```rust
+    #[derive(Clone, Debug)]
+    pub struct LogData<'a> {
+        /// Log record, which can be borrowed or owned.
+        pub record: Cow<'a, LogRecord>,
+        /// Instrumentation details for the emitter who produced this `LogEvent`.
+        pub instrumentation: Cow<'a, InstrumentationLibrary>,
+    }
+    ```
+    Custom `LogProcessor` implementations need to adjust how they handle the `LogData` received through the `LogProcessor::emit()` interface accordingly.
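For third-party exporters affected by the breaking change above, the migration is mostly mechanical: drop the `Cow` handling and clone only what has to outlive the `export()` call. The sketch below is a minimal illustration written against the post-#2035 `opentelemetry-sdk` API; `MyExporter` and the backend hand-off are hypothetical placeholders, not code from this PR.

```rust
use async_trait::async_trait;
use opentelemetry::logs::LogResult;
use opentelemetry::InstrumentationLibrary;
use opentelemetry_sdk::export::logs::LogExporter;
use opentelemetry_sdk::logs::LogRecord;

#[derive(Debug)]
struct MyExporter;

#[async_trait]
impl LogExporter for MyExporter {
    // The batch now carries plain references instead of `Cow<'a, LogData>`.
    async fn export(
        &mut self,
        batch: Vec<(&LogRecord, &InstrumentationLibrary)>,
    ) -> LogResult<()> {
        for (record, library) in batch {
            // The references are only valid for the duration of this call,
            // so clone whatever the backend needs to keep.
            let owned_record: LogRecord = record.clone();
            let scope_name = library.name.clone();
            // A real exporter would hand `owned_record` and `scope_name`
            // to its backend here; this sketch just drops them.
            let _ = (owned_record, scope_name);
        }
        Ok(())
    }
}
```

Custom `LogProcessor` implementations that buffer log data can follow the pattern the updated `BatchLogProcessor` uses in this diff: turn the `Cow` fields of `LogData<'_>` into owned values with `into_owned()` before storing them.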
## v0.24.1 diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index 71d5fc699f..840560a1f4 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -26,7 +26,7 @@ use opentelemetry::logs::{ use opentelemetry::trace::Tracer; use opentelemetry::trace::TracerProvider as _; use opentelemetry::Key; -use opentelemetry_sdk::export::logs::LogData; +use opentelemetry_sdk::logs::LogData; use opentelemetry_sdk::logs::LogProcessor; use opentelemetry_sdk::logs::{Logger, LoggerProvider}; use opentelemetry_sdk::trace; diff --git a/opentelemetry-sdk/benches/log_exporter.rs b/opentelemetry-sdk/benches/log_exporter.rs index 97069db21c..73fde7d61d 100644 --- a/opentelemetry-sdk/benches/log_exporter.rs +++ b/opentelemetry-sdk/benches/log_exporter.rs @@ -18,8 +18,10 @@ use criterion::{criterion_group, criterion_main, Criterion}; use opentelemetry::logs::{LogRecord as _, LogResult, Logger as _, LoggerProvider as _, Severity}; -use opentelemetry_sdk::export::logs::LogData; +use opentelemetry::InstrumentationLibrary; +use opentelemetry_sdk::logs::LogData; use opentelemetry_sdk::logs::LogProcessor; +use opentelemetry_sdk::logs::LogRecord; use opentelemetry_sdk::logs::LoggerProvider; use pprof::criterion::{Output, PProfProfiler}; use std::fmt::Debug; @@ -28,11 +30,11 @@ use std::fmt::Debug; // cargo bench --bench log_exporter #[async_trait] pub trait LogExporterWithFuture: Send + Sync + Debug { - async fn export(&mut self, batch: Vec); + async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>); } pub trait LogExporterWithoutFuture: Send + Sync + Debug { - fn export(&mut self, batch: Vec); + fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>); } #[derive(Debug)] @@ -40,13 +42,13 @@ struct NoOpExporterWithFuture {} #[async_trait] impl LogExporterWithFuture for NoOpExporterWithFuture { - async fn export(&mut self, _batch: Vec) {} + async fn export(&mut self, _batch: Vec<(&LogRecord, &InstrumentationLibrary)>) {} } #[derive(Debug)] struct NoOpExporterWithoutFuture {} impl LogExporterWithoutFuture for NoOpExporterWithoutFuture { - fn export(&mut self, _batch: Vec) {} + fn export(&mut self, _batch: Vec<(&LogRecord, &InstrumentationLibrary)>) {} } #[derive(Debug)] @@ -65,7 +67,9 @@ impl ExportingProcessorWithFuture { impl LogProcessor for ExportingProcessorWithFuture { fn emit(&self, data: &mut LogData) { let mut exporter = self.exporter.lock().expect("lock error"); - futures_executor::block_on(exporter.export(vec![data.clone()])); + futures_executor::block_on( + exporter.export(vec![(data.record.as_ref(), data.instrumentation.as_ref())]), + ); } fn force_flush(&self) -> LogResult<()> { @@ -95,7 +99,7 @@ impl LogProcessor for ExportingProcessorWithoutFuture { self.exporter .lock() .expect("lock error") - .export(vec![data.clone()]); + .export(vec![(data.record.as_ref(), data.instrumentation.as_ref())]); } fn force_flush(&self) -> LogResult<()> { diff --git a/opentelemetry-sdk/benches/log_processor.rs b/opentelemetry-sdk/benches/log_processor.rs index c75dee65c1..7e78897669 100644 --- a/opentelemetry-sdk/benches/log_processor.rs +++ b/opentelemetry-sdk/benches/log_processor.rs @@ -19,10 +19,8 @@ use std::{ use criterion::{criterion_group, criterion_main, Criterion}; use opentelemetry::logs::{LogRecord as _, LogResult, Logger as _, LoggerProvider as _, Severity}; -use opentelemetry_sdk::{ - export::logs::LogData, - logs::{LogProcessor, LogRecord, Logger, LoggerProvider}, -}; +use opentelemetry_sdk::logs::{LogData, 
LogProcessor, LogRecord, Logger, LoggerProvider}; +use std::borrow::Cow; // Run this benchmark with: // cargo bench --bench log_processor @@ -45,7 +43,7 @@ fn create_log_record(logger: &Logger) -> LogRecord { struct NoopProcessor; impl LogProcessor for NoopProcessor { - fn emit(&self, _data: &mut LogData) {} + fn emit(&self, _data: &mut LogData<'_>) {} fn force_flush(&self) -> LogResult<()> { Ok(()) @@ -60,7 +58,7 @@ impl LogProcessor for NoopProcessor { struct CloningProcessor; impl LogProcessor for CloningProcessor { - fn emit(&self, data: &mut LogData) { + fn emit(&self, data: &mut LogData<'_>) { let _data_cloned = data.clone(); } @@ -75,8 +73,8 @@ impl LogProcessor for CloningProcessor { #[derive(Debug)] struct SendToChannelProcessor { - sender: std::sync::mpsc::Sender, - receiver: Arc>>, + sender: std::sync::mpsc::Sender>, + receiver: Arc>>>, } impl SendToChannelProcessor { @@ -104,7 +102,10 @@ impl SendToChannelProcessor { impl LogProcessor for SendToChannelProcessor { fn emit(&self, data: &mut LogData) { - let data_cloned = data.clone(); + let data_cloned = LogData { + record: Cow::Owned(data.record.clone().into_owned()), + instrumentation: Cow::Owned(data.instrumentation.clone().into_owned()), + }; let res = self.sender.send(data_cloned); if res.is_err() { println!("Error sending log data to channel {0}", res.err().unwrap()); diff --git a/opentelemetry-sdk/src/export/logs/mod.rs b/opentelemetry-sdk/src/export/logs/mod.rs index e1426553a1..353c89042c 100644 --- a/opentelemetry-sdk/src/export/logs/mod.rs +++ b/opentelemetry-sdk/src/export/logs/mod.rs @@ -8,14 +8,16 @@ use opentelemetry::{ logs::{LogError, LogResult}, InstrumentationLibrary, }; -use std::borrow::Cow; use std::fmt::Debug; /// `LogExporter` defines the interface that log exporters should implement. #[async_trait] pub trait LogExporter: Send + Sync + Debug { - /// Exports a batch of [`LogData`]. - async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()>; + /// Exports a batch of [`LogRecord`, `InstrumentationLibrary`]. + async fn export( + &mut self, + records: Vec<(&LogRecord, &InstrumentationLibrary)>, + ) -> LogResult<()>; /// Shuts down the exporter. fn shutdown(&mut self) {} #[cfg(feature = "logs_level_enabled")] @@ -28,14 +30,5 @@ pub trait LogExporter: Send + Sync + Debug { fn set_resource(&mut self, _resource: &Resource) {} } -/// `LogData` represents a single log event without resource context. -#[derive(Clone, Debug)] -pub struct LogData { - /// Log record - pub record: LogRecord, - /// Instrumentation details for the emitter who produced this `LogEvent`. - pub instrumentation: InstrumentationLibrary, -} - /// Describes the result of an export. 
pub type ExportResult = Result<(), LogError>; diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs index c9d3e5a828..ff4f7b57ba 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -1,9 +1,6 @@ use super::{BatchLogProcessor, LogProcessor, LogRecord, SimpleLogProcessor, TraceContext}; -use crate::{ - export::logs::{LogData, LogExporter}, - runtime::RuntimeChannel, - Resource, -}; +use crate::logs::LogData; +use crate::{export::logs::LogExporter, runtime::RuntimeChannel, Resource}; use opentelemetry::{ global, logs::{LogError, LogResult}, @@ -274,8 +271,8 @@ impl opentelemetry::logs::Logger for Logger { } let mut data = LogData { - record: log_record, - instrumentation: self.instrumentation_library().clone(), + record: Cow::Borrowed(&log_record), + instrumentation: Cow::Borrowed(self.instrumentation_library()), }; for p in processors { @@ -336,7 +333,7 @@ mod tests { } impl LogProcessor for ShutdownTestLogProcessor { - fn emit(&self, _data: &mut LogData) { + fn emit(&self, _data: &mut LogData<'_>) { self.is_shutdown .lock() .map(|is_shutdown| { @@ -566,7 +563,7 @@ mod tests { } impl LogProcessor for LazyLogProcessor { - fn emit(&self, _data: &mut LogData) { + fn emit(&self, _data: &mut LogData<'_>) { // nothing to do. } diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index 7366f19791..072e350ca4 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -1,5 +1,6 @@ +use crate::logs::LogData; use crate::{ - export::logs::{ExportResult, LogData, LogExporter}, + export::logs::{ExportResult, LogExporter}, runtime::{RuntimeChannel, TrySend}, Resource, }; @@ -55,7 +56,7 @@ pub trait LogProcessor: Send + Sync + Debug { /// /// # Parameters /// - `data`: A mutable reference to `LogData` representing the log record. - fn emit(&self, data: &mut LogData); + fn emit(&self, data: &mut LogData<'_>); /// Force the logs lying in the cache to be exported. fn force_flush(&self) -> LogResult<()>; /// Shuts down the processor. 
@@ -93,7 +94,7 @@ impl SimpleLogProcessor { } impl LogProcessor for SimpleLogProcessor { - fn emit(&self, data: &mut LogData) { + fn emit(&self, data: &mut LogData<'_>) { // noop after shutdown if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) { return; @@ -104,7 +105,10 @@ impl LogProcessor for SimpleLogProcessor { .lock() .map_err(|_| LogError::Other("simple logprocessor mutex poison".into())) .and_then(|mut exporter| { - futures_executor::block_on(exporter.export(vec![Cow::Borrowed(data)])) + // Extract references to LogRecord and InstrumentationLibrary + let log_record = data.record.as_ref(); + let instrumentation = data.instrumentation.as_ref(); + futures_executor::block_on(exporter.export(vec![(log_record, instrumentation)])) }); if let Err(err) = result { global::handle_error(err); @@ -150,10 +154,14 @@ impl Debug for BatchLogProcessor { } impl LogProcessor for BatchLogProcessor { - fn emit(&self, data: &mut LogData) { + fn emit(&self, data: &mut LogData<'_>) { + let owned_data = LogData { + record: Cow::Owned(data.record.clone().into_owned()), + instrumentation: Cow::Owned(data.instrumentation.clone().into_owned()), + }; let result = self .message_sender - .try_send(BatchMessage::ExportLog(data.clone())); + .try_send(BatchMessage::ExportLog(owned_data)); if let Err(err) = result { global::handle_error(LogError::Other(err.into())); @@ -300,7 +308,7 @@ async fn export_with_timeout<'a, R, E>( time_out: Duration, exporter: &mut E, runtime: &R, - batch: Vec>, + batch: Vec>>, ) -> ExportResult where R: RuntimeChannel, @@ -309,8 +317,13 @@ where if batch.is_empty() { return Ok(()); } + // Convert the Vec<&LogData> to Vec<(&LogRecord, &InstrumentationLibrary)> + let export_batch = batch + .iter() + .map(|log_data| (log_data.record.as_ref(), log_data.instrumentation.as_ref())) + .collect(); - let export = exporter.export(batch); + let export = exporter.export(export_batch); let timeout = runtime.delay(time_out); pin_mut!(export); pin_mut!(timeout); @@ -490,7 +503,7 @@ where #[derive(Debug)] enum BatchMessage { /// Export logs, usually called when the log is emitted. - ExportLog(LogData), + ExportLog(LogData<'static>), /// Flush the current buffer to the backend, it can be triggered by /// pre configured interval or a call to `force_push` function. 
Flush(Option>), @@ -506,9 +519,11 @@ mod tests { BatchLogProcessor, OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, }; + use crate::logs::LogData; + use crate::logs::LogRecord; use crate::testing::logs::InMemoryLogsExporterBuilder; use crate::{ - export::logs::{LogData, LogExporter}, + export::logs::LogExporter, logs::{ log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, @@ -523,6 +538,7 @@ mod tests { use async_trait::async_trait; use opentelemetry::logs::AnyValue; use opentelemetry::logs::{Logger, LoggerProvider as _}; + use opentelemetry::InstrumentationLibrary; use opentelemetry::Key; use opentelemetry::{logs::LogResult, KeyValue}; use std::borrow::Cow; @@ -536,7 +552,10 @@ mod tests { #[async_trait] impl LogExporter for MockLogExporter { - async fn export<'a>(&mut self, _batch: Vec>) -> LogResult<()> { + async fn export( + &mut self, + _batch: Vec<(&LogRecord, &InstrumentationLibrary)>, + ) -> LogResult<()> { Ok(()) } @@ -805,20 +824,26 @@ mod tests { #[derive(Debug)] struct FirstProcessor { - pub(crate) logs: Arc>>, + pub(crate) logs: Arc>>>, } impl LogProcessor for FirstProcessor { - fn emit(&self, data: &mut LogData) { + fn emit(&self, data: &mut LogData<'_>) { // add attribute - data.record.attributes.get_or_insert(vec![]).push(( + let record = data.record.to_mut(); + record.attributes.get_or_insert(vec![]).push(( Key::from_static_str("processed_by"), AnyValue::String("FirstProcessor".into()), )); // update body - data.record.body = Some("Updated by FirstProcessor".into()); - - self.logs.lock().unwrap().push(data.clone()); //clone as the LogProcessor is storing the data. + record.body = Some("Updated by FirstProcessor".into()); + // Convert the modified LogData to an owned version + let owned_data = LogData { + record: Cow::Owned(record.clone()), // Since record is already owned, no need to clone deeply + instrumentation: Cow::Owned(data.instrumentation.clone().into_owned()), + }; + + self.logs.lock().unwrap().push(owned_data); //clone as the LogProcessor is storing the data. 
} fn force_flush(&self) -> LogResult<()> { @@ -832,11 +857,11 @@ mod tests { #[derive(Debug)] struct SecondProcessor { - pub(crate) logs: Arc>>, + pub(crate) logs: Arc>>>, } impl LogProcessor for SecondProcessor { - fn emit(&self, data: &mut LogData) { + fn emit(&self, data: &mut LogData<'_>) { assert!(data.record.attributes.as_ref().map_or(false, |attrs| { attrs.iter().any(|(key, value)| { key.as_str() == "processed_by" @@ -847,7 +872,12 @@ mod tests { data.record.body.clone().unwrap() == AnyValue::String("Updated by FirstProcessor".into()) ); - self.logs.lock().unwrap().push(data.clone()); + let record = data.record.to_mut(); + let owned_data = LogData { + record: Cow::Owned(record.clone()), // Convert the record to owned + instrumentation: Cow::Owned(data.instrumentation.clone().into_owned()), + }; + self.logs.lock().unwrap().push(owned_data); } fn force_flush(&self) -> LogResult<()> { diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 5d2e72719b..92e384ee41 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -4,12 +4,23 @@ mod log_emitter; mod log_processor; mod record; +use crate::InstrumentationLibrary; pub use log_emitter::{Builder, Logger, LoggerProvider}; pub use log_processor::{ BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, SimpleLogProcessor, }; pub use record::{LogRecord, TraceContext}; +use std::borrow::Cow; + +/// `LogData` represents a single log event without resource context. +#[derive(Clone, Debug)] +pub struct LogData<'a> { + /// Log record, which can be borrowed or owned. + pub record: Cow<'a, LogRecord>, + /// Instrumentation details for the emitter who produced this `LogEvent`. + pub instrumentation: Cow<'a, InstrumentationLibrary>, +} #[cfg(all(test, feature = "testing"))] mod tests { diff --git a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs index 8068fafaec..346ef9861a 100644 --- a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs @@ -1,4 +1,4 @@ -use crate::export::logs::{LogData, LogExporter}; +use crate::export::logs::LogExporter; use crate::logs::LogRecord; use crate::Resource; use async_trait::async_trait; @@ -37,13 +37,23 @@ use std::sync::{Arc, Mutex}; ///# } /// ``` /// +/// #[derive(Clone, Debug)] pub struct InMemoryLogsExporter { - logs: Arc>>, + logs: Arc>>, resource: Arc>, should_reset_on_shutdown: bool, } +/// `OwnedLogData` represents a single log event without resource context. +#[derive(Debug, Clone)] +pub struct OwnedLogData { + /// Log record, which can be borrowed or owned. + pub record: LogRecord, + /// Instrumentation details for the emitter who produced this `LogEvent`. 
+ pub instrumentation: InstrumentationLibrary, +} + impl Default for InMemoryLogsExporter { fn default() -> Self { InMemoryLogsExporterBuilder::new().build() @@ -175,10 +185,14 @@ impl InMemoryLogsExporter { #[async_trait] impl LogExporter for InMemoryLogsExporter { - async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { + async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>) -> LogResult<()> { let mut logs_guard = self.logs.lock().map_err(LogError::from)?; - for log in batch.into_iter() { - logs_guard.push(log.into_owned()); + for (log_record, instrumentation) in batch.into_iter() { + let owned_log = OwnedLogData { + record: log_record.clone(), + instrumentation: instrumentation.clone(), + }; + logs_guard.push(owned_log); } Ok(()) } diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs index dacefa3d8b..fd59701c0b 100644 --- a/opentelemetry-stdout/src/logs/exporter.rs +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -1,12 +1,12 @@ use async_trait::async_trait; use core::fmt; +use opentelemetry::InstrumentationLibrary; use opentelemetry::{ logs::{LogError, LogResult}, ExportError, }; -use opentelemetry_sdk::export::logs::{ExportResult, LogData}; +use opentelemetry_sdk::logs::LogRecord; use opentelemetry_sdk::Resource; -use std::borrow::Cow; use std::io::{stdout, Write}; type Encoder = @@ -45,14 +45,9 @@ impl fmt::Debug for LogExporter { #[async_trait] impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { /// Export spans to stdout - async fn export<'a>(&mut self, batch: Vec>) -> ExportResult { + async fn export(&mut self, batch: Vec<(&LogRecord, &InstrumentationLibrary)>) -> LogResult<()> { if let Some(writer) = &mut self.writer { - // TODO - Avoid cloning logdata if it is borrowed. 
- let log_data = crate::logs::transform::LogData::from(( - batch.into_iter().map(Cow::into_owned).collect(), - &self.resource, - )); - let result = (self.encoder)(writer, log_data) as LogResult<()>; + let result = (self.encoder)(writer, (batch, &self.resource).into()) as LogResult<()>; result.and_then(|_| writer.write_all(b"\n").map_err(|e| Error(e).into())) } else { Err("exporter is shut down".into()) diff --git a/opentelemetry-stdout/src/logs/transform.rs b/opentelemetry-stdout/src/logs/transform.rs index 0560e0c064..84e864f469 100644 --- a/opentelemetry-stdout/src/logs/transform.rs +++ b/opentelemetry-stdout/src/logs/transform.rs @@ -16,13 +16,19 @@ pub struct LogData { impl From<( - Vec, + Vec<( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + )>, &opentelemetry_sdk::Resource, )> for LogData { fn from( (sdk_logs, sdk_resource): ( - Vec, + Vec<( + &opentelemetry_sdk::logs::LogRecord, + &opentelemetry::InstrumentationLibrary, + )>, &opentelemetry_sdk::Resource, ), ) -> Self { @@ -30,8 +36,8 @@ impl for sdk_log in sdk_logs { let resource_schema_url = sdk_resource.schema_url().map(|s| s.to_string().into()); - let schema_url = sdk_log.instrumentation.schema_url.clone(); - let scope: Scope = sdk_log.instrumentation.clone().into(); + let schema_url = sdk_log.1.schema_url.clone(); + let scope: Scope = sdk_log.1.clone().into(); let resource: Resource = sdk_resource.into(); let rl = resource_logs @@ -43,10 +49,10 @@ impl }); match rl.scope_logs.iter_mut().find(|sl| sl.scope == scope) { - Some(sl) => sl.log_records.push(sdk_log.into()), + Some(sl) => sl.log_records.push(sdk_log.0.into()), None => rl.scope_logs.push(ScopeLogs { scope, - log_records: vec![sdk_log.into()], + log_records: vec![sdk_log.0.into()], schema_url, }), } @@ -104,18 +110,17 @@ struct LogRecord { trace_id: Option, } -impl From for LogRecord { - fn from(value: opentelemetry_sdk::export::logs::LogData) -> Self { +impl From<&opentelemetry_sdk::logs::LogRecord> for LogRecord { + fn from(record: &opentelemetry_sdk::logs::LogRecord) -> Self { LogRecord { attributes: { - let attributes = value - .record + let attributes = record .attributes_iter() .map(|(k, v)| KeyValue::from((k.clone(), v.clone()))) // Map each pair to a KeyValue .collect::>(); // Collect into a Vecs #[cfg(feature = "populate-logs-event-name")] - if let Some(event_name) = value.record.event_name { + if let Some(event_name) = record.event_name { let mut attributes_with_name = attributes; attributes_with_name.push(KeyValue::from(( "name".into(), @@ -129,33 +134,24 @@ impl From for LogRecord { #[cfg(not(feature = "populate-logs-event-name"))] attributes }, - trace_id: value - .record + trace_id: record .trace_context .as_ref() .map(|c| c.trace_id.to_string()), - span_id: value - .record + span_id: record.trace_context.as_ref().map(|c| c.span_id.to_string()), + flags: record .trace_context .as_ref() - .map(|c| c.span_id.to_string()), - flags: value - .record - .trace_context .map(|c| c.trace_flags.map(|f| f.to_u8())) .unwrap_or_default(), - time_unix_nano: value.record.timestamp, - time: value.record.timestamp, - observed_time_unix_nano: value.record.observed_timestamp.unwrap(), - observed_time: value.record.observed_timestamp.unwrap(), - severity_number: value - .record - .severity_number - .map(|u| u as u32) - .unwrap_or_default(), + time_unix_nano: record.timestamp, + time: record.timestamp, + observed_time_unix_nano: record.observed_timestamp.unwrap(), + observed_time: record.observed_timestamp.unwrap(), + severity_number: 
record.severity_number.map(|u| u as u32).unwrap_or_default(), dropped_attributes_count: 0, - severity_text: value.record.severity_text, - body: value.record.body.map(|a| a.into()), + severity_text: record.severity_text.clone(), + body: record.body.clone().map(|a| a.into()), } } } diff --git a/stress/src/logs.rs b/stress/src/logs.rs index 6cec97463c..1798401e32 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -20,7 +20,7 @@ mod throughput; pub struct NoOpLogProcessor; impl LogProcessor for NoOpLogProcessor { - fn emit(&self, _data: &mut opentelemetry_sdk::export::logs::LogData) {} + fn emit(&self, _data: &mut opentelemetry_sdk::logs::LogData) {} fn force_flush(&self) -> opentelemetry::logs::LogResult<()> { Ok(())