appender-tracing: Include trace ID and span ID in logs when nested in tracing crate's spans #2438
@@ -17,6 +17,7 @@ tracing = { workspace = true, features = ["std"]}
tracing-core = { workspace = true }
tracing-log = { version = "0.2", optional = true }
tracing-subscriber = { workspace = true, features = ["registry", "std"] }
+tracing-opentelemetry = { version = "0.28", optional = true }
Review comment: Will there be a conflict if the application uses version 0.29 of tracing-opentelemetry?

Reply: tracing-opentelemetry v0.29 hasn't been released, but I think I see what you are getting at. tracing-opentelemetry v0.28 (the latest release) depends on opentelemetry v0.27, so it won't be compatible with opentelemetry v0.28 when that is released. Typically all the opentelemetry crates are bumped together, so there is a set of versions of these crates that are compatible with each other. However, that means that at the point opentelemetry-appender-tracing v0.28 is released, there wouldn't be a compatible version of tracing-opentelemetry without coordination with that external crate. Given that this is an experimental feature, it probably doesn't make sense to wait for tracing-opentelemetry v0.29 before releasing opentelemetry-appender-tracing. Removing the version constraint would probably work in practice with these unstable versions, since the constraint on the opentelemetry crates should ensure the correct version is used. However, tracing-opentelemetry could introduce a breaking change that is incompatible with opentelemetry-appender-tracing without changing the opentelemetry v0.x version it depends on, and in that case cargo could pick the incompatible version of tracing-opentelemetry even when a compatible one exists. The simplest thing to do for now (while things are unstable), when releasing a new v0.x version of the opentelemetry crates, would be to optimistically bump this optional tracing-opentelemetry dependency to its next v0.y version. E.g. when releasing opentelemetry v0.28.0, depend on tracing-opentelemetry v0.29, so that tracing-opentelemetry can be made compatible with opentelemetry-appender-tracing without any further changes to opentelemetry-appender-tracing. If tracing-opentelemetry v0.29.0 turns out not to be compatible, a v0.28.1 release of opentelemetry-appender-tracing could fix this feature. It also won't result in an incompatible v0.30 of tracing-opentelemetry being used when v0.29 already works with this feature. Functionally, you could think of a new v0.x release of the opentelemetry crates (including opentelemetry-appender-tracing) as removing this feature, then automatically reintroducing it when the next v0.y version of tracing-opentelemetry is released. This could be done manually, which would allow for extra testing after the v0.y release of tracing-opentelemetry, but that seems like unnecessary work and complicates the opentelemetry-appender-tracing release process. Once things stabilize, this could be coordinated more carefully between opentelemetry-appender-tracing and tracing-opentelemetry using RC releases, such that the new major version of opentelemetry-appender-tracing is made compatible with an RC version of tracing-opentelemetry.

Reply: Thanks @dylanahsmith. I agree that optimistically bumping the version of tracing-opentelemetry to the next v0.y (e.g. v0.29 when releasing opentelemetry v0.28) seems like a practical approach for now, given the experimental nature of the feature. If compatibility issues arise, a quick patch release can address them.

[dev-dependencies]
log = { workspace = true }
@@ -33,6 +34,7 @@ pprof = { version = "0.13", features = ["flamegraph", "criterion"] }
[features]
experimental_metadata_attributes = ["dep:tracing-log"]
spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"]
+experimental_use_tracing_span_context = ["tracing-opentelemetry"]

[[bench]]
@@ -8,7 +8,7 @@ use tracing_core::Level;
use tracing_core::Metadata;
#[cfg(feature = "experimental_metadata_attributes")]
use tracing_log::NormalizeEvent;
-use tracing_subscriber::Layer;
+use tracing_subscriber::{registry::LookupSpan, Layer};

const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing";
@@ -149,7 +149,7 @@ where

impl<S, P, L> Layer<S> for OpenTelemetryTracingBridge<P, L>
where
-    S: tracing::Subscriber,
+    S: tracing::Subscriber + for<'a> LookupSpan<'a>,
    P: LoggerProvider<Logger = L> + Send + Sync + 'static,
    L: Logger + Send + Sync + 'static,
{
@@ -180,6 +180,26 @@ where
        // Visit fields.
        event.record(&mut visitor);

+        #[cfg(feature = "experimental_use_tracing_span_context")]
+        if let Some(span) = _ctx.event_span(event) {
+            use tracing_opentelemetry::OtelData;
+            let opt_span_id = span
+                .extensions()
+                .get::<OtelData>()
+                .and_then(|otd| otd.builder.span_id);
+
+            let opt_trace_id = span.scope().last().and_then(|root_span| {
Review comment: Is it really necessary to look for the root span to get the trace_id?

Reply: Typically the current span won't have the span id explicitly set on it, so it won't actually store the trace id itself; instead, the trace id is inherited when the span is built for sending.

Reply: I suppose going straight to the root span wouldn't support an explicitly set trace id on a non-root span. Is there a non-contrived example of when this might be done that I should add a test for and support? Or should this just be left as a limitation to avoid extra code and runtime overhead?

Reply: I looked at the listener code and yeah, it's only root spans that get the trace_id. Still, I think that we should pull out the […]

Reply: I did not take a deep look at how setting the trace_id is implemented, but I encountered an issue when using set_parent to set the trace_id (see #2824); this discussion thread looks like it could be related.

(A sketch of the nearest-ancestor alternative discussed here appears after this hunk.)
+                root_span
+                    .extensions()
+                    .get::<OtelData>()
+                    .and_then(|otd| otd.builder.trace_id)
+            });
+
+            if let Some((trace_id, span_id)) = opt_trace_id.zip(opt_span_id) {
+                log_record.set_trace_context(trace_id, span_id, None);
+            }
+        }

        //emit record
        self.logger.emit(log_record);
    }
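The thread above questions whether the bridge needs to reach for the root span at all. For comparison, here is a hedged sketch of the nearest-ancestor alternative discussed there: it walks the scope from the event's span toward the root and uses the first explicitly recorded trace id. The helper name is hypothetical and this is not the code this PR merges, only an illustration built on the same LookupSpan/OtelData types used in the hunk above.

// Hypothetical helper, not part of this PR: prefer the closest ancestor that
// explicitly recorded a trace id, instead of only consulting the root span.
use opentelemetry::trace::TraceId;
use tracing_opentelemetry::OtelData;
use tracing_subscriber::registry::{LookupSpan, SpanRef};

fn nearest_explicit_trace_id<'a, S>(span: &SpanRef<'a, S>) -> Option<TraceId>
where
    S: for<'b> LookupSpan<'b>,
{
    // `scope()` yields the current span first, then its ancestors up to the
    // root, so the first explicit trace id found is the closest one.
    span.scope().find_map(|ancestor| {
        ancestor
            .extensions()
            .get::<OtelData>()
            .and_then(|otel_data| otel_data.builder.trace_id)
    })
}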
@@ -495,6 +515,67 @@ mod tests {
        }
    }

+    #[cfg(feature = "experimental_use_tracing_span_context")]
+    #[test]
+    fn tracing_appender_inside_tracing_crate_context() {
+        use opentelemetry_sdk::testing::trace::InMemorySpanExporterBuilder;
+
+        // Arrange
+        let exporter: InMemoryLogExporter = InMemoryLogExporter::default();
+        let logger_provider = LoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+
+        // setup tracing layer to compare trace/span IDs against
+        let span_exporter = InMemorySpanExporterBuilder::new().build();
+        let tracer_provider = TracerProvider::builder()
+            .with_simple_exporter(span_exporter.clone())
+            .build();
+        let tracer = tracer_provider.tracer("test-tracer");
+
+        let level_filter = tracing_subscriber::filter::LevelFilter::INFO;
+        let log_layer =
+            layer::OpenTelemetryTracingBridge::new(&logger_provider).with_filter(level_filter);
+
+        let subscriber = tracing_subscriber::registry()
+            .with(log_layer)
+            .with(tracing_opentelemetry::layer().with_tracer(tracer));
+
+        // Avoiding global subscriber.init() as that does not play well with unit tests.
+        let _guard = tracing::subscriber::set_default(subscriber);
+
+        // Act
+        tracing::info_span!("outer-span").in_scope(|| {
+            error!("first-event");
+
+            tracing::info_span!("inner-span").in_scope(|| {
+                error!("second-event");
+            });
+        });
+
+        logger_provider.force_flush();
+
+        let logs = exporter.get_emitted_logs().expect("No emitted logs");
+        assert_eq!(logs.len(), 2);
+
+        let spans = span_exporter.get_finished_spans().unwrap();
+        assert_eq!(spans.len(), 2);
+
+        let trace_id = spans[0].span_context.trace_id();
+        assert_eq!(trace_id, spans[1].span_context.trace_id());
+        let inner_span_id = spans[0].span_context.span_id();
+        let outer_span_id = spans[1].span_context.span_id();
+        assert_eq!(outer_span_id, spans[0].parent_span_id);
+
+        let trace_ctx0 = logs[0].record.trace_context().unwrap();
+        let trace_ctx1 = logs[1].record.trace_context().unwrap();
+
+        assert_eq!(trace_ctx0.trace_id, trace_id);
+        assert_eq!(trace_ctx1.trace_id, trace_id);
+        assert_eq!(trace_ctx0.span_id, outer_span_id);
+        assert_eq!(trace_ctx1.span_id, inner_span_id);
+    }

    #[test]
    fn tracing_appender_standalone_with_tracing_log() {
        // Arrange
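Beyond the unit test above, here is a minimal sketch of how an application might wire these layers together outside of tests. It is illustrative only: the function name, the "example-tracer" instrumentation name, and passing in pre-built providers are assumptions, and it presumes the experimental_use_tracing_span_context feature of opentelemetry-appender-tracing is enabled in Cargo.toml.

// Sketch: wire the log bridge together with tracing-opentelemetry so that log
// records emitted inside `tracing` spans carry the trace/span ids.
use opentelemetry::trace::TracerProvider as _;
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_sdk::{logs::LoggerProvider, trace::TracerProvider};
use tracing_subscriber::prelude::*;

fn init_telemetry(logger_provider: &LoggerProvider, tracer_provider: &TracerProvider) {
    // Bridge `tracing` events into OpenTelemetry logs.
    let log_layer = OpenTelemetryTracingBridge::new(logger_provider);

    // Record `tracing` spans as OpenTelemetry spans; the bridge reads the
    // OtelData extension that this layer attaches to each span.
    let tracer = tracer_provider.tracer("example-tracer");
    let span_layer = tracing_opentelemetry::layer().with_tracer(tracer);

    // Install both layers globally (the unit test uses set_default instead).
    tracing_subscriber::registry()
        .with(log_layer)
        .with(span_layer)
        .init();
}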