From ad97ad6948c3b39236982ebf887dd5d875f365b9 Mon Sep 17 00:00:00 2001 From: Thomas Karpiniec Date: Thu, 14 Nov 2019 22:15:56 +1100 Subject: [PATCH 1/9] core: vendor lazy_static and spin for no_std support --- tracing-core/Cargo.toml | 8 - tracing-core/src/callsite.rs | 1 + tracing-core/src/lazy_static/LICENSE | 26 ++ tracing-core/src/lazy_static/core_lazy.rs | 29 ++ tracing-core/src/lazy_static/inline_lazy.rs | 55 +++ tracing-core/src/lazy_static/mod.rs | 106 +++++ tracing-core/src/lib.rs | 11 +- tracing-core/src/spin/LICENSE | 21 + tracing-core/src/spin/mod.rs | 11 + tracing-core/src/spin/mutex.rs | 406 ++++++++++++++++++++ tracing-core/src/spin/once.rs | 290 ++++++++++++++ tracing/Cargo.toml | 3 - 12 files changed, 953 insertions(+), 14 deletions(-) create mode 100644 tracing-core/src/lazy_static/LICENSE create mode 100644 tracing-core/src/lazy_static/core_lazy.rs create mode 100644 tracing-core/src/lazy_static/inline_lazy.rs create mode 100644 tracing-core/src/lazy_static/mod.rs create mode 100644 tracing-core/src/spin/LICENSE create mode 100644 tracing-core/src/spin/mod.rs create mode 100644 tracing-core/src/spin/mutex.rs create mode 100644 tracing-core/src/spin/once.rs diff --git a/tracing-core/Cargo.toml b/tracing-core/Cargo.toml index 101919417f..f462bbc189 100644 --- a/tracing-core/Cargo.toml +++ b/tracing-core/Cargo.toml @@ -32,11 +32,3 @@ std = [] [badges] azure-devops = { project = "tracing/tracing", pipeline = "tokio-rs.tracing", build = "1" } maintenance = { status = "actively-developed" } - -[dependencies] -lazy_static = "1" - -[target.'cfg(not(feature = "std"))'.dependencies] -spin = "0.5" -lazy_static = { version = "1", features = ["spin_no_std"] } - diff --git a/tracing-core/src/callsite.rs b/tracing-core/src/callsite.rs index 8b672b6a1e..33dada4d29 100644 --- a/tracing-core/src/callsite.rs +++ b/tracing-core/src/callsite.rs @@ -12,6 +12,7 @@ use crate::{ subscriber::Interest, Metadata, }; +use crate::lazy_static; lazy_static! { static ref REGISTRY: Mutex = Mutex::new(Registry { diff --git a/tracing-core/src/lazy_static/LICENSE b/tracing-core/src/lazy_static/LICENSE new file mode 100644 index 0000000000..28e478827c --- /dev/null +++ b/tracing-core/src/lazy_static/LICENSE @@ -0,0 +1,26 @@ + +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
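[Editor's note: the sketch below is an illustration added for orientation, not part of the patch. It shows the calling pattern the vendored macro keeps working under no_std: a global whose initializer runs on first dereference, guarded by the spin-backed `Once` in the files that follow rather than by `std::sync::Once`. `TABLE` and `build_table` are hypothetical names.]

```rust
// Illustration only -- hypothetical names, not code from this series.
lazy_static! {
    // `build_table()` is not run here; it runs on the first `*TABLE` access.
    static ref TABLE: Vec<u32> = build_table();
}

fn build_table() -> Vec<u32> {
    // Executed at most once, even with concurrent first accesses.
    (0..64).map(|i| i * i).collect()
}
```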
diff --git a/tracing-core/src/lazy_static/core_lazy.rs b/tracing-core/src/lazy_static/core_lazy.rs
new file mode 100644
index 0000000000..b5df71f9f4
--- /dev/null
+++ b/tracing-core/src/lazy_static/core_lazy.rs
@@ -0,0 +1,29 @@
+// Copyright 2016 lazy-static.rs Developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::spin::Once;
+
+pub struct Lazy<T: Sync>(Once<T>);
+
+impl<T: Sync> Lazy<T> {
+    pub const INIT: Self = Lazy(Once::INIT);
+
+    #[inline(always)]
+    pub fn get<F>(&'static self, builder: F) -> &T
+        where F: FnOnce() -> T
+    {
+        self.0.call_once(builder)
+    }
+}
+
+#[macro_export]
+#[doc(hidden)]
+macro_rules! __lazy_static_create {
+    ($NAME:ident, $T:ty) => {
+        static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::INIT;
+    }
+}
diff --git a/tracing-core/src/lazy_static/inline_lazy.rs b/tracing-core/src/lazy_static/inline_lazy.rs
new file mode 100644
index 0000000000..a9c23b8be7
--- /dev/null
+++ b/tracing-core/src/lazy_static/inline_lazy.rs
@@ -0,0 +1,55 @@
+// Copyright 2016 lazy-static.rs Developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use std::prelude::v1::*;
+use std::cell::Cell;
+use std::hint::unreachable_unchecked;
+use std::sync::Once;
+#[allow(deprecated)]
+pub use std::sync::ONCE_INIT;
+
+// FIXME: Replace Option<T> with MaybeUninit<T> (stable since 1.36.0)
+#[allow(missing_debug_implementations)]
+pub struct Lazy<T: Sync>(Cell<Option<T>>, Once);
+
+impl<T: Sync> Lazy<T> {
+    #[allow(deprecated)]
+    pub const INIT: Self = Lazy(Cell::new(None), ONCE_INIT);
+
+    #[inline(always)]
+    pub fn get<F>(&'static self, f: F) -> &T
+    where
+        F: FnOnce() -> T,
+    {
+        self.1.call_once(|| {
+            self.0.set(Some(f()));
+        });
+
+        // `self.0` is guaranteed to be `Some` by this point
+        // The `Once` will catch and propagate panics
+        unsafe {
+            match *self.0.as_ptr() {
+                Some(ref x) => x,
+                None => {
+                    debug_assert!(false, "attempted to dereference an uninitialized lazy static. This is a bug");
+
+                    unreachable_unchecked()
+                },
+            }
+        }
+    }
+}
+
+unsafe impl<T: Sync> Sync for Lazy<T> {}
+
+#[macro_export]
+#[doc(hidden)]
+macro_rules! __lazy_static_create {
+    ($NAME:ident, $T:ty) => {
+        static $NAME: $crate::lazy_static::lazy::Lazy<$T> = $crate::lazy_static::lazy::Lazy::INIT;
+    };
+}
diff --git a/tracing-core/src/lazy_static/mod.rs b/tracing-core/src/lazy_static/mod.rs
new file mode 100644
index 0000000000..ceb907049d
--- /dev/null
+++ b/tracing-core/src/lazy_static/mod.rs
@@ -0,0 +1,106 @@
+
+
+// Copyright 2016 lazy-static.rs Developers
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+/*!
+A macro for declaring lazily evaluated statics.
+Using this macro, it is possible to have `static`s that require code to be
+executed at runtime in order to be initialized.
+This includes anything requiring heap allocations, like vectors or hash maps,
+as well as anything that requires function calls to be computed.
+*/
+
+#[cfg(feature = "std")]
+#[path="inline_lazy.rs"]
+#[doc(hidden)]
+pub mod lazy;
+
+#[cfg(not(feature = "std"))]
+#[path="core_lazy.rs"]
+#[doc(hidden)]
+pub mod lazy;
+
+#[doc(hidden)]
+pub use core::ops::Deref as __Deref;
+
+#[macro_export(local_inner_macros)]
+#[doc(hidden)]
+macro_rules!
__lazy_static_internal { + // optional visibility restrictions are wrapped in `()` to allow for + // explicitly passing otherwise implicit information about private items + ($(#[$attr:meta])* ($($vis:tt)*) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@MAKE TY, $(#[$attr])*, ($($vis)*), $N); + __lazy_static_internal!(@TAIL, $N : $T = $e); + lazy_static!($($t)*); + }; + (@TAIL, $N:ident : $T:ty = $e:expr) => { + impl $crate::lazy_static::__Deref for $N { + type Target = $T; + fn deref(&self) -> &$T { + #[inline(always)] + fn __static_ref_initialize() -> $T { $e } + + #[inline(always)] + fn __stability() -> &'static $T { + __lazy_static_create!(LAZY, $T); + LAZY.get(__static_ref_initialize) + } + __stability() + } + } + impl $crate::lazy_static::LazyStatic for $N { + fn initialize(lazy: &Self) { + let _ = &**lazy; + } + } + }; + // `vis` is wrapped in `()` to prevent parsing ambiguity + (@MAKE TY, $(#[$attr:meta])*, ($($vis:tt)*), $N:ident) => { + #[allow(missing_copy_implementations)] + #[allow(non_camel_case_types)] + #[allow(dead_code)] + $(#[$attr])* + $($vis)* struct $N {__private_field: ()} + #[doc(hidden)] + $($vis)* static $N: $N = $N {__private_field: ()}; + }; + () => () +} + +#[macro_export(local_inner_macros)] +/// lazy_static (suppress docs_missing warning) +macro_rules! lazy_static { + ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + // use `()` to explicitly forward the information about private items + __lazy_static_internal!($(#[$attr])* () static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!($(#[$attr])* (pub) static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub ($($vis:tt)+) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!($(#[$attr])* (pub ($($vis)+)) static ref $N : $T = $e; $($t)*); + }; + () => () +} + +/// Support trait for enabling a few common operation on lazy static values. +/// +/// This is implemented by each defined lazy static, and +/// used by the free functions in this crate. +pub trait LazyStatic { + #[doc(hidden)] + fn initialize(lazy: &Self); +} + +/// Takes a shared reference to a lazy static and initializes +/// it if it has not been already. +/// +/// This can be used to control the initialization point of a lazy static. +pub fn initialize(lazy: &T) { + LazyStatic::initialize(lazy); +} diff --git a/tracing-core/src/lib.rs b/tracing-core/src/lib.rs index c0f5e42755..0659f748f0 100644 --- a/tracing-core/src/lib.rs +++ b/tracing-core/src/lib.rs @@ -95,9 +95,6 @@ #[cfg(not(feature = "std"))] extern crate alloc; -#[macro_use] -extern crate lazy_static; - /// Statically constructs an [`Identifier`] for the provided [`Callsite`]. 
 ///
 /// This may be used in contexts, such as static initializers, where the
@@ -219,6 +216,14 @@ pub mod span;
 pub(crate) mod stdlib;
 pub mod subscriber;
 
+// Vendored version of spin 0.5.2 (0387621)
+// `mutex` and `once` modules only
+#[cfg(not(feature = "std"))]
+pub mod spin;
+
+// Vendored version of lazy_static 1.4.0 (4216696)
+pub mod lazy_static;
+
 #[doc(inline)]
 pub use self::{
     callsite::Callsite,
diff --git a/tracing-core/src/spin/LICENSE b/tracing-core/src/spin/LICENSE
new file mode 100644
index 0000000000..84d5f4d7af
--- /dev/null
+++ b/tracing-core/src/spin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathijs van de Nes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/tracing-core/src/spin/mod.rs b/tracing-core/src/spin/mod.rs
new file mode 100644
index 0000000000..d712c0b09a
--- /dev/null
+++ b/tracing-core/src/spin/mod.rs
@@ -0,0 +1,11 @@
+//! Synchronization primitives based on spinning
+
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+
+pub use mutex::*;
+pub use once::*;
+
+mod mutex;
+mod once;
diff --git a/tracing-core/src/spin/mutex.rs b/tracing-core/src/spin/mutex.rs
new file mode 100644
index 0000000000..bf346fce44
--- /dev/null
+++ b/tracing-core/src/spin/mutex.rs
@@ -0,0 +1,406 @@
+use core::sync::atomic::{AtomicBool, Ordering, spin_loop_hint as cpu_relax};
+use core::cell::UnsafeCell;
+use core::marker::Sync;
+use core::ops::{Drop, Deref, DerefMut};
+use core::fmt;
+use core::option::Option::{self, None, Some};
+use core::default::Default;
+
+/// This type provides MUTual EXclusion based on spinning.
+///
+/// # Description
+///
+/// The behaviour of this lock is similar to its namesake in `std::sync`. It
+/// differs in the following way:
+///
+/// - The lock will not be poisoned in case of failure.
+///
+/// # Simple examples
+///
+/// ```
+/// use spin;
+/// let spin_mutex = spin::Mutex::new(0);
+///
+/// // Modify the data
+/// {
+///     let mut data = spin_mutex.lock();
+///     *data = 2;
+/// }
+///
+/// // Read the data
+/// let answer =
+/// {
+///     let data = spin_mutex.lock();
+///     *data
+/// };
+///
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread-safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let numthreads = 1000;
+/// let spin_mutex = Arc::new(spin::Mutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(numthreads + 1));
+///
+/// for _ in (0..numthreads)
+/// {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+///     std::thread::spawn(move||
+///     {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, numthreads);
+/// ```
+pub struct Mutex<T: ?Sized>
+{
+    lock: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// A guard through which the protected data can be accessed.
+///
+/// When the guard falls out of scope it will release the lock.
+#[derive(Debug)]
+pub struct MutexGuard<'a, T: ?Sized + 'a>
+{
+    lock: &'a AtomicBool,
+    data: &'a mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+impl<T> Mutex<T>
+{
+    /// Creates a new spinlock wrapping the supplied data.
+    ///
+    /// May be used statically:
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static MUTEX: spin::Mutex<()> = spin::Mutex::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    pub const fn new(user_data: T) -> Mutex<T>
+    {
+        Mutex
+        {
+            lock: AtomicBool::new(false),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let Mutex { data, .. } = self;
+        data.into_inner()
+    }
+}
+
+impl<T: ?Sized> Mutex<T>
+{
+    fn obtain_lock(&self)
+    {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
+        {
+            // Wait until the lock looks unlocked before retrying
+            while self.lock.load(Ordering::Relaxed)
+            {
+                cpu_relax();
+            }
+        }
+    }
+
+    /// Locks the spinlock and returns a guard.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let mylock = spin::Mutex::new(0);
+    /// {
+    ///     let mut data = mylock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped
+    /// }
+    ///
+    /// ```
+    pub fn lock(&self) -> MutexGuard<T>
+    {
+        self.obtain_lock();
+        MutexGuard
+        {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+
+    /// Force unlock the spinlock.
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    ///
+    /// If the lock isn't held, this is a no-op.
+    pub unsafe fn force_unlock(&self) {
+        self.lock.store(false, Ordering::Release);
+    }
+
+    /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
+    /// a guard within Some.
+    pub fn try_lock(&self) -> Option<MutexGuard<T>>
+    {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
+        {
+            Some(
+                MutexGuard {
+                    lock: &self.lock,
+                    data: unsafe { &mut *self.data.get() },
+                }
+            )
+        }
+        else
+        {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place -- the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut my_lock = spin::Mutex::new(0);
+    /// *my_lock.get_mut() = 10;
+    /// assert_eq!(*my_lock.lock(), 10);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner mutex.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
+    {
+        match self.try_lock()
+        {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Mutex<T> {
+    fn default() -> Mutex<T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
+{
+    type Target = T;
+    fn deref<'b>(&'b self) -> &'b T { &*self.data }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
+{
+    fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
+}
+
+impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
+{
+    /// The dropping of the MutexGuard will release the lock it was created from.
+    fn drop(&mut self)
+    {
+        self.lock.store(false, Ordering::Release);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::thread;
+
+    use super::*;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let m = Mutex::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        static M: Mutex<()> = Mutex::new(());
+        static mut CNT: u32 = 0;
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        fn inc() {
+            for _ in 0..J {
+                unsafe {
+                    let _g = M.lock();
+                    CNT += 1;
+                }
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+            let tx2 = tx.clone();
+            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(unsafe {CNT}, J * K * 2);
+    }
+
+    #[test]
+    fn try_lock() {
+        let mutex = Mutex::new(42);
+
+        // First lock succeeds
+        let a = mutex.try_lock();
+        assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+        // Additional lock fails
+        let b = mutex.try_lock();
+        assert!(b.is_none());
+
+        // After dropping lock, it succeeds again
+        ::core::mem::drop(a);
+        let c = mutex.try_lock();
+        assert_eq!(c.as_ref().map(|r| **r), Some(42));
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = Mutex::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = Arc::new(Mutex::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move|| {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+
+    #[test]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move|| -> () {
+            struct Unwinder {
+                i: Arc<Mutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        }).join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn test_mutex_force_lock() {
+        let lock = Mutex::new(());
+        ::std::mem::forget(lock.lock());
+        unsafe {
+            lock.force_unlock();
+        }
+        assert!(lock.try_lock().is_some());
+    }
+}
diff --git a/tracing-core/src/spin/once.rs b/tracing-core/src/spin/once.rs
new file mode 100644
index 0000000000..40a8ca3dc0
--- /dev/null
+++ b/tracing-core/src/spin/once.rs
@@ -0,0 +1,290 @@
+use core::cell::UnsafeCell;
+use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
+use core::fmt;
+
+/// A synchronization primitive which can be used to run a one-time global
+/// initialization. Unlike its std equivalent, this is generalized so that the
+/// closure returns a value and it is stored. Once therefore acts something like
+/// a future, too.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// static START: spin::Once<()> = spin::Once::new();
+///
+/// START.call_once(|| {
+///     // run initialization here
+/// });
+/// ```
+pub struct Once<T> {
+    state: AtomicUsize,
+    data: UnsafeCell<Option<T>>, // TODO remove option and use mem::uninitialized
+}
+
+impl<T: fmt::Debug> fmt::Debug for Once<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try() {
+            Some(s) => write!(f, "Once {{ data: ")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ }}")
+        }
+    }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Sync + Send> Sync for Once<T> {}
+unsafe impl<T: Send> Send for Once<T> {}
+
+// Four states that a Once can be in, encoded into the lower bits of `state` in
+// the Once structure.
+const INCOMPLETE: usize = 0x0;
+const RUNNING: usize = 0x1;
+const COMPLETE: usize = 0x2;
+const PANICKED: usize = 0x3;
+
+use core::hint::unreachable_unchecked as unreachable;
+
+impl<T> Once<T> {
+    /// Initialization constant of `Once`.
+    pub const INIT: Self = Once {
+        state: AtomicUsize::new(INCOMPLETE),
+        data: UnsafeCell::new(None),
+    };
+
+    /// Creates a new `Once` value.
+    pub const fn new() -> Once<T> {
+        Self::INIT
+    }
+
+    fn force_get<'a>(&'a self) -> &'a T {
+        match unsafe { &*self.data.get() }.as_ref() {
+            None => unsafe { unreachable() },
+            Some(p) => p,
+        }
+    }
+
+    /// Performs an initialization routine once and only once. The given closure
+    /// will be executed if this is the first time `call_once` has been called,
+    /// and otherwise the routine will *not* be invoked.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns, it is guaranteed that some initialization
+    /// has run and completed (it may not be the closure specified). The
+    /// returned pointer will point to the result from the closure that was
+    /// run.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static INIT: spin::Once<usize> = spin::Once::new();
+    ///
+    /// fn get_cached_val() -> usize {
+    ///     *INIT.call_once(expensive_computation)
+    /// }
+    ///
+    /// fn expensive_computation() -> usize {
+    ///     // ...
+ /// # 2 + /// } + /// ``` + pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T + where F: FnOnce() -> T + { + let mut status = self.state.load(Ordering::SeqCst); + + if status == INCOMPLETE { + status = self.state.compare_and_swap(INCOMPLETE, + RUNNING, + Ordering::SeqCst); + if status == INCOMPLETE { // We init + // We use a guard (Finish) to catch panics caused by builder + let mut finish = Finish { state: &self.state, panicked: true }; + unsafe { *self.data.get() = Some(builder()) }; + finish.panicked = false; + + status = COMPLETE; + self.state.store(status, Ordering::SeqCst); + + // This next line is strictly an optimization + return self.force_get(); + } + } + + loop { + match status { + INCOMPLETE => unreachable!(), + RUNNING => { // We spin + cpu_relax(); + status = self.state.load(Ordering::SeqCst) + }, + PANICKED => panic!("Once has panicked"), + COMPLETE => return self.force_get(), + _ => unsafe { unreachable() }, + } + } + } + + /// Returns a pointer iff the `Once` was previously initialized + pub fn try<'a>(&'a self) -> Option<&'a T> { + match self.state.load(Ordering::SeqCst) { + COMPLETE => Some(self.force_get()), + _ => None, + } + } + + /// Like try, but will spin if the `Once` is in the process of being + /// initialized + pub fn wait<'a>(&'a self) -> Option<&'a T> { + loop { + match self.state.load(Ordering::SeqCst) { + INCOMPLETE => return None, + RUNNING => cpu_relax(), // We spin + COMPLETE => return Some(self.force_get()), + PANICKED => panic!("Once has panicked"), + _ => unsafe { unreachable() }, + } + } + } +} + +struct Finish<'a> { + state: &'a AtomicUsize, + panicked: bool, +} + +impl<'a> Drop for Finish<'a> { + fn drop(&mut self) { + if self.panicked { + self.state.store(PANICKED, Ordering::SeqCst); + } + } +} + +#[cfg(test)] +mod tests { + use std::prelude::v1::*; + + use std::sync::mpsc::channel; + use std::thread; + use super::Once; + + #[test] + fn smoke_once() { + static O: Once<()> = Once::new(); + let mut a = 0; + O.call_once(|| a += 1); + assert_eq!(a, 1); + O.call_once(|| a += 1); + assert_eq!(a, 1); + } + + #[test] + fn smoke_once_value() { + static O: Once = Once::new(); + let a = O.call_once(|| 1); + assert_eq!(*a, 1); + let b = O.call_once(|| 2); + assert_eq!(*b, 1); + } + + #[test] + fn stampede_once() { + static O: Once<()> = Once::new(); + static mut RUN: bool = false; + + let (tx, rx) = channel(); + for _ in 0..10 { + let tx = tx.clone(); + thread::spawn(move|| { + for _ in 0..4 { thread::yield_now() } + unsafe { + O.call_once(|| { + assert!(!RUN); + RUN = true; + }); + assert!(RUN); + } + tx.send(()).unwrap(); + }); + } + + unsafe { + O.call_once(|| { + assert!(!RUN); + RUN = true; + }); + assert!(RUN); + } + + for _ in 0..10 { + rx.recv().unwrap(); + } + } + + #[test] + fn try() { + static INIT: Once = Once::new(); + + assert!(INIT.try().is_none()); + INIT.call_once(|| 2); + assert_eq!(INIT.try().map(|r| *r), Some(2)); + } + + #[test] + fn try_no_wait() { + static INIT: Once = Once::new(); + + assert!(INIT.try().is_none()); + thread::spawn(move|| { + INIT.call_once(|| loop { }); + }); + assert!(INIT.try().is_none()); + } + + + #[test] + fn wait() { + static INIT: Once = Once::new(); + + assert!(INIT.wait().is_none()); + INIT.call_once(|| 3); + assert_eq!(INIT.wait().map(|r| *r), Some(3)); + } + + #[test] + fn panic() { + use ::std::panic; + + static INIT: Once<()> = Once::new(); + + // poison the once + let t = panic::catch_unwind(|| { + INIT.call_once(|| panic!()); + }); + assert!(t.is_err()); + + // poisoning propagates + let t = 
panic::catch_unwind(|| { + INIT.call_once(|| {}); + }); + assert!(t.is_err()); + } + + #[test] + fn init_constant() { + static O: Once<()> = Once::INIT; + let mut a = 0; + O.call_once(|| a += 1); + assert_eq!(a, 1); + O.call_once(|| a += 1); + assert_eq!(a, 1); + } +} \ No newline at end of file diff --git a/tracing/Cargo.toml b/tracing/Cargo.toml index dfa19ba2b3..9473213115 100644 --- a/tracing/Cargo.toml +++ b/tracing/Cargo.toml @@ -71,6 +71,3 @@ harness = false [badges] azure-devops = { project = "tracing/tracing", pipeline = "tokio-rs.tracing", build = "1" } maintenance = { status = "actively-developed" } - -[target.'cfg(not(feature = "std"))'.dependencies] -spin = "0.5" From 91726e034335a53e96ea2649060a6cecfecec86a Mon Sep 17 00:00:00 2001 From: Thomas Karpiniec Date: Thu, 14 Nov 2019 22:37:59 +1100 Subject: [PATCH 2/9] core: fmt vendored code --- tracing-core/src/callsite.rs | 2 +- tracing-core/src/lazy_static/core_lazy.rs | 5 +- tracing-core/src/lazy_static/inline_lazy.rs | 9 +- tracing-core/src/lazy_static/mod.rs | 6 +- tracing-core/src/spin/mutex.rs | 119 +++++++++----------- tracing-core/src/spin/once.rs | 61 +++++----- 6 files changed, 99 insertions(+), 103 deletions(-) diff --git a/tracing-core/src/callsite.rs b/tracing-core/src/callsite.rs index 33dada4d29..d34622e556 100644 --- a/tracing-core/src/callsite.rs +++ b/tracing-core/src/callsite.rs @@ -1,5 +1,6 @@ //! Callsites represent the source locations from which spans or events //! originate. +use crate::lazy_static; use crate::stdlib::{ fmt, hash::{Hash, Hasher}, @@ -12,7 +13,6 @@ use crate::{ subscriber::Interest, Metadata, }; -use crate::lazy_static; lazy_static! { static ref REGISTRY: Mutex = Mutex::new(Registry { diff --git a/tracing-core/src/lazy_static/core_lazy.rs b/tracing-core/src/lazy_static/core_lazy.rs index b5df71f9f4..6f6659f218 100644 --- a/tracing-core/src/lazy_static/core_lazy.rs +++ b/tracing-core/src/lazy_static/core_lazy.rs @@ -14,7 +14,8 @@ impl Lazy { #[inline(always)] pub fn get(&'static self, builder: F) -> &T - where F: FnOnce() -> T + where + F: FnOnce() -> T, { self.0.call_once(builder) } @@ -25,5 +26,5 @@ impl Lazy { macro_rules! __lazy_static_create { ($NAME:ident, $T:ty) => { static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::INIT; - } + }; } diff --git a/tracing-core/src/lazy_static/inline_lazy.rs b/tracing-core/src/lazy_static/inline_lazy.rs index a9c23b8be7..4816968398 100644 --- a/tracing-core/src/lazy_static/inline_lazy.rs +++ b/tracing-core/src/lazy_static/inline_lazy.rs @@ -5,9 +5,9 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use std::prelude::v1::*; use std::cell::Cell; use std::hint::unreachable_unchecked; +use std::prelude::v1::*; use std::sync::Once; #[allow(deprecated)] pub use std::sync::ONCE_INIT; @@ -35,10 +35,13 @@ impl Lazy { match *self.0.as_ptr() { Some(ref x) => x, None => { - debug_assert!(false, "attempted to derefence an uninitialized lazy static. This is a bug"); + debug_assert!( + false, + "attempted to derefence an uninitialized lazy static. 
This is a bug" + ); unreachable_unchecked() - }, + } } } } diff --git a/tracing-core/src/lazy_static/mod.rs b/tracing-core/src/lazy_static/mod.rs index ceb907049d..44df44f1d7 100644 --- a/tracing-core/src/lazy_static/mod.rs +++ b/tracing-core/src/lazy_static/mod.rs @@ -1,5 +1,3 @@ - - // Copyright 2016 lazy-static.rs Developers // // Licensed under the Apache License, Version 2.0, -{ +pub struct Mutex { lock: AtomicBool, data: UnsafeCell, } @@ -79,8 +78,7 @@ pub struct Mutex /// /// When the guard falls out of scope it will release the lock. #[derive(Debug)] -pub struct MutexGuard<'a, T: ?Sized + 'a> -{ +pub struct MutexGuard<'a, T: ?Sized + 'a> { lock: &'a AtomicBool, data: &'a mut T, } @@ -89,8 +87,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a> unsafe impl Sync for Mutex {} unsafe impl Send for Mutex {} -impl Mutex -{ +impl Mutex { /// Creates a new spinlock wrapping the supplied data. /// /// May be used statically: @@ -106,10 +103,8 @@ impl Mutex /// drop(lock); /// } /// ``` - pub const fn new(user_data: T) -> Mutex - { - Mutex - { + pub const fn new(user_data: T) -> Mutex { + Mutex { lock: AtomicBool::new(false), data: UnsafeCell::new(user_data), } @@ -124,15 +119,11 @@ impl Mutex } } -impl Mutex -{ - fn obtain_lock(&self) - { - while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false - { +impl Mutex { + fn obtain_lock(&self) { + while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false { // Wait until the lock looks unlocked before retrying - while self.lock.load(Ordering::Relaxed) - { + while self.lock.load(Ordering::Relaxed) { cpu_relax(); } } @@ -153,11 +144,9 @@ impl Mutex /// } /// /// ``` - pub fn lock(&self) -> MutexGuard - { + pub fn lock(&self) -> MutexGuard { self.obtain_lock(); - MutexGuard - { + MutexGuard { lock: &self.lock, data: unsafe { &mut *self.data.get() }, } @@ -176,19 +165,13 @@ impl Mutex /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns /// a guard within Some. 
- pub fn try_lock(&self) -> Option> - { - if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false - { - Some( - MutexGuard { - lock: &self.lock, - data: unsafe { &mut *self.data.get() }, - } - ) - } - else - { + pub fn try_lock(&self) -> Option> { + if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false { + Some(MutexGuard { + lock: &self.lock, + data: unsafe { &mut *self.data.get() }, + }) + } else { None } } @@ -212,15 +195,12 @@ impl Mutex } } -impl fmt::Debug for Mutex -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result - { - match self.try_lock() - { +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.try_lock() { Some(guard) => write!(f, "Mutex {{ data: ") - .and_then(|()| (&*guard).fmt(f)) - .and_then(|()| write!(f, "}}")), + .and_then(|()| (&*guard).fmt(f)) + .and_then(|()| write!(f, "}}")), None => write!(f, "Mutex {{ }}"), } } @@ -232,22 +212,22 @@ impl Default for Mutex { } } -impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> -{ +impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { type Target = T; - fn deref<'b>(&'b self) -> &'b T { &*self.data } + fn deref<'b>(&'b self) -> &'b T { + &*self.data + } } -impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> -{ - fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data } +impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { + fn deref_mut<'b>(&'b mut self) -> &'b mut T { + &mut *self.data + } } -impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> -{ +impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { /// The dropping of the MutexGuard will release the lock it was created from. - fn drop(&mut self) - { + fn drop(&mut self) { self.lock.store(false, Ordering::Release); } } @@ -256,9 +236,9 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> mod tests { use std::prelude::v1::*; + use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; - use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; use super::*; @@ -275,7 +255,7 @@ mod tests { #[test] fn lots_and_lots() { - static M: Mutex<()> = Mutex::new(()); + static M: Mutex<()> = Mutex::new(()); static mut CNT: u32 = 0; const J: u32 = 1000; const K: u32 = 3; @@ -292,16 +272,22 @@ mod tests { let (tx, rx) = channel(); for _ in 0..K { let tx2 = tx.clone(); - thread::spawn(move|| { inc(); tx2.send(()).unwrap(); }); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); let tx2 = tx.clone(); - thread::spawn(move|| { inc(); tx2.send(()).unwrap(); }); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); } drop(tx); for _ in 0..2 * K { rx.recv().unwrap(); } - assert_eq!(unsafe {CNT}, J * K * 2); + assert_eq!(unsafe { CNT }, J * K * 2); } #[test] @@ -353,7 +339,7 @@ mod tests { let arc = Arc::new(Mutex::new(1)); let arc2 = Arc::new(Mutex::new(arc)); let (tx, rx) = channel(); - let _t = thread::spawn(move|| { + let _t = thread::spawn(move || { let lock = arc2.lock(); let lock2 = lock.lock(); assert_eq!(*lock2, 1); @@ -366,7 +352,7 @@ mod tests { fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move|| -> () { + let _ = thread::spawn(move || -> () { struct Unwinder { i: Arc>, } @@ -377,7 +363,8 @@ mod tests { } let _u = Unwinder { i: arc2 }; panic!(); - }).join(); + }) + .join(); let lock = arc.lock(); assert_eq!(*lock, 2); } @@ -400,7 +387,7 @@ mod tests { ::std::mem::forget(lock.lock()); unsafe { lock.force_unlock(); - } + } assert!(lock.try_lock().is_some()); } } diff --git 
a/tracing-core/src/spin/once.rs b/tracing-core/src/spin/once.rs index 40a8ca3dc0..04c9765f68 100644 --- a/tracing-core/src/spin/once.rs +++ b/tracing-core/src/spin/once.rs @@ -1,6 +1,6 @@ use core::cell::UnsafeCell; -use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax}; use core::fmt; +use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering}; /// A synchronization primitive which can be used to run a one-time global /// initialization. Unlike its std equivalent, this is generalized so that the @@ -25,11 +25,11 @@ pub struct Once { impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.try() { + match self.r#try() { Some(s) => write!(f, "Once {{ data: ") - .and_then(|()| s.fmt(f)) - .and_then(|()| write!(f, "}}")), - None => write!(f, "Once {{ }}") + .and_then(|()| s.fmt(f)) + .and_then(|()| write!(f, "}}")), + None => write!(f, "Once {{ }}"), } } } @@ -62,7 +62,7 @@ impl Once { fn force_get<'a>(&'a self) -> &'a T { match unsafe { &*self.data.get() }.as_ref() { - None => unsafe { unreachable() }, + None => unsafe { unreachable() }, Some(p) => p, } } @@ -96,17 +96,22 @@ impl Once { /// } /// ``` pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T - where F: FnOnce() -> T + where + F: FnOnce() -> T, { let mut status = self.state.load(Ordering::SeqCst); if status == INCOMPLETE { - status = self.state.compare_and_swap(INCOMPLETE, - RUNNING, - Ordering::SeqCst); - if status == INCOMPLETE { // We init + status = self + .state + .compare_and_swap(INCOMPLETE, RUNNING, Ordering::SeqCst); + if status == INCOMPLETE { + // We init // We use a guard (Finish) to catch panics caused by builder - let mut finish = Finish { state: &self.state, panicked: true }; + let mut finish = Finish { + state: &self.state, + panicked: true, + }; unsafe { *self.data.get() = Some(builder()) }; finish.panicked = false; @@ -121,10 +126,11 @@ impl Once { loop { match status { INCOMPLETE => unreachable!(), - RUNNING => { // We spin + RUNNING => { + // We spin cpu_relax(); status = self.state.load(Ordering::SeqCst) - }, + } PANICKED => panic!("Once has panicked"), COMPLETE => return self.force_get(), _ => unsafe { unreachable() }, @@ -133,10 +139,10 @@ impl Once { } /// Returns a pointer iff the `Once` was previously initialized - pub fn try<'a>(&'a self) -> Option<&'a T> { + pub fn r#try<'a>(&'a self) -> Option<&'a T> { match self.state.load(Ordering::SeqCst) { COMPLETE => Some(self.force_get()), - _ => None, + _ => None, } } @@ -146,9 +152,9 @@ impl Once { loop { match self.state.load(Ordering::SeqCst) { INCOMPLETE => return None, - RUNNING => cpu_relax(), // We spin - COMPLETE => return Some(self.force_get()), - PANICKED => panic!("Once has panicked"), + RUNNING => cpu_relax(), // We spin + COMPLETE => return Some(self.force_get()), + PANICKED => panic!("Once has panicked"), _ => unsafe { unreachable() }, } } @@ -172,9 +178,9 @@ impl<'a> Drop for Finish<'a> { mod tests { use std::prelude::v1::*; + use super::Once; use std::sync::mpsc::channel; use std::thread; - use super::Once; #[test] fn smoke_once() { @@ -203,8 +209,10 @@ mod tests { let (tx, rx) = channel(); for _ in 0..10 { let tx = tx.clone(); - thread::spawn(move|| { - for _ in 0..4 { thread::yield_now() } + thread::spawn(move || { + for _ in 0..4 { + thread::yield_now() + } unsafe { O.call_once(|| { assert!(!RUN); @@ -230,7 +238,7 @@ mod tests { } #[test] - fn try() { + fn r#try() { static INIT: Once = Once::new(); assert!(INIT.try().is_none()); @@ -243,13 +251,12 @@ mod tests { 
static INIT: Once = Once::new(); assert!(INIT.try().is_none()); - thread::spawn(move|| { - INIT.call_once(|| loop { }); + thread::spawn(move || { + INIT.call_once(|| loop {}); }); assert!(INIT.try().is_none()); } - #[test] fn wait() { static INIT: Once = Once::new(); @@ -287,4 +294,4 @@ mod tests { O.call_once(|| a += 1); assert_eq!(a, 1); } -} \ No newline at end of file +} From c292d32b7a7657d10f6617bfe9e3d0e37bad71fa Mon Sep 17 00:00:00 2001 From: Thomas Karpiniec Date: Fri, 15 Nov 2019 14:49:21 +1100 Subject: [PATCH 3/9] core: trim down vendored spin, reduce visibility and addresss warnings --- tracing-core/src/lib.rs | 15 +- tracing-core/src/spin/mod.rs | 8 +- tracing-core/src/spin/mutex.rs | 296 +-------------------------------- tracing-core/src/spin/once.rs | 153 +---------------- 4 files changed, 19 insertions(+), 453 deletions(-) diff --git a/tracing-core/src/lib.rs b/tracing-core/src/lib.rs index 0659f748f0..afdff4d0d4 100644 --- a/tracing-core/src/lib.rs +++ b/tracing-core/src/lib.rs @@ -206,6 +206,16 @@ macro_rules! metadata { }; } +// Trimmed-down vendored version of spin 0.5.2 (0387621) +// Dependency of no_std lazy_static, not required in a std build +#[cfg(not(feature = "std"))] +#[doc(hidden)] +pub(crate) mod spin; + +#[cfg(not(feature = "std"))] +#[doc(hidden)] +pub use self::spin::Once; + pub mod callsite; pub mod dispatcher; pub mod event; @@ -216,11 +226,6 @@ pub mod span; pub(crate) mod stdlib; pub mod subscriber; -// Vendored version of spin 0.5.2 (0387621) -// `mutex` and `once` modules only -#[cfg(not(feature = "std"))] -pub mod spin; - // Vendored version of lazy_static 1.4.0 (4216696) pub mod lazy_static; diff --git a/tracing-core/src/spin/mod.rs b/tracing-core/src/spin/mod.rs index d712c0b09a..767fcf649b 100644 --- a/tracing-core/src/spin/mod.rs +++ b/tracing-core/src/spin/mod.rs @@ -1,11 +1,7 @@ //! Synchronization primitives based on spinning -#[cfg(test)] -#[macro_use] -extern crate std; - -pub use mutex::*; -pub use once::*; +pub(crate) use mutex::*; +pub use once::Once; mod mutex; mod once; diff --git a/tracing-core/src/spin/mutex.rs b/tracing-core/src/spin/mutex.rs index ba4067d014..383a2cd839 100644 --- a/tracing-core/src/spin/mutex.rs +++ b/tracing-core/src/spin/mutex.rs @@ -7,69 +7,7 @@ use core::option::Option::{self, None, Some}; use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering}; /// This type provides MUTual EXclusion based on spinning. -/// -/// # Description -/// -/// The behaviour of these lock is similar to their namesakes in `std::sync`. 
they -/// differ on the following: -/// -/// - The lock will not be poisoned in case of failure; -/// -/// # Simple examples -/// -/// ``` -/// use spin; -/// let spin_mutex = spin::Mutex::new(0); -/// -/// // Modify the data -/// { -/// let mut data = spin_mutex.lock(); -/// *data = 2; -/// } -/// -/// // Read the data -/// let answer = -/// { -/// let data = spin_mutex.lock(); -/// *data -/// }; -/// -/// assert_eq!(answer, 2); -/// ``` -/// -/// # Thread-safety example -/// -/// ``` -/// use spin; -/// use std::sync::{Arc, Barrier}; -/// -/// let numthreads = 1000; -/// let spin_mutex = Arc::new(spin::Mutex::new(0)); -/// -/// // We use a barrier to ensure the readout happens after all writing -/// let barrier = Arc::new(Barrier::new(numthreads + 1)); -/// -/// for _ in (0..numthreads) -/// { -/// let my_barrier = barrier.clone(); -/// let my_lock = spin_mutex.clone(); -/// std::thread::spawn(move|| -/// { -/// let mut guard = my_lock.lock(); -/// *guard += 1; -/// -/// // Release the lock to prevent a deadlock -/// drop(guard); -/// my_barrier.wait(); -/// }); -/// } -/// -/// barrier.wait(); -/// -/// let answer = { *spin_mutex.lock() }; -/// assert_eq!(answer, numthreads); -/// ``` -pub struct Mutex { +pub(crate) struct Mutex { lock: AtomicBool, data: UnsafeCell, } @@ -78,7 +16,7 @@ pub struct Mutex { /// /// When the guard falls out of scope it will release the lock. #[derive(Debug)] -pub struct MutexGuard<'a, T: ?Sized + 'a> { +pub(crate) struct MutexGuard<'a, T: ?Sized> { lock: &'a AtomicBool, data: &'a mut T, } @@ -89,34 +27,12 @@ unsafe impl Send for Mutex {} impl Mutex { /// Creates a new spinlock wrapping the supplied data. - /// - /// May be used statically: - /// - /// ``` - /// use spin; - /// - /// static MUTEX: spin::Mutex<()> = spin::Mutex::new(()); - /// - /// fn demo() { - /// let lock = MUTEX.lock(); - /// // do something with lock - /// drop(lock); - /// } - /// ``` - pub const fn new(user_data: T) -> Mutex { + pub(crate) const fn new(user_data: T) -> Mutex { Mutex { lock: AtomicBool::new(false), data: UnsafeCell::new(user_data), } } - - /// Consumes this mutex, returning the underlying data. - pub fn into_inner(self) -> T { - // We know statically that there are no outstanding references to - // `self` so there's no need to lock. - let Mutex { data, .. } = self; - data.into_inner() - } } impl Mutex { @@ -133,18 +49,7 @@ impl Mutex { /// /// The returned value may be dereferenced for data access /// and the lock will be dropped when the guard falls out of scope. - /// - /// ``` - /// let mylock = spin::Mutex::new(0); - /// { - /// let mut data = mylock.lock(); - /// // The lock is now locked and the data can be accessed - /// *data += 1; - /// // The lock is implicitly dropped - /// } - /// - /// ``` - pub fn lock(&self) -> MutexGuard { + pub(crate) fn lock(&self) -> MutexGuard<'_, T> { self.obtain_lock(); MutexGuard { lock: &self.lock, @@ -152,20 +57,9 @@ impl Mutex { } } - /// Force unlock the spinlock. - /// - /// This is *extremely* unsafe if the lock is not held by the current - /// thread. However, this can be useful in some instances for exposing the - /// lock to FFI that doesn't know how to deal with RAII. - /// - /// If the lock isn't held, this is a no-op. - pub unsafe fn force_unlock(&self) { - self.lock.store(false, Ordering::Release); - } - /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns /// a guard within Some. 
- pub fn try_lock(&self) -> Option> { + pub(crate) fn try_lock(&self) -> Option> { if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false { Some(MutexGuard { lock: &self.lock, @@ -175,28 +69,10 @@ impl Mutex { None } } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `Mutex` mutably, no actual locking needs to - /// take place -- the mutable borrow statically guarantees no locks exist. - /// - /// # Examples - /// - /// ``` - /// let mut my_lock = spin::Mutex::new(0); - /// *my_lock.get_mut() = 10; - /// assert_eq!(*my_lock.lock(), 10); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - // We know statically that there are no other references to `self`, so - // there's no need to lock the inner mutex. - unsafe { &mut *self.data.get() } - } } impl fmt::Debug for Mutex { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.try_lock() { Some(guard) => write!(f, "Mutex {{ data: ") .and_then(|()| (&*guard).fmt(f)) @@ -231,163 +107,3 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { self.lock.store(false, Ordering::Release); } } - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - - use super::*; - - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); - - #[test] - fn smoke() { - let m = Mutex::new(()); - drop(m.lock()); - drop(m.lock()); - } - - #[test] - fn lots_and_lots() { - static M: Mutex<()> = Mutex::new(()); - static mut CNT: u32 = 0; - const J: u32 = 1000; - const K: u32 = 3; - - fn inc() { - for _ in 0..J { - unsafe { - let _g = M.lock(); - CNT += 1; - } - } - } - - let (tx, rx) = channel(); - for _ in 0..K { - let tx2 = tx.clone(); - thread::spawn(move || { - inc(); - tx2.send(()).unwrap(); - }); - let tx2 = tx.clone(); - thread::spawn(move || { - inc(); - tx2.send(()).unwrap(); - }); - } - - drop(tx); - for _ in 0..2 * K { - rx.recv().unwrap(); - } - assert_eq!(unsafe { CNT }, J * K * 2); - } - - #[test] - fn try_lock() { - let mutex = Mutex::new(42); - - // First lock succeeds - let a = mutex.try_lock(); - assert_eq!(a.as_ref().map(|r| **r), Some(42)); - - // Additional lock failes - let b = mutex.try_lock(); - assert!(b.is_none()); - - // After dropping lock, it succeeds again - ::core::mem::drop(a); - let c = mutex.try_lock(); - assert_eq!(c.as_ref().map(|r| **r), Some(42)); - } - - #[test] - fn test_into_inner() { - let m = Mutex::new(NonCopy(10)); - assert_eq!(m.into_inner(), NonCopy(10)); - } - - #[test] - fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = Mutex::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_mutex_arc_nested() { - // Tests nested mutexes and access - // to underlying data. 
- let arc = Arc::new(Mutex::new(1)); - let arc2 = Arc::new(Mutex::new(arc)); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - let lock = arc2.lock(); - let lock2 = lock.lock(); - assert_eq!(*lock2, 1); - tx.send(()).unwrap(); - }); - rx.recv().unwrap(); - } - - #[test] - fn test_mutex_arc_access_in_unwind() { - let arc = Arc::new(Mutex::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - *self.i.lock() += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.lock(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_mutex_unsized() { - let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); - { - let b = &mut *mutex.lock(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*mutex.lock(), comp); - } - - #[test] - fn test_mutex_force_lock() { - let lock = Mutex::new(()); - ::std::mem::forget(lock.lock()); - unsafe { - lock.force_unlock(); - } - assert!(lock.try_lock().is_some()); - } -} diff --git a/tracing-core/src/spin/once.rs b/tracing-core/src/spin/once.rs index 04c9765f68..0bc47b566a 100644 --- a/tracing-core/src/spin/once.rs +++ b/tracing-core/src/spin/once.rs @@ -6,25 +6,13 @@ use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering}; /// initialization. Unlike its std equivalent, this is generalized so that the /// closure returns a value and it is stored. Once therefore acts something like /// a future, too. -/// -/// # Examples -/// -/// ``` -/// use spin; -/// -/// static START: spin::Once<()> = spin::Once::new(); -/// -/// START.call_once(|| { -/// // run initialization here -/// }); -/// ``` pub struct Once { state: AtomicUsize, data: UnsafeCell>, // TODO remove option and use mem::uninitialized } impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.r#try() { Some(s) => write!(f, "Once {{ data: ") .and_then(|()| s.fmt(f)) @@ -78,23 +66,6 @@ impl Once { /// has run and completed (it may not be the closure specified). The /// returned pointer will point to the result from the closure that was /// run. - /// - /// # Examples - /// - /// ``` - /// use spin; - /// - /// static INIT: spin::Once = spin::Once::new(); - /// - /// fn get_cached_val() -> usize { - /// *INIT.call_once(expensive_computation) - /// } - /// - /// fn expensive_computation() -> usize { - /// // ... 
- /// # 2 - /// } - /// ``` pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T where F: FnOnce() -> T, @@ -173,125 +144,3 @@ impl<'a> Drop for Finish<'a> { } } } - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - use super::Once; - use std::sync::mpsc::channel; - use std::thread; - - #[test] - fn smoke_once() { - static O: Once<()> = Once::new(); - let mut a = 0; - O.call_once(|| a += 1); - assert_eq!(a, 1); - O.call_once(|| a += 1); - assert_eq!(a, 1); - } - - #[test] - fn smoke_once_value() { - static O: Once = Once::new(); - let a = O.call_once(|| 1); - assert_eq!(*a, 1); - let b = O.call_once(|| 2); - assert_eq!(*b, 1); - } - - #[test] - fn stampede_once() { - static O: Once<()> = Once::new(); - static mut RUN: bool = false; - - let (tx, rx) = channel(); - for _ in 0..10 { - let tx = tx.clone(); - thread::spawn(move || { - for _ in 0..4 { - thread::yield_now() - } - unsafe { - O.call_once(|| { - assert!(!RUN); - RUN = true; - }); - assert!(RUN); - } - tx.send(()).unwrap(); - }); - } - - unsafe { - O.call_once(|| { - assert!(!RUN); - RUN = true; - }); - assert!(RUN); - } - - for _ in 0..10 { - rx.recv().unwrap(); - } - } - - #[test] - fn r#try() { - static INIT: Once = Once::new(); - - assert!(INIT.try().is_none()); - INIT.call_once(|| 2); - assert_eq!(INIT.try().map(|r| *r), Some(2)); - } - - #[test] - fn try_no_wait() { - static INIT: Once = Once::new(); - - assert!(INIT.try().is_none()); - thread::spawn(move || { - INIT.call_once(|| loop {}); - }); - assert!(INIT.try().is_none()); - } - - #[test] - fn wait() { - static INIT: Once = Once::new(); - - assert!(INIT.wait().is_none()); - INIT.call_once(|| 3); - assert_eq!(INIT.wait().map(|r| *r), Some(3)); - } - - #[test] - fn panic() { - use ::std::panic; - - static INIT: Once<()> = Once::new(); - - // poison the once - let t = panic::catch_unwind(|| { - INIT.call_once(|| panic!()); - }); - assert!(t.is_err()); - - // poisoning propagates - let t = panic::catch_unwind(|| { - INIT.call_once(|| {}); - }); - assert!(t.is_err()); - } - - #[test] - fn init_constant() { - static O: Once<()> = Once::INIT; - let mut a = 0; - O.call_once(|| a += 1); - assert_eq!(a, 1); - O.call_once(|| a += 1); - assert_eq!(a, 1); - } -} From 92328c7a6ae1b1a3c56489fc3b0d88de4fe89195 Mon Sep 17 00:00:00 2001 From: Thomas Karpiniec Date: Fri, 15 Nov 2019 14:50:52 +1100 Subject: [PATCH 4/9] core: limit vendored lazy_static for use only with no_std, fix warnings --- tracing-core/Cargo.toml | 5 +- tracing-core/src/lazy_static/core_lazy.rs | 10 ++-- tracing-core/src/lazy_static/inline_lazy.rs | 58 --------------------- tracing-core/src/lazy_static/mod.rs | 21 ++------ tracing-core/src/lib.rs | 16 ++++-- 5 files changed, 25 insertions(+), 85 deletions(-) delete mode 100644 tracing-core/src/lazy_static/inline_lazy.rs diff --git a/tracing-core/Cargo.toml b/tracing-core/Cargo.toml index f462bbc189..34028c6080 100644 --- a/tracing-core/Cargo.toml +++ b/tracing-core/Cargo.toml @@ -27,8 +27,11 @@ edition = "2018" [features] default = ["std"] -std = [] +std = ["lazy_static"] [badges] azure-devops = { project = "tracing/tracing", pipeline = "tokio-rs.tracing", build = "1" } maintenance = { status = "actively-developed" } + +[dependencies] +lazy_static = { version = "1", optional = true } diff --git a/tracing-core/src/lazy_static/core_lazy.rs b/tracing-core/src/lazy_static/core_lazy.rs index 6f6659f218..56cfe24ce3 100644 --- a/tracing-core/src/lazy_static/core_lazy.rs +++ b/tracing-core/src/lazy_static/core_lazy.rs @@ -5,15 +5,15 @@ // 
http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use crate::spin::Once; +use crate::Once; -pub struct Lazy(Once); +pub(crate) struct Lazy(Once); impl Lazy { - pub const INIT: Self = Lazy(Once::INIT); + pub(crate) const INIT: Self = Lazy(Once::INIT); #[inline(always)] - pub fn get(&'static self, builder: F) -> &T + pub(crate) fn get(&'static self, builder: F) -> &T where F: FnOnce() -> T, { @@ -25,6 +25,6 @@ impl Lazy { #[doc(hidden)] macro_rules! __lazy_static_create { ($NAME:ident, $T:ty) => { - static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::INIT; + static $NAME: $crate::lazy_static::lazy::Lazy<$T> = $crate::lazy_static::lazy::Lazy::INIT; }; } diff --git a/tracing-core/src/lazy_static/inline_lazy.rs b/tracing-core/src/lazy_static/inline_lazy.rs deleted file mode 100644 index 4816968398..0000000000 --- a/tracing-core/src/lazy_static/inline_lazy.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 lazy-static.rs Developers -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use std::cell::Cell; -use std::hint::unreachable_unchecked; -use std::prelude::v1::*; -use std::sync::Once; -#[allow(deprecated)] -pub use std::sync::ONCE_INIT; - -// FIXME: Replace Option with MaybeUninit (stable since 1.36.0) -#[allow(missing_debug_implementations)] -pub struct Lazy(Cell>, Once); - -impl Lazy { - #[allow(deprecated)] - pub const INIT: Self = Lazy(Cell::new(None), ONCE_INIT); - - #[inline(always)] - pub fn get(&'static self, f: F) -> &T - where - F: FnOnce() -> T, - { - self.1.call_once(|| { - self.0.set(Some(f())); - }); - - // `self.0` is guaranteed to be `Some` by this point - // The `Once` will catch and propagate panics - unsafe { - match *self.0.as_ptr() { - Some(ref x) => x, - None => { - debug_assert!( - false, - "attempted to derefence an uninitialized lazy static. This is a bug" - ); - - unreachable_unchecked() - } - } - } - } -} - -unsafe impl Sync for Lazy {} - -#[macro_export] -#[doc(hidden)] -macro_rules! __lazy_static_create { - ($NAME:ident, $T:ty) => { - static $NAME: $crate::lazy_static::lazy::Lazy<$T> = $crate::lazy_static::lazy::Lazy::INIT; - }; -} diff --git a/tracing-core/src/lazy_static/mod.rs b/tracing-core/src/lazy_static/mod.rs index 44df44f1d7..137ed1b01f 100644 --- a/tracing-core/src/lazy_static/mod.rs +++ b/tracing-core/src/lazy_static/mod.rs @@ -13,18 +13,11 @@ This includes anything requiring heap allocations, like vectors or hash maps, as well as anything that requires function calls to be computed. */ -#[cfg(feature = "std")] -#[path = "inline_lazy.rs"] -#[doc(hidden)] -pub mod lazy; - -#[cfg(not(feature = "std"))] #[path = "core_lazy.rs"] -#[doc(hidden)] -pub mod lazy; +pub(crate) mod lazy; #[doc(hidden)] -pub use core::ops::Deref as __Deref; +pub(crate) use core::ops::Deref as __Deref; #[macro_export(local_inner_macros)] #[doc(hidden)] @@ -90,15 +83,7 @@ macro_rules! lazy_static { /// /// This is implemented by each defined lazy static, and /// used by the free functions in this crate. -pub trait LazyStatic { +pub(crate) trait LazyStatic { #[doc(hidden)] fn initialize(lazy: &Self); } - -/// Takes a shared reference to a lazy static and initializes -/// it if it has not been already. -/// -/// This can be used to control the initialization point of a lazy static. 
-pub fn initialize<T: LazyStatic>(lazy: &T) {
-    LazyStatic::initialize(lazy);
-}
diff --git a/tracing-core/src/lib.rs b/tracing-core/src/lib.rs
index afdff4d0d4..c45c6a4212 100644
--- a/tracing-core/src/lib.rs
+++ b/tracing-core/src/lib.rs
@@ -206,6 +206,19 @@ macro_rules! metadata {
     };
 }
 
+// std uses lazy_static from crates.io
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate lazy_static;
+
+// no_std uses vendored version of lazy_static 1.4.0 (4216696) with spin
+// This can conflict when included in a project already using std lazy_static
+// Remove this module when cargo enables specifying dependencies for no_std
+#[cfg(not(feature = "std"))]
+#[doc(hidden)]
+#[macro_use]
+mod lazy_static;
+
 // Trimmed-down vendored version of spin 0.5.2 (0387621)
 // Dependency of no_std lazy_static, not required in a std build
 #[cfg(not(feature = "std"))]
@@ -226,9 +239,6 @@ pub mod span;
 pub(crate) mod stdlib;
 pub mod subscriber;
 
-// Vendored version of lazy_static 1.4.0 (4216696)
-pub mod lazy_static;
-
 #[doc(inline)]
 pub use self::{
     callsite::Callsite,

From 4a95b8b8e6447281c47098dc2a33fb378b4088d7 Mon Sep 17 00:00:00 2001
From: Thomas Karpiniec
Date: Fri, 15 Nov 2019 14:51:33 +1100
Subject: [PATCH 5/9] core: update paths to now-vendored no_std libs

---
 tracing-core/src/callsite.rs | 1 -
 tracing-core/src/stdlib.rs   | 8 ++++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/tracing-core/src/callsite.rs b/tracing-core/src/callsite.rs
index d34622e556..8b672b6a1e 100644
--- a/tracing-core/src/callsite.rs
+++ b/tracing-core/src/callsite.rs
@@ -1,6 +1,5 @@
 //! Callsites represent the source locations from which spans or events
 //! originate.
-use crate::lazy_static;
 use crate::stdlib::{
     fmt,
     hash::{Hash, Hasher},
diff --git a/tracing-core/src/stdlib.rs b/tracing-core/src/stdlib.rs
index 6d78456091..5047d4f486 100644
--- a/tracing-core/src/stdlib.rs
+++ b/tracing-core/src/stdlib.rs
@@ -51,7 +51,7 @@ mod no_std {
     pub(crate) mod sync {
         pub(crate) use alloc::sync::*;
         pub(crate) use core::sync::*;
-        pub(crate) use spin::MutexGuard;
+        pub(crate) use crate::spin::MutexGuard;
 
         /// This wraps `spin::Mutex` to return a `Result`, so that it can be
         /// used with code written against `std::sync::Mutex`.
@@ -60,17 +60,17 @@
         /// by `lock` will always be `Ok`.
         #[derive(Debug, Default)]
         pub(crate) struct Mutex<T> {
-            inner: spin::Mutex<T>,
+            inner: crate::spin::Mutex<T>,
         }
 
         impl<T> Mutex<T> {
             pub(crate) fn new(data: T) -> Self {
                 Self {
-                    inner: spin::Mutex::new(data),
+                    inner: crate::spin::Mutex::new(data),
                 }
             }
 
-            pub(crate) fn lock(&self) -> Result<spin::MutexGuard<'_, T>, ()> {
+            pub(crate) fn lock(&self) -> Result<MutexGuard<'_, T>, ()> {
                 Ok(self.inner.lock())
             }
         }

From 1af7b230150c1d08672d173f657a4624fa919a84 Mon Sep 17 00:00:00 2001
From: Thomas Karpiniec
Date: Fri, 15 Nov 2019 14:52:03 +1100
Subject: [PATCH 6/9] tracing: use spin::Once re-exported from tracing-core instead of crate

---
 tracing/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tracing/src/lib.rs b/tracing/src/lib.rs
index 2e9f8bf772..79849678f5 100644
--- a/tracing/src/lib.rs
+++ b/tracing/src/lib.rs
@@ -701,7 +701,7 @@ pub mod __macro_support {
     pub use crate::stdlib::sync::Once;
 
     #[cfg(not(feature = "std"))]
-    pub type Once = spin::Once<()>;
+    pub type Once = tracing_core::Once<()>;
 }
 
 mod sealed {

From fde192d157d7be2b3c8d2436c6c8e49be315948d Mon Sep 17 00:00:00 2001
From: Thomas Karpiniec
Date: Fri, 15 Nov 2019 14:56:18 +1100
Subject: [PATCH 7/9] core: remove ineffectual doc(hidden)

---
 tracing-core/src/lib.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tracing-core/src/lib.rs b/tracing-core/src/lib.rs
index c45c6a4212..a3bb882323 100644
--- a/tracing-core/src/lib.rs
+++ b/tracing-core/src/lib.rs
@@ -215,14 +215,12 @@ extern crate lazy_static;
 // This can conflict when included in a project already using std lazy_static
 // Remove this module when cargo enables specifying dependencies for no_std
 #[cfg(not(feature = "std"))]
-#[doc(hidden)]
 #[macro_use]
 mod lazy_static;
 
 // Trimmed-down vendored version of spin 0.5.2 (0387621)
 // Dependency of no_std lazy_static, not required in a std build
 #[cfg(not(feature = "std"))]
-#[doc(hidden)]
 pub(crate) mod spin;
 
 #[cfg(not(feature = "std"))]

From d94e0e19cb09d19ea31f7f0e036045a0f81a360a Mon Sep 17 00:00:00 2001
From: Thomas Karpiniec
Date: Fri, 15 Nov 2019 15:02:11 +1100
Subject: [PATCH 8/9] core: refmt stdlib module

---
 tracing-core/src/stdlib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tracing-core/src/stdlib.rs b/tracing-core/src/stdlib.rs
index 5047d4f486..4a1c17c2b8 100644
--- a/tracing-core/src/stdlib.rs
+++ b/tracing-core/src/stdlib.rs
@@ -49,9 +49,9 @@ mod no_std {
     }
 
     pub(crate) mod sync {
+        pub(crate) use crate::spin::MutexGuard;
         pub(crate) use alloc::sync::*;
         pub(crate) use core::sync::*;
-        pub(crate) use crate::spin::MutexGuard;
 
         /// This wraps `spin::Mutex` to return a `Result`, so that it can be
         /// used with code written against `std::sync::Mutex`.

From a289dd572a6d0be5293b3607d183726583d685c2 Mon Sep 17 00:00:00 2001
From: Thomas Karpiniec
Date: Fri, 15 Nov 2019 16:13:43 +1100
Subject: [PATCH 9/9] core: bump to 0.1.8 to expose export of vendored spin::Once to tracing

---
 tracing-core/Cargo.toml | 2 +-
 tracing/Cargo.toml      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tracing-core/Cargo.toml b/tracing-core/Cargo.toml
index 34028c6080..b089cca3a2 100644
--- a/tracing-core/Cargo.toml
+++ b/tracing-core/Cargo.toml
@@ -8,7 +8,7 @@ name = "tracing-core"
 # - README.md
 # - Update CHANGELOG.md.
 # - Create "v0.1.x" git tag.
-version = "0.1.7" +version = "0.1.8" authors = ["Tokio Contributors "] license = "MIT" readme = "README.md" diff --git a/tracing/Cargo.toml b/tracing/Cargo.toml index 9473213115..ffdcb37881 100644 --- a/tracing/Cargo.toml +++ b/tracing/Cargo.toml @@ -27,7 +27,7 @@ keywords = ["logging", "tracing", "metrics", "async"] edition = "2018" [dependencies] -tracing-core = { version = "0.1.7", default-features = false } +tracing-core = { path = "../tracing-core", version = "0.1.8", default-features = false } log = { version = "0.4", optional = true } tracing-attributes = "0.1.5" cfg-if = "0.1.10"