diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2ec0651..6e42e92 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -64,6 +64,7 @@ jobs: args: --release miri-test: + name: no_std and miri runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -101,9 +102,14 @@ jobs: with: toolchain: ${{ steps.versions.outputs.rustc }} profile: minimal - components: miri + components: miri, rust-src override: true + - name: Test `no_std` compatibility + shell: bash + working-directory: ensure_no_std + run: cargo +${{ steps.versions.outputs.rustc }} build + - name: Run tests in miri env: RUSTFLAGS: "-Zrandomize-layout" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index e23ed3a..560da44 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -82,6 +82,7 @@ jobs: args: --release miri-test: + name: no_std and miri runs-on: ${{ matrix.os }} needs: rust-test strategy: @@ -131,9 +132,14 @@ jobs: with: toolchain: ${{ steps.versions.outputs.rustc }} profile: minimal - components: miri + components: miri, rust-src override: true + - name: Test `no_std` compatibility + shell: bash + working-directory: ensure_no_std + run: cargo +${{ steps.versions.outputs.rustc }} build + - name: Run tests in miri env: RUSTFLAGS: "-Zrandomize-layout" diff --git a/Cargo.lock b/Cargo.lock index 3964bcb..e7d790a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,8 +90,9 @@ dependencies = [ "byteorder", "criterion", "hashbrown", + "libm", "ndarray", - "num", + "num-traits", "numpy", "probability", "pyo3", @@ -300,6 +301,12 @@ version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + [[package]] name = "lock_api" version = "0.4.9" @@ -365,31 +372,6 @@ dependencies = [ "rawpointer", ] -[[package]] -name = "num" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" -dependencies = [ - "num-bigint", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-complex" version = "0.4.2" @@ -409,29 +391,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.15" @@ -439,6 +398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + 
"libm", ] [[package]] @@ -537,9 +497,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "probability" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d1ba13c5cdf590c3d5ab5f53b36da2f596e43f8a27b236ad1801b5a1695d8" +checksum = "1d5506784a003ab90372c5ce00c4b5d68a14d2b90b7170de1be0dfe3faa43bb6" dependencies = [ "random", "special", @@ -673,9 +633,9 @@ dependencies = [ [[package]] name = "random" -version = "0.12.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d13a3485349981c90c79112a11222c3e6e75de1d52b87a7525b3bf5361420f" +checksum = "474c42c904f04dfe2a595a02f71e1a0e5e92ffb5761cc9a4c02140b93b8dd504" [[package]] name = "rawpointer" @@ -802,11 +762,11 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "special" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a65e074159b75dcf173a4733ab2188baac24967b5c8ec9ed87ae15fcbc7636" +checksum = "d256a3d3497ee65685b3c0267cf13eb9d32920e58eac5bf0e457cb12c3c4f5bb" dependencies = [ - "libc", + "libm", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 847457c..2de81c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ name = "constriction" [features] default = ["std"] -std = ["probability"] +std = [] # Use feature `pybindings` to compile the python extension module that provides # access to this library from python. This feature is turned off by default @@ -30,10 +30,11 @@ pybindings = ["ndarray", "numpy", "pyo3"] [dependencies] hashbrown = "0.12.3" -num = "0.4" +num-traits = {version = "0.2.15", default-features = false, features = ["libm"]} smallvec = "1.6.1" -probability = {version = "0.18", optional = true} +libm = "0.2.6" +probability = {version = "0.20"} ndarray = {version = "0.15", optional = true} numpy = {version = "0.17.1", optional = true} diff --git a/benches/lookup.rs b/benches/lookup.rs index 0e5a5c9..39a8cac 100644 --- a/benches/lookup.rs +++ b/benches/lookup.rs @@ -10,7 +10,7 @@ use constriction::{ BitArray, Pos, Seek, }; use criterion::{black_box, criterion_group, Criterion}; -use num::cast::AsPrimitive; +use num_traits::AsPrimitive; use rand::{RngCore, SeedableRng}; use rand_xoshiro::Xoshiro256StarStar; @@ -26,7 +26,7 @@ criterion_group!( #[cfg(not(miri))] criterion::criterion_main!(benches); #[cfg(miri)] -fn main() {} // All benchmarks currently use FFI and therefore can't be tested in miri. +fn main() {} // miri currently doesn't seem to be able to run criterion benchmarks as tests. 
fn round_trip_u32_u64_u16_12(c: &mut Criterion) { round_trip::(c); @@ -87,7 +87,7 @@ where type_name::(), PRECISION ); - c.bench_function(&format!("ans_encoding_{}", label_suffix), |b| { + c.bench_function(&format!("ans_encoding_{label_suffix}"), |b| { b.iter(|| { encoder.clear(); encoder @@ -109,7 +109,7 @@ where let mut backward_decoder = encoder.into_seekable_decoder(); let reset_snapshot = backward_decoder.pos(); - c.bench_function(&format!("ans_backward_decoding_{}", label_suffix), |b| { + c.bench_function(&format!("ans_backward_decoding_{label_suffix}"), |b| { b.iter(|| { backward_decoder.seek(black_box(reset_snapshot)).unwrap(); let mut checksum = 1234u16; @@ -132,7 +132,7 @@ where let mut forward_decoder = backward_decoder.into_reversed(); let reset_snapshot = forward_decoder.pos(); - c.bench_function(&format!("ans_forward_decoding_{}", label_suffix), |b| { + c.bench_function(&format!("ans_forward_decoding_{label_suffix}"), |b| { b.iter(|| { forward_decoder.seek(black_box(reset_snapshot)).unwrap(); let mut checksum = 1234u16; @@ -179,7 +179,7 @@ where type_name::(), PRECISION ); - c.bench_function(&format!("range_encoding_{}", label_suffix), |b| { + c.bench_function(&format!("range_encoding_{label_suffix}"), |b| { b.iter(|| { encoder.clear(); encoder @@ -200,7 +200,7 @@ where let mut decoder = encoder.into_decoder().unwrap(); - c.bench_function(&format!("range_decoding_{}", label_suffix), |b| { + c.bench_function(&format!("range_decoding_{label_suffix}"), |b| { b.iter(|| { decoder.seek(black_box(reset_snapshot)).unwrap(); let mut checksum = 1234u16; diff --git a/ensure_no_std/.cargo/config.toml b/ensure_no_std/.cargo/config.toml new file mode 100644 index 0000000..78edb09 --- /dev/null +++ b/ensure_no_std/.cargo/config.toml @@ -0,0 +1,21 @@ +# We need to cross-compile the core library. +[unstable] +build-std = [ + "alloc", + "compiler_builtins", + "core", +] +build-std-features = ["compiler-builtins-mem"] + +[build] +rustflags = [ + # See https://doc.rust-lang.org/rustc/codegen-options/index.html for possible rustflags + # use custom linker script + "-C", + "link-args=-n -T src/link.ld", # The default of the x86_64-unknown-none built-in Rust compiler target is "pic". + # See: https://github.com/rust-lang/rust/blob/1.62.1/compiler/rustc_target/src/spec/x86_64_unknown_none.rs + # This is not supported by Hedron. + "-C", + "relocation-model=static", +] +target = "x86_64-unknown-none" diff --git a/ensure_no_std/.gitignore b/ensure_no_std/.gitignore new file mode 100644 index 0000000..ea8c4bf --- /dev/null +++ b/ensure_no_std/.gitignore @@ -0,0 +1 @@ +/target diff --git a/ensure_no_std/Cargo.lock b/ensure_no_std/Cargo.lock new file mode 100644 index 0000000..9599356 --- /dev/null +++ b/ensure_no_std/Cargo.lock @@ -0,0 +1,181 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "constriction" +version = "0.2.6" +dependencies = [ + "hashbrown", + "libm", + "num-traits", + "probability", + "smallvec", +] + +[[package]] +name = "ensure_no_std" +version = "0.1.0" +dependencies = [ + "constriction", + "simple-chunk-allocator", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + +[[package]] +name = "libc" +version = "0.2.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" + +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "once_cell" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "probability" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d5506784a003ab90372c5ce00c4b5d68a14d2b90b7170de1be0dfe3faa43bb6" +dependencies = [ + "random", + "special", +] + +[[package]] +name = "random" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "474c42c904f04dfe2a595a02f71e1a0e5e92ffb5761cc9a4c02140b93b8dd504" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "simple-chunk-allocator" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"973e52fd54df6336f798ab685cd26c71fcb9d065e2fba5f9ac311c3ad43b85b5" +dependencies = [ + "libm", + "log", + "spin", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "special" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d256a3d3497ee65685b3c0267cf13eb9d32920e58eac5bf0e457cb12c3c4f5bb" +dependencies = [ + "libm", +] + +[[package]] +name = "spin" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +dependencies = [ + "lock_api", +] + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" diff --git a/ensure_no_std/Cargo.toml b/ensure_no_std/Cargo.toml new file mode 100644 index 0000000..cffe7a2 --- /dev/null +++ b/ensure_no_std/Cargo.toml @@ -0,0 +1,24 @@ +# This is a small helper subcrate whose only purpose is to test whether the parent crate is +# `no_std` compatible. The implementation of this subcrate follows mostly +# with some parts from +# . +# +# Note that the potentially more elegant solution using `cargo-nono` doesn't work as of +# `cargo-nono` version 0.1.9: it reports spurious `std`-dependencies behind deactivated features. + +[package] +edition = "2021" +name = "ensure_no_std" +version = "0.1.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +constriction = {path = "..", default-features = false} +simple-chunk-allocator = "0.1.5" + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" diff --git a/ensure_no_std/src/link.ld b/ensure_no_std/src/link.ld new file mode 100644 index 0000000..be396b2 --- /dev/null +++ b/ensure_no_std/src/link.ld @@ -0,0 +1,38 @@ +/** The "start"-symbol from main.rs. */ +ENTRY(start) + +OUTPUT_FORMAT("elf64-x86-64") +OUTPUT_ARCH("i386:x86-64") + +SECTIONS { + + /* Link Address: 4MiB */ + . = 0x400000; + . 
+= SIZEOF_HEADERS; + + .text 0x400000 : ALIGN (4096) + { + *(.text .text.*) + } : rx + + .rodata : ALIGN (4096) + { + *(.rodata .rodata.*) + } : r + + .data : ALIGN (4096) + { + *(.data .data.*) + *(COMMON) + + /* put .bss in .data */ + *(.bss .bss.*) + } : rw + + /* Information for unwinding & backtraces */ + .eh_frame : ALIGN (4096) + { + *(.eh_frame*) + } + +} diff --git a/ensure_no_std/src/main.rs b/ensure_no_std/src/main.rs new file mode 100644 index 0000000..9984c6e --- /dev/null +++ b/ensure_no_std/src/main.rs @@ -0,0 +1,46 @@ +#![no_std] +#![no_main] +#![feature(alloc_error_handler)] +#![feature(const_mut_refs)] + +use core::panic::PanicInfo; +use simple_chunk_allocator::{heap, heap_bitmap, GlobalChunkAllocator, PageAligned}; + +static mut HEAP: PageAligned<[u8; 1048576]> = heap!(); +static mut HEAP_BITMAP: PageAligned<[u8; 512]> = heap_bitmap!(); + +#[global_allocator] +static ALLOCATOR: GlobalChunkAllocator = + unsafe { GlobalChunkAllocator::new(HEAP.deref_mut_const(), HEAP_BITMAP.deref_mut_const()) }; + +#[allow(unused_imports)] +use constriction; + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! { + loop {} +} + +#[alloc_error_handler] +fn alloc_error_handler(layout: core::alloc::Layout) -> ! { + panic!("Can't handle allocation: layout = {:?}", layout); +} + +#[no_mangle] +pub extern "C" fn _start() -> ! { + use constriction::stream::{Decode, Encode}; + + let model = constriction::stream::model::UniformModel::::new(10); + + let mut encoder = constriction::stream::stack::DefaultAnsCoder::new(); + encoder.encode_symbol(3u32, model).unwrap(); + encoder.encode_symbol(5u32, model).unwrap(); + let compressed = core::hint::black_box(encoder.into_compressed().unwrap()); + + let mut decoder = + constriction::stream::stack::DefaultAnsCoder::from_compressed(compressed).unwrap(); + assert_eq!(decoder.decode_symbol(model), Ok(5)); + assert_eq!(decoder.decode_symbol(model), Ok(3)); + + loop {} +} diff --git a/src/lib.rs b/src/lib.rs index 697dae1..5646d89 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -276,14 +276,10 @@ use core::{ convert::Infallible, fmt::{Binary, Debug, Display, LowerHex, UpperHex}, hash::Hash, - num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}, + num::{NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}, }; -use num::{ - cast::AsPrimitive, - traits::{WrappingAdd, WrappingMul, WrappingSub}, - PrimInt, Unsigned, -}; +use num_traits::{AsPrimitive, PrimInt, Unsigned, WrappingAdd, WrappingMul, WrappingSub}; // READ WRITE SEMANTICS ======================================================= @@ -340,8 +336,8 @@ impl Display { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { - Self::Frontend(err) => write!(f, "Invalid compressed data: {}", err), - Self::Backend(err) => write!(f, "Error while reading compressed data: {}", err), + Self::Frontend(err) => write!(f, "Invalid compressed data: {err}"), + Self::Backend(err) => write!(f, "Error while reading compressed data: {err}"), } } } @@ -718,7 +714,7 @@ macro_rules! unsafe_impl_bit_array { // have a significant impact on performance, but it doesn't seem to // anymore as of rust version 1.58.0 (although the check itself is // still there). 
- if non_zero == num::zero::() { + if non_zero == num_traits::zero::() { core::hint::unreachable_unchecked(); } else { non_zero @@ -735,10 +731,12 @@ unsafe_impl_bit_array!( (u16, NonZeroU16), (u32, NonZeroU32), (u64, NonZeroU64), - (u128, NonZeroU128), (usize, NonZeroUsize), ); +#[cfg(feature = "std")] +unsafe_impl_bit_array!((u128, core::num::NonZeroU128),); + pub trait UnwrapInfallible { fn unwrap_infallible(self) -> T; } diff --git a/src/pybindings/stream/chain.rs b/src/pybindings/stream/chain.rs index 19343ab..5af1c7e 100644 --- a/src/pybindings/stream/chain.rs +++ b/src/pybindings/stream/chain.rs @@ -265,9 +265,9 @@ impl ChainCoder { /// [`CustomModel`](model.html#constriction.stream.model.CustomModel) or /// [`ScipyModel`](model.html#constriction.stream.model.ScipyModel) model class. #[pyo3(text_signature = "(DEPRECATED)")] - pub fn encode_iid_custom_model_reverse<'py>( + pub fn encode_iid_custom_model_reverse( &mut self, - py: Python<'py>, + py: Python<'_>, symbols: PyReadonlyArray1<'_, i32>, model: &Model, ) -> PyResult<()> { @@ -293,9 +293,9 @@ impl ChainCoder { /// entropy model). #[pyo3(text_signature = "(model, optional_amt_or_model_params)")] #[args(symbols, model, params = "*")] - pub fn decode<'py>( + pub fn decode( &mut self, - py: Python<'py>, + py: Python<'_>, model: &Model, params: &PyTuple, ) -> PyResult { @@ -434,9 +434,9 @@ impl ChainCoder { /// [`CustomModel`](model.html#constriction.stream.model.CustomModel) or /// [`ScipyModel`](model.html#constriction.stream.model.ScipyModel) model class. #[pyo3(text_signature = "(DEPRECATED)")] - pub fn decode_iid_custom_model<'py>( + pub fn decode_iid_custom_model( &mut self, - py: Python<'py>, + py: Python<'_>, amt: usize, model: &Model, ) -> PyResult { diff --git a/src/pybindings/stream/model.rs b/src/pybindings/stream/model.rs index 198869d..bd57310 100644 --- a/src/pybindings/stream/model.rs +++ b/src/pybindings/stream/model.rs @@ -18,6 +18,7 @@ pub fn init_module(_py: Python<'_>, module: &PyModule) -> PyResult<()> { module.add_class::()?; module.add_class::()?; module.add_class::()?; + module.add_class::()?; module.add_class::()?; module.add_class::()?; Ok(()) @@ -575,6 +576,94 @@ impl QuantizedLaplace { } } +/// A Cauchy distribution, quantized over bins of size 1 centered at integer values. +/// +/// Analogous to [`QuantizedGaussian`](#constriction.stream.model.QuantizedGaussian), just +/// starting from a Cauchy distribution rather than a Gaussian. +/// +/// Before quantization, the probability density function of a Cauchy distribution is: +/// +/// `p(x) = 1 / (pi * gamma * (1 + ((x - loc) / gamma)^2))` +/// +/// where the parameters `loc` and `scale` parameterize the location of the mode and the +/// width of the distribution. +/// +/// ## Fixed Arguments +/// +/// The following arguments always have to be provided directly to the constructor of the +/// model. They cannot be delayed until encoding or decoding. +/// +/// - **min_symbol_inclusive** and **max_symbol_inclusive** --- specify the integer range on +/// which the model is defined. +/// +/// ## Model Parameters +/// +/// Each of the following model parameters can either be specified as a scalar when +/// constructing the model, or as a rank-1 numpy array (with `dtype=np.float64`) when +/// calling the entropy coder's encode or decode method. +/// +/// - **loc** --- the location (mode) of the Cauchy distribution before quantization. 
+/// - **scale** --- the scale parameter `gamma` of the Cauchy distribution before +/// quantization (resulting in a full width at half maximum of `2 * scale`). +#[pyclass(extends=Model)] +#[pyo3(text_signature = "(min_symbol_inclusive, max_symbol_inclusive, loc=None, scale=None)")] +#[derive(Debug)] +struct QuantizedCauchy; + +#[pymethods] +impl QuantizedCauchy { + #[new] + pub fn new( + min_symbol_inclusive: i32, + max_symbol_inclusive: i32, + loc: Option, + scale: Option, + ) -> PyResult<(Self, Model)> { + let model = match (loc, scale) { + (None, None) => { + let quantizer = LeakyQuantizer::::new( + min_symbol_inclusive..=max_symbol_inclusive, + ); + let model = + internals::ParameterizableModel::new(move |(loc, scale): (f64, f64)| { + let distribution = probability::distribution::Cauchy::new(loc, scale); + quantizer.quantize(distribution) + }); + Arc::new(model) as Arc + } + (Some(loc), Some(scale)) => { + let distribution = probability::distribution::Cauchy::new(loc, scale); + let quantizer = LeakyQuantizer::::new( + min_symbol_inclusive..=max_symbol_inclusive, + ); + Arc::new(quantizer.quantize(distribution)) as Arc + } + (Some(loc), None) => { + let quantizer = LeakyQuantizer::::new( + min_symbol_inclusive..=max_symbol_inclusive, + ); + let model = internals::ParameterizableModel::new(move |(scale,): (f64,)| { + let distribution = probability::distribution::Cauchy::new(loc, scale); + quantizer.quantize(distribution) + }); + Arc::new(model) as Arc + } + (None, Some(scale)) => { + let quantizer = LeakyQuantizer::::new( + min_symbol_inclusive..=max_symbol_inclusive, + ); + let model = internals::ParameterizableModel::new(move |(loc,): (f64,)| { + let distribution = probability::distribution::Cauchy::new(loc, scale); + quantizer.quantize(distribution) + }); + Arc::new(model) as Arc + } + }; + + Ok((Self, Model(model))) + } +} + /// A Binomial distribution over the alphabet {0, 1, ..., n}. /// /// Models the number of successful trials out of `n` trials where the trials are diff --git a/src/pybindings/stream/model/internals.rs b/src/pybindings/stream/model/internals.rs index 60a2768..a97fcac 100644 --- a/src/pybindings/stream/model/internals.rs +++ b/src/pybindings/stream/model/internals.rs @@ -258,9 +258,9 @@ impl Model for UnspecializedPythonModel { (callback)(&self.quantizer.quantize(distribution)) } - fn parameterize<'py>( + fn parameterize( &self, - py: Python<'py>, + py: Python<'_>, params: &PyTuple, reverse: bool, callback: &mut dyn FnMut(&dyn DefaultEntropyModel) -> PyResult<()>, diff --git a/src/pybindings/stream/queue.rs b/src/pybindings/stream/queue.rs index d87246a..fcfbad0 100644 --- a/src/pybindings/stream/queue.rs +++ b/src/pybindings/stream/queue.rs @@ -535,9 +535,9 @@ impl RangeEncoder { /// print(encoder.get_compressed()) # (prints: [1204741195, 2891990943]) /// ``` #[pyo3(text_signature = "(DEPRECATED)")] - pub fn encode_iid_custom_model<'py>( + pub fn encode_iid_custom_model( &mut self, - py: Python<'py>, + py: Python<'_>, symbols: PyReadonlyArray1<'_, i32>, model: &Model, ) -> PyResult<()> { @@ -899,9 +899,9 @@ impl RangeDecoder { /// ``` #[pyo3(text_signature = "(model, optional_amt_or_model_params)")] #[args(symbols, model, params = "*")] - pub fn decode<'py>( + pub fn decode( &mut self, - py: Python<'py>, + py: Python<'_>, model: &Model, params: &PyTuple, ) -> PyResult { @@ -978,9 +978,9 @@ impl RangeDecoder { /// [`CustomModel`](model.html#constriction.stream.model.CustomModel) and /// [`ScipyModel`](model.html#constriction.stream.model.ScipyModel). 
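On the Rust side, the new `QuantizedCauchy` Python class corresponds to quantizing a `probability::distribution::Cauchy` with a `LeakyQuantizer`, exactly as the new `leakily_quantized_cauchy` test further down in this diff does. A sketch (the order of `LeakyQuantizer`'s generic parameters, assumed here, is: float type, symbol type, probability type, precision):

```rust
use constriction::stream::model::LeakyQuantizer;
use probability::distribution::Cauchy;

fn quantized_cauchy() {
    // Quantize Cauchy(loc = -5.2, gamma = 3.5) onto the integer symbols -127..=127
    // with 24 bits of fixed-point precision, leaving nonzero mass on every symbol.
    let quantizer = LeakyQuantizer::<f64, i32, u32, 24>::new(-127..=127);
    let model = quantizer.quantize(Cauchy::new(-5.2, 3.5));

    // `model` can now be passed to any of the crate's stream coders.
    let _ = model;
}
```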
#[pyo3(text_signature = "(DEPRECATED)")] - pub fn decode_iid_custom_model<'py>( + pub fn decode_iid_custom_model( &mut self, - py: Python<'py>, + py: Python<'_>, amt: usize, model: &Model, ) -> PyResult { diff --git a/src/pybindings/stream/stack.rs b/src/pybindings/stream/stack.rs index 461ad1c..04ca9f4 100644 --- a/src/pybindings/stream/stack.rs +++ b/src/pybindings/stream/stack.rs @@ -721,9 +721,9 @@ impl AnsCoder { /// print(coder.get_compressed()) # (prints: [609762275, 3776398430]) /// ``` #[pyo3(text_signature = "(DEPRECATED)")] - pub fn encode_iid_custom_model_reverse<'py>( + pub fn encode_iid_custom_model_reverse( &mut self, - py: Python<'py>, + py: Python<'_>, symbols: PyReadonlyArray1<'_, i32>, model: &Model, ) -> PyResult<()> { @@ -835,9 +835,9 @@ impl AnsCoder { /// ``` #[pyo3(text_signature = "(model, optional_amt_or_model_params)")] #[args(symbols, model, params = "*")] - pub fn decode<'py>( + pub fn decode( &mut self, - py: Python<'py>, + py: Python<'_>, model: &Model, params: &PyTuple, ) -> PyResult { @@ -1076,9 +1076,9 @@ impl AnsCoder { /// [`CustomModel`](model.html#constriction.stream.model.CustomModel) and /// [`ScipyModel`](model.html#constriction.stream.model.ScipyModel). #[pyo3(text_signature = "(DEPRECATED)")] - pub fn decode_iid_custom_model<'py>( + pub fn decode_iid_custom_model( &mut self, - py: Python<'py>, + py: Python<'_>, amt: usize, model: &Model, ) -> PyResult { diff --git a/src/stream/chain.rs b/src/stream/chain.rs index 3c9f0cf..095d81c 100644 --- a/src/stream/chain.rs +++ b/src/stream/chain.rs @@ -93,7 +93,7 @@ use alloc::vec::Vec; use core::{borrow::Borrow, convert::Infallible, fmt::Display}; -use num::cast::AsPrimitive; +use num_traits::AsPrimitive; use super::{ model::{DecoderModel, EncoderModel}, @@ -841,10 +841,10 @@ impl core::fmt fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Compressed(err) => { - write!(f, "Read/write error when accessing compressed: {}", err) + write!(f, "Read/write error when accessing compressed: {err}") } Self::Remainders(err) => { - write!(f, "Read/write error when accessing remainders: {}", err) + write!(f, "Read/write error when accessing remainders: {err}") } } } @@ -884,18 +884,10 @@ where fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { ChangePrecisionError::Increase(err) => { - write!( - f, - "Error while increasing precision of chain coder: {}", - err - ) + write!(f, "Error while increasing precision of chain coder: {err}") } ChangePrecisionError::Decrease(err) => { - write!( - f, - "Error while decreasing precision of chain coder: {}", - err - ) + write!(f, "Error while decreasing precision of chain coder: {err}") } } } @@ -1168,91 +1160,76 @@ mod tests { use alloc::vec; #[test] - #[cfg_attr(miri, ignore)] fn restore_none() { generic_restore_many::(4, 0); } #[test] - #[cfg_attr(miri, ignore)] fn restore_one() { generic_restore_many::(5, 1); } #[test] - #[cfg_attr(miri, ignore)] fn restore_two() { generic_restore_many::(5, 2); } #[test] - #[cfg_attr(miri, ignore)] fn restore_ten() { generic_restore_many::(20, 10); } #[test] - #[cfg_attr(miri, ignore)] fn restore_twenty() { generic_restore_many::(19, 20); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u32_u64_32() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u32_u64_24() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u32_u64_16() { generic_restore_many::(1024, 1000); } #[test] - 
#[cfg_attr(miri, ignore)] fn restore_many_u16_u64_16() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u32_u64_8() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u16_u64_8() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u8_u64_8() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u16_u32_16() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u16_u32_8() { generic_restore_many::(1024, 1000); } #[test] - #[cfg_attr(miri, ignore)] fn restore_many_u8_u32_8() { generic_restore_many::(1024, 1000); } @@ -1270,6 +1247,10 @@ mod tests { f64: AsPrimitive, i32: AsPrimitive, { + #[cfg(miri)] + let (amt_compressed_words, amt_symbols) = + (amt_compressed_words.min(128), amt_symbols.min(100)); + let mut rng = Xoshiro256StarStar::seed_from_u64( (amt_compressed_words as u64).rotate_left(32) ^ amt_symbols as u64, ); diff --git a/src/stream/mod.rs b/src/stream/mod.rs index 484b164..c431622 100644 --- a/src/stream/mod.rs +++ b/src/stream/mod.rs @@ -318,7 +318,7 @@ use core::{ use crate::{BitArray, CoderError}; use model::{DecoderModel, EncoderModel, EntropyModel}; -use num::cast::AsPrimitive; +use num_traits::AsPrimitive; /// Base trait for stream encoders and decoders /// @@ -1078,7 +1078,7 @@ pub trait Decode: Code { /// Encoder: Encode + IntoDecoder, // <-- Different trait bound. /// D: EncoderModel + DecoderModel, /// D::Probability: Into, -/// Encoder::Word: num::cast::AsPrimitive +/// Encoder::Word: num_traits::AsPrimitive /// { /// encoder.encode_symbol(137, &model); /// let mut decoder = encoder.into_decoder(); @@ -1137,7 +1137,7 @@ pub trait IntoDecoder: Encode { /// for<'a> Encoder: AsDecoder<'a, PRECISION>, // <-- Different trait bound. /// D: EncoderModel + DecoderModel, /// D::Probability: Into, -/// Encoder::Word: num::cast::AsPrimitive +/// Encoder::Word: num_traits::AsPrimitive /// { /// encoder.encode_symbol(137, &model); /// let mut decoder = encoder.as_decoder(); @@ -1330,10 +1330,10 @@ impl Display fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::InvalidEntropyModel(err) => { - write!(f, "Error while constructing entropy model or data: {}", err) + write!(f, "Error while constructing entropy model or data: {err}") } Self::CodingError(err) => { - write!(f, "Error while entropy coding: {}", err) + write!(f, "Error while entropy coding: {err}") } } } diff --git a/src/stream/model.rs b/src/stream/model.rs index ae7c854..805b2bc 100644 --- a/src/stream/model.rs +++ b/src/stream/model.rs @@ -131,116 +131,41 @@ use hashbrown::hash_map::{ use alloc::{boxed::Box, vec::Vec}; use core::{borrow::Borrow, fmt::Debug, hash::Hash, marker::PhantomData, ops::RangeInclusive}; -use num::{ - cast::AsPrimitive, - traits::{WrappingAdd, WrappingSub}, - Float, One, PrimInt, Zero, -}; +use libm::log1p; +use num_traits::{float::FloatCore, AsPrimitive, One, PrimInt, WrappingAdd, WrappingSub, Zero}; -/// Re-export or replacement of [`probability::distribution::Distribution`]. +/// Re-export of [`probability::distribution::Distribution`]. /// /// Most users will never have to interact with this trait directly. When a method requires /// a type that implements `Distribution`, most users will likely use a predefined type from /// the [`probability`] crate. 
You only need to implement this trait if you want to use a /// probability distribution that is not (yet) provided by the `probability` crate. /// -/// # Technical Details -/// -/// - For most users, this trait is just a re-export (similar to a type alias) of the -/// [`Distribution` trait from the `probability` crate]. You'll need a type that -/// implements `Distribution` when you call [`LeakyQuantizer::quantize`]. The -/// `probability` crate provides implementations of several common `Distribution`s. -/// - Unfortunately, the `probability` crate does not support `no_std` mode. Thus, if you -/// compile `constriction` in `no_std` mode (by setting `default-features = false` for -/// `constriction` in your `Cargo.toml`) then you can't use the `probability` crate . In -/// this case, the present trait (`constriction::stream::model::Distribution`) is not a -/// re-export but instead a literal copy of its definition in the `probability` trait. -/// -/// # Advice for Implementors -/// -/// If you implement your own probability distribution (rather than using a pre-defined -/// distribution from the `probability` crate) then it is usually better to implement *this* -/// trait rather than `probability::distribution::Distribution`. This way, your code will -/// work as expected both in `std` and in `no_std` mode. -/// /// # See Also /// /// - [`Inverse`] /// /// [`probability::distribution::Distribution`]: /// https://docs.rs/probability/latest/probability/distribution/trait.Distribution.html -/// [`Distribution` trait from the `probability` crate]: -/// https://docs.rs/probability/latest/probability/distribution/trait.Distribution.html /// [`probability`]: https://docs.rs/probability/latest/probability/ -#[cfg(feature = "probability")] pub use probability::distribution::Distribution; -/// Re-export or replacement of [`probability::distribution::Inverse`]. +/// Re-export of [`probability::distribution::Inverse`]. /// /// Most users will never have to interact with this trait directly. When a method requires /// a type that implements `Inverse`, most users will likely use a predefined type from /// the [`probability`] crate. You only need to implement this trait if you want to use a /// probability distribution that is not (yet) provided by the `probability` crate. /// -/// # Technical Details -/// -/// - For most users, this trait is just a re-export (similar to a type alias) of the -/// [`Inverse` trait from the `probability` crate]. You'll need a type that implements -/// `Inverse` when you call [`LeakyQuantizer::quantize`] and then use the resulting -/// [`LeakilyQuantizedDistribution`] for *decoding*. The `probability` crate provides -/// implementations of `Inverse` for several common probability distributions. -/// - Unfortunately, the `probability` crate does not support `no_std` mode. Thus, if you -/// compile `constriction` in `no_std` mode (by setting `default-features = false` for -/// `constriction` in your `Cargo.toml`) then you can't use the `probability` crate . In -/// this case, the present trait (`constriction::stream::model::Inverse`) is not a -/// re-export but instead a literal copy of its definition in the `probability` trait. -/// -/// # Advice for Implementors -/// -/// If you implement your own probability distribution (rather than using a pre-defined -/// distribution from the `probability` crate) then it is usually better to implement *this* -/// trait rather than `probability::distribution::Inverse`. 
This way, your code will -/// work as expected both in `std` and in `no_std` mode. -/// /// # See Also /// /// - [`Distribution`] /// /// [`probability::distribution::Inverse`]: /// https://docs.rs/probability/latest/probability/distribution/trait.Inverse.html -/// [`Inverse` trait from the `probability` crate]: -/// https://docs.rs/probability/latest/probability/distribution/trait.Inverse.html /// [`probability`]: https://docs.rs/probability/latest/probability/ -#[cfg(feature = "probability")] pub use probability::distribution::Inverse; -/// Mock replacement for [`probability::distribution::Distribution`] in a `no_std` context -/// -/// This trait is only exported if `constriction` is used in a `no_std` context (i.e., with -/// `default-features = false`). In this case, we can't use the `probability` crate because -/// it uses a lot of FFI calls. However, for many things, we really only need the trait -/// definitions for `Distribution` and for [`Inverse`], so we copy them here. -#[cfg(not(feature = "probability"))] -pub trait Distribution { - /// The type of outcomes. - type Value; - - /// Compute the cumulative distribution function. - fn distribution(&self, x: f64) -> f64; -} - -/// Mock replacement for [`probability::distribution::Distribution`] in a `no_std` context -/// -/// This trait is only exported if `constriction` is used in a `no_std` context (i.e., with -/// `default-features = false`). In this case, we can't use the `probability` crate because -/// it uses a lot of FFI calls. However, for many things, we really only need the trait -/// definitions for [`Distribution`] and for `Inverse`, so we copy them here. -#[cfg(not(feature = "probability"))] -pub trait Inverse: Distribution { - /// Compute the inverse of the cumulative distribution function. - fn inverse(&self, p: f64) -> Self::Value; -} - use crate::{wrapping_pow2, BitArray, NonZeroBitArray}; /// Base trait for probabilistic models of a data source. @@ -447,7 +372,7 @@ pub trait IterableEntropyModel<'m, const PRECISION: usize>: EntropyModel(&'m self) -> F where - F: Float + core::iter::Sum, + F: num_traits::Float + core::iter::Sum, Self::Probability: Into, { let entropy_scaled = self @@ -549,7 +474,7 @@ pub struct FloatingPointSymbolTable { impl Iterator for FloatingPointSymbolTable where - F: Float, + F: FloatCore, Probability: BitArray + Into, I: Iterator::NonZero)>, { @@ -574,7 +499,7 @@ where impl ExactSizeIterator for FloatingPointSymbolTable where - F: Float, + F: FloatCore, Probability: BitArray + Into, I: ExactSizeIterator::NonZero)>, { @@ -682,7 +607,7 @@ pub trait EncoderModel: EntropyModel { #[inline] fn floating_point_probability(&self, symbol: Self::Symbol) -> F where - F: Float, + F: FloatCore, Self::Probability: Into, { // This gets compiled to a single floating point multiplication rather than a (slow) @@ -794,7 +719,7 @@ where fn entropy_base2(&'m self) -> F where - F: Float + core::iter::Sum, + F: num_traits::Float + core::iter::Sum, Self::Probability: Into, { (*self).entropy_base2() @@ -1253,7 +1178,7 @@ impl where Probability: BitArray + Into, Symbol: PrimInt + AsPrimitive + WrappingSub + WrappingAdd, - F: Float, + F: FloatCore, { /// Constructs a `LeakyQuantizer` with a finite support. /// @@ -1383,7 +1308,7 @@ impl where Probability: BitArray + Into, Symbol: PrimInt + AsPrimitive + WrappingSub + WrappingAdd, - F: Float, + F: FloatCore, { /// Returns the quantizer that was used to create this entropy model. 
/// @@ -2321,9 +2246,10 @@ impl /// TODO: should also return an error if support is too large to support leaky /// distribution #[allow(clippy::result_unit_err)] + #[cfg(feature = "std")] pub fn from_floating_point_probabilities(probabilities: &[F]) -> Result where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -2521,7 +2447,7 @@ where probabilities: &[F], ) -> Result where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -3058,7 +2984,7 @@ where probabilities: &[F], ) -> Result where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -3148,7 +3074,7 @@ where /// program executions. pub fn entropy_base2(&self) -> F where - F: Float + core::iter::Sum, + F: num_traits::Float + core::iter::Sum, Probability: Into, { let entropy_scaled = self @@ -3262,7 +3188,7 @@ fn optimize_leaky_categorical( probabilities: &[F], ) -> Result>, ()> where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: BitArray + Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -3296,13 +3222,13 @@ where let weight = current_free_weight + Probability::one(); // How much the cross entropy would decrease when increasing the weight by one. - let win = prob * (1.0f64 / weight.into()).ln_1p(); + let win = prob * log1p(1.0f64 / weight.into()); // How much the cross entropy would increase when decreasing the weight by one. let loss = if weight == Probability::one() { f64::infinity() } else { - -prob * (-1.0f64 / weight.into()).ln_1p() + -prob * log1p(-1.0f64 / weight.into()) }; Ok(Slot { @@ -3323,8 +3249,8 @@ where let batch_size = core::cmp::min(remaining_free_weight.as_(), slots.len()); for slot in &mut slots[..batch_size] { slot.weight = slot.weight + Probability::one(); // Cannot end up in `max_weight` because win would otherwise be -infinity. - slot.win = slot.prob * (1.0f64 / slot.weight.into()).ln_1p(); - slot.loss = -slot.prob * (-1.0f64 / slot.weight.into()).ln_1p(); + slot.win = slot.prob * log1p(1.0f64 / slot.weight.into()); + slot.loss = -slot.prob * log1p(-1.0f64 / slot.weight.into()); } remaining_free_weight = remaining_free_weight - batch_size.as_(); } @@ -3363,13 +3289,13 @@ where seller.loss = if seller.weight == Probability::one() { f64::infinity() } else { - -seller.prob * (-1.0f64 / seller.weight.into()).ln_1p() + -seller.prob * log1p(-1.0f64 / seller.weight.into()) }; let buyer = &mut slots[buyer_index]; buyer.weight = buyer.weight + Probability::one(); buyer.loss = f64::infinity(); // Once a weight gets increased it may never be decreased again. 
- buyer.win = buyer.prob * (1.0f64 / buyer.weight.into()).ln_1p(); + buyer.win = buyer.prob * log1p(1.0f64 / buyer.weight.into()); } slots.sort_unstable_by_key(|slot| slot.original_index); @@ -3502,7 +3428,7 @@ where probabilities: &[F], ) -> Result where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -3613,7 +3539,7 @@ where #[allow(clippy::result_unit_err)] pub fn from_floating_point_probabilities_contiguous(probabilities: &[F]) -> Result where - F: Float + core::iter::Sum + Into, + F: FloatCore + core::iter::Sum + Into, Probability: Into + AsPrimitive, f64: AsPrimitive, usize: AsPrimitive, @@ -3932,25 +3858,72 @@ mod tests { use super::super::{stack::DefaultAnsCoder, Decode}; use alloc::{string::String, vec}; - use probability::distribution::{Binomial, Gaussian}; + use probability::distribution::{Binomial, Cauchy, Gaussian}; #[test] - #[cfg_attr(miri, ignore)] fn leakily_quantized_normal() { - let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(-127..=127); - for &std_dev in &[0.0001, 0.1, 3.5, 123.45, 1234.56] { - for &mean in &[-300.6, -100.2, -5.2, 0.0, 50.3, 180.2, 2000.0] { + #[cfg(not(miri))] + let (support, std_devs, means) = ( + -127..=127, + [0.0001, 0.1, 3.5, 123.45, 1234.56], + [-300.6, -100.2, -5.2, 0.0, 50.3, 180.2, 2000.0], + ); + + // We use different settings when testing on miri so that the test time stays reasonable. + #[cfg(miri)] + let (support, std_devs, means) = (-50..=50, [0.0001, 3.5, 1234.56], [-300.6, -5.2, 2000.0]); + + let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(support.clone()); + for &std_dev in &std_devs { + for &mean in &means { let distribution = Gaussian::new(mean, std_dev); - test_entropy_model(&quantizer.quantize(distribution), -127..128); + test_entropy_model( + &quantizer.quantize(distribution), + *support.start()..*support.end() + 1, + ); + } + } + } + + #[test] + fn leakily_quantized_cauchy() { + #[cfg(not(miri))] + let (support, gammas, means) = ( + -127..=127, + [0.0001, 0.1, 3.5, 123.45, 1234.56], + [-300.6, -100.2, -5.2, 0.0, 50.3, 180.2, 2000.0], + ); + + // We use different settings when testing on miri so that the test time stays reasonable. + #[cfg(miri)] + let (support, gammas, means) = (-50..=50, [0.0001, 3.5, 1234.56], [-300.6, -5.2, 2000.0]); + + let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(support.clone()); + for &gamma in &gammas { + for &mean in &means { + let distribution = Cauchy::new(mean, gamma); + test_entropy_model( + &quantizer.quantize(distribution), + *support.start()..*support.end() + 1, + ); } } } #[test] - #[cfg_attr(miri, ignore)] fn leakily_quantized_binomial() { - for &n in &[1, 2, 10, 100, 1000, 10_000] { - for &p in &[1e-30, 1e-20, 1e-10, 0.1, 0.4, 0.9] { + #[cfg(not(miri))] + let (ns, ps) = ( + [1, 2, 10, 100, 1000, 10_000], + [1e-30, 1e-20, 1e-10, 0.1, 0.4, 0.9], + ); + + // We use different settings when testing on miri so that the test time stays reasonable. + #[cfg(miri)] + let (ns, ps) = ([1, 2, 100], [1e-30, 0.1, 0.4]); + + for &n in &ns { + for &p in &ps { if n < 1000 || p >= 0.1 { // In the excluded situations, `::inverse` currently doesn't terminate. // TODO: file issue to `probability` repo. @@ -3979,11 +3952,17 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn entropy() { - let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(-1000..=1000); - for &std_dev in &[100., 200., 300.] 
{ - for &mean in &[-10., 2.3, 50.1] { + #[cfg(not(miri))] + let (support, std_devs, means) = (-1000..=1000, [100., 200., 300.], [-10., 2.3, 50.1]); + + // We use different settings when testing on miri so that the test time stays reasonable. + #[cfg(miri)] + let (support, std_devs, means) = (-100..=100, [10., 20., 30.], [-1., 0.23, 5.01]); + + let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(support); + for &std_dev in &std_devs { + for &mean in &means { let distribution = Gaussian::new(mean, std_dev); let model = quantizer.quantize(distribution); let entropy = model.entropy_base2::(); @@ -3996,7 +3975,6 @@ mod tests { /// Test that `optimal_weights` reproduces the same distribution when fed with an /// already quantized model. #[test] - #[cfg_attr(miri, ignore)] fn trivial_optimal_weights() { let hist = [ 56319u32, 134860032, 47755520, 60775168, 75699200, 92529920, 111023616, 130420736, @@ -4022,7 +4000,6 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn nontrivial_optimal_weights() { let hist = [ 1u32, 186545, 237403, 295700, 361445, 433686, 509456, 586943, 663946, 737772, 1657269, @@ -4068,7 +4045,6 @@ mod tests { /// Regression test for convergence of `optimize_leaky_categorical`. #[test] - #[cfg_attr(miri, ignore)] fn categorical_converges() { // Two example probability distributions that lead to an infinite loop in constriction 0.2.6 // (see ). @@ -4103,7 +4079,6 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn contiguous_categorical() { let hist = [ 1u32, 186545, 237403, 295700, 361445, 433686, 509456, 586943, 663946, 737772, 1657269, @@ -4121,7 +4096,6 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn non_contiguous_categorical() { let hist = [ 1u32, 186545, 237403, 295700, 361445, 433686, 509456, 586943, 663946, 737772, 1657269, diff --git a/src/stream/queue.rs b/src/stream/queue.rs index f8253ed..336dac0 100644 --- a/src/stream/queue.rs +++ b/src/stream/queue.rs @@ -40,7 +40,7 @@ use core::{ ops::Deref, }; -use num::cast::AsPrimitive; +use num_traits::AsPrimitive; use super::{ model::{DecoderModel, EncoderModel}, @@ -115,11 +115,26 @@ where situation: EncoderSituation, } +/// Keeps track of yet-to-be-finalized compressed words during encoding with a +/// [`RangeEncoder`]. +/// +/// This type is mostly for internal use. It is only exposed via +/// [`RangeEncoder::into_raw_parts`] and [`RangeEncoder::from_raw_parts`]. #[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum EncoderSituation { +pub enum EncoderSituation { + /// In the `Normal` situation, all full `Words` of compressed data have been written to + /// the backend (or "bulk"), and the internal coder state holds less than one word of + /// additional information content. Normal, - /// Wraps `num_inverted` and `first_inverted_lower_word` + /// The `Inverted` situation occurs only rarely. In this situation, some full words of + /// compressed data have been held back and not yet written to the backend (or "bulk") + /// because their final values may still change depending on subsequently encoded + /// symbols. + /// + /// More precisely, a situation of `Inverted(num_subsequent, first_word)` means that the + /// held-back words can become either `first_word + 1` followed by `num_subsequent` zero + /// words, or `first_word` followed by `num_subsequent` words that have all bits set. Inverted(NonZeroUsize, Word), } @@ -403,6 +418,39 @@ where pub fn bulk(&self) -> &Backend { &self.bulk } + + /// Low-level constructor that assembles a `RangeEncoder` from its internal components.
+ /// + /// The arguments `bulk`, `state`, and `situation` correspond to the three return values + /// of the method [`into_raw_parts`](Self::into_raw_parts). + pub fn from_raw_parts( + bulk: Backend, + state: RangeCoderState, + situation: EncoderSituation, + ) -> Self { + assert!(State::BITS >= 2 * Word::BITS); + assert_eq!(State::BITS % Word::BITS, 0); + // The invariants for `state` are already enforced statically. + + Self { + bulk, + state, + situation, + } + } + + /// Low-level method that disassembles the `RangeEncoder` into its internal components. + /// + /// Can be used together with [`from_raw_parts`](Self::from_raw_parts). + pub fn into_raw_parts( + self, + ) -> ( + Backend, + RangeCoderState, + EncoderSituation, + ) { + (self.bulk, self.state, self.situation) + } } impl RangeEncoder @@ -666,18 +714,35 @@ where }) } + /// Low-level constructor that assembles a `RangeDecoder` from its internal components. + /// + /// The arguments `bulk`, `state`, and `point` correspond to the three return values of + /// the method [`into_raw_parts`](Self::into_raw_parts). + /// + /// The construction fails if the argument `point` lies outside of the range represented + /// by `state`. In this case, the method returns the (unmodified) argument `bulk` back + /// to the caller, wrapped in an `Err` variant. pub fn from_raw_parts( - _bulk: Backend, - _state: State, - ) -> Result)> { + bulk: Backend, + state: RangeCoderState, + point: State, + ) -> Result { assert!(State::BITS >= 2 * Word::BITS); assert_eq!(State::BITS % Word::BITS, 0); + // The invariants for `state` are already enforced statically. - todo!() + if point.wrapping_sub(&state.lower) >= state.range.get() { + Err(bulk) + } else { + Ok(Self { bulk, state, point }) + } } - pub fn into_raw_parts(self) -> (Backend, RangeCoderState) { - (self.bulk, self.state) + /// Low-level method that disassembles the `RangeDecoder` into its internal components. + /// + /// Can be used together with [`from_raw_parts`](Self::from_raw_parts). 
+ pub fn into_raw_parts(self) -> (Backend, RangeCoderState, State) { + (self.bulk, self.state, self.point) } fn read_point>(bulk: &mut B) -> Result { @@ -979,25 +1044,21 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn compress_one() { generic_compress_few(core::iter::once(5), 1) } #[test] - #[cfg_attr(miri, ignore)] fn compress_two() { generic_compress_few([2, 8].iter().cloned(), 1) } #[test] - #[cfg_attr(miri, ignore)] fn compress_ten() { generic_compress_few(0..10, 2) } #[test] - #[cfg_attr(miri, ignore)] fn compress_twenty() { generic_compress_few(-10..10, 4) } @@ -1025,79 +1086,66 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_32() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_24() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_12() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_12() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u32_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u16_8() { generic_compress_many::(); } @@ -1112,7 +1160,12 @@ mod tests { f64: AsPrimitive, i32: AsPrimitive, { + #[cfg(not(miri))] const AMT: usize = 1000; + + #[cfg(miri)] + const AMT: usize = 100; + let mut symbols_gaussian = Vec::with_capacity(AMT); let mut means = Vec::with_capacity(AMT); let mut stds = Vec::with_capacity(AMT); @@ -1190,10 +1243,12 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn seek() { - const NUM_CHUNKS: usize = 100; - const SYMBOLS_PER_CHUNK: usize = 100; + #[cfg(not(miri))] + let (num_chunks, symbols_per_chunk) = (100, 100); + + #[cfg(miri)] + let (num_chunks, symbols_per_chunk) = (10, 10); let quantizer = LeakyQuantizer::<_, _, u32, 24>::new(-100..=100); let model = quantizer.quantize(Gaussian::new(0.0, 10.0)); @@ -1201,12 +1256,12 @@ mod tests { let mut encoder = DefaultRangeEncoder::new(); let mut rng = Xoshiro256StarStar::seed_from_u64(123); - let mut symbols = Vec::with_capacity(NUM_CHUNKS); - let mut jump_table = Vec::with_capacity(NUM_CHUNKS); + let mut symbols = Vec::with_capacity(num_chunks); + let mut jump_table = Vec::with_capacity(num_chunks); - for _ in 0..NUM_CHUNKS { + for _ in 0..num_chunks { jump_table.push(encoder.pos()); - let chunk = (0..SYMBOLS_PER_CHUNK) + let chunk = (0..symbols_per_chunk) .map(|_| model.quantile_function(rng.next_u32() % (1 << 24)).0) .collect::>(); encoder.encode_iid_symbols(&chunk, &model).unwrap(); @@ -1221,7 +1276,7 @@ mod tests { // implement `Pos` due to complications at the stream end.) 
for (chunk, _) in symbols.iter().zip(&jump_table) { let decoded = decoder - .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model) + .decode_iid_symbols(symbols_per_chunk, &model) .collect::, _>>() .unwrap(); assert_eq!(&decoded, chunk); @@ -1234,13 +1289,13 @@ mod tests { // Make sure we test jumping to beginning at least once. 0 } else { - rng.next_u32() as usize % NUM_CHUNKS + rng.next_u32() as usize % num_chunks }; let pos_and_state = jump_table[chunk_index]; decoder.seek(pos_and_state).unwrap(); let decoded = decoder - .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model) + .decode_iid_symbols(symbols_per_chunk, &model) .collect::, _>>() .unwrap(); assert_eq!(&decoded, &symbols[chunk_index]) diff --git a/src/stream/stack.rs b/src/stream/stack.rs index e460668..4d35f2f 100644 --- a/src/stream/stack.rs +++ b/src/stream/stack.rs @@ -27,7 +27,7 @@ use alloc::vec::Vec; use core::{ borrow::Borrow, convert::Infallible, fmt::Debug, iter::Fuse, marker::PhantomData, ops::Deref, }; -use num::cast::AsPrimitive; +use num_traits::AsPrimitive; use super::{ model::{DecoderModel, EncoderModel}, @@ -271,6 +271,10 @@ where Word: BitArray + Into, State: BitArray + AsPrimitive, { + #[deprecated( + since = "0.2.0", + note = "Use `from_raw_parts(Backend::default(), state)` instead." + )] pub fn with_state_and_empty_bulk(state: State) -> Self where Backend: Default, @@ -282,11 +286,16 @@ where } } - /// # Safety + /// Low-level constructor that assembles an `AnsCoder` from its internal components. + /// + /// The arguments `bulk` and `state` correspond to the two return values of the method + /// [`into_raw_parts`](Self::into_raw_parts). /// /// The caller must ensure that `state >= State::one() << (State::BITS - Word::BITS)` - /// unless `bulk.is_empty()`. - pub unsafe fn from_raw_parts(bulk: Backend, state: State) -> Self { + /// unless `bulk` is empty. This cannot be checked by the method since not all + /// `Backend`s have an `is_empty` method. Violating this invariant is not a memory + /// safety issue but it will lead to incorrect behavior. + pub fn from_raw_parts(bulk: Backend, state: State) -> Self { Self { bulk, state, @@ -398,6 +407,9 @@ where &self.bulk } + /// Low-level method that disassembles the `AnsCoder` into its internal components. + /// + /// Can be used together with [`from_raw_parts`](Self::from_raw_parts). 
pub fn into_raw_parts(self) -> (Backend, State) { (self.bulk, self.state) } @@ -940,6 +952,8 @@ where M::Probability: Into, Self::Word: AsPrimitive, { + assert!(State::BITS >= Word::BITS + PRECISION); + let (left_sided_cumulative, probability) = model .left_cumulative_and_probability(symbol) .ok_or_else(|| DefaultEncoderFrontendError::ImpossibleSymbol.into_coder_error())?; @@ -1002,6 +1016,8 @@ where M::Probability: Into, Self::Word: AsPrimitive, { + assert!(State::BITS >= Word::BITS + PRECISION); + let quantile = (self.state % (State::one() << PRECISION)).as_().as_(); let (symbol, left_sided_cumulative, probability) = model.quantile_function(quantile); let remainder = quantile - left_sided_cumulative; @@ -1169,25 +1185,21 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn compress_one() { generic_compress_few(core::iter::once(5), 1) } #[test] - #[cfg_attr(miri, ignore)] fn compress_two() { generic_compress_few([2, 8].iter().cloned(), 1) } #[test] - #[cfg_attr(miri, ignore)] fn compress_ten() { generic_compress_few(0..10, 2) } #[test] - #[cfg_attr(miri, ignore)] fn compress_twenty() { generic_compress_few(-10..10, 4) } @@ -1217,79 +1229,66 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_32() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_24() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u32_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_12() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u64_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_16() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_12() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u16_u32_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u32_8() { generic_compress_many::(); } #[test] - #[cfg_attr(miri, ignore)] fn compress_many_u8_u16_8() { generic_compress_many::(); } @@ -1304,7 +1303,12 @@ mod tests { f64: AsPrimitive, i32: AsPrimitive, { + #[cfg(not(miri))] const AMT: usize = 1000; + + #[cfg(miri)] + const AMT: usize = 100; + let mut symbols_gaussian = Vec::with_capacity(AMT); let mut means = Vec::with_capacity(AMT); let mut stds = Vec::with_capacity(AMT); @@ -1388,10 +1392,12 @@ mod tests { } #[test] - #[cfg_attr(miri, ignore)] fn seek() { - const NUM_CHUNKS: usize = 100; - const SYMBOLS_PER_CHUNK: usize = 100; + #[cfg(not(miri))] + let (num_chunks, symbols_per_chunk) = (100, 100); + + #[cfg(miri)] + let (num_chunks, symbols_per_chunk) = (10, 10); let quantizer = DefaultLeakyQuantizer::new(-100..=100); let model = quantizer.quantize(Gaussian::new(0.0, 10.0)); @@ -1399,12 +1405,12 @@ mod tests { let mut encoder = DefaultAnsCoder::new(); let mut rng = Xoshiro256StarStar::seed_from_u64(123); - let mut symbols = Vec::with_capacity(NUM_CHUNKS); - let mut jump_table = Vec::with_capacity(NUM_CHUNKS); + let mut symbols = Vec::with_capacity(num_chunks); + let mut jump_table = Vec::with_capacity(num_chunks); let (initial_pos, initial_state) = encoder.pos(); - for _ in 
         let (initial_pos, initial_state) = encoder.pos();
 
-        for _ in 0..NUM_CHUNKS {
-            let chunk = (0..SYMBOLS_PER_CHUNK)
+        for _ in 0..num_chunks {
+            let chunk = (0..symbols_per_chunk)
                 .map(|_| model.quantile_function(rng.next_u32() % (1 << 24)).0)
                 .collect::<Vec<_>>();
             encoder.encode_iid_symbols_reverse(&chunk, &model).unwrap();
@@ -1420,7 +1426,7 @@ mod tests {
         for (chunk, &(pos, state)) in symbols.iter().zip(&jump_table).rev() {
             assert_eq!(seekable_decoder.pos(), (pos, state));
             let decoded = seekable_decoder
-                .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model)
+                .decode_iid_symbols(symbols_per_chunk, &model)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
             assert_eq!(&decoded, chunk)
@@ -1430,11 +1436,11 @@ mod tests {
 
         // Seek to some random offsets in the jump table and decode one chunk
         for _ in 0..100 {
-            let chunk_index = rng.next_u32() as usize % NUM_CHUNKS;
+            let chunk_index = rng.next_u32() as usize % num_chunks;
             let (pos, state) = jump_table[chunk_index];
             seekable_decoder.seek((pos, state)).unwrap();
             let decoded = seekable_decoder
-                .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model)
+                .decode_iid_symbols(symbols_per_chunk, &model)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
             assert_eq!(&decoded, &symbols[chunk_index])
@@ -1457,7 +1463,7 @@ mod tests {
         for (chunk, &(pos, state)) in symbols.iter().zip(&jump_table).rev() {
             assert_eq!(seekable_decoder.pos(), (pos, state));
             let decoded = seekable_decoder
-                .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model)
+                .decode_iid_symbols(symbols_per_chunk, &model)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
             assert_eq!(&decoded, chunk)
@@ -1467,11 +1473,11 @@ mod tests {
 
         // Seek to some random offsets in the jump table and decode one chunk each time.
         for _ in 0..100 {
-            let chunk_index = rng.next_u32() as usize % NUM_CHUNKS;
+            let chunk_index = rng.next_u32() as usize % num_chunks;
             let (pos, state) = jump_table[chunk_index];
             seekable_decoder.seek((pos, state)).unwrap();
             let decoded = seekable_decoder
-                .decode_iid_symbols(SYMBOLS_PER_CHUNK, &model)
+                .decode_iid_symbols(symbols_per_chunk, &model)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
             assert_eq!(&decoded, &symbols[chunk_index])
diff --git a/src/symbol/exp_golomb.rs b/src/symbol/exp_golomb.rs
index 1e2395e..f1fd398 100644
--- a/src/symbol/exp_golomb.rs
+++ b/src/symbol/exp_golomb.rs
@@ -1,9 +1,6 @@
 use core::{borrow::Borrow, marker::PhantomData};
 
-use num::{
-    traits::{WrappingAdd, WrappingSub},
-    PrimInt, Unsigned,
-};
+use num_traits::{PrimInt, Unsigned, WrappingAdd, WrappingSub};
 
 use super::{Codebook, DecoderCodebook, EncoderCodebook, SymbolCodeError};
 use crate::{CoderError, DefaultEncoderError};
diff --git a/src/symbol/huffman.rs b/src/symbol/huffman.rs
index 4d36e01..85349d8 100644
--- a/src/symbol/huffman.rs
+++ b/src/symbol/huffman.rs
@@ -5,7 +5,7 @@
 //! Huffman, David A. "A method for the construction of minimum-redundancy codes."
 //! Proceedings of the IRE 40.9 (1952): 1098-1101.
 
-use num::Float;
+use num_traits::float::FloatCore;
 
 use alloc::{collections::BinaryHeap, vec, vec::Vec};
 use core::{
@@ -50,14 +50,14 @@ impl EncoderHuffmanTree {
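+    /// For example, the following sketch builds a tree over three illustrative
+    /// symbol probabilities (they need not sum to one, but they must not be NaN):
+    ///
+    /// ```
+    /// use constriction::symbol::huffman::EncoderHuffmanTree;
+    ///
+    /// let tree = EncoderHuffmanTree::from_float_probabilities::<f32, _>(&[0.2, 0.3, 0.5])
+    ///     .unwrap();
+    /// ```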
     pub fn from_float_probabilities<P, I>(probabilities: I) -> Result<Self, NanError>
     where
-        P: Float + Clone + Add,
+        P: FloatCore + Clone + Add,
         I: IntoIterator,
         I::Item: Borrow<P>,
     {
         Self::try_from_probabilities(
             probabilities
                 .into_iter()
-                .map(|p| NonNanFloat::new(*p.borrow())),
+                .map(|p| NonNanFloatCore::new(*p.borrow())),
         )
     }
@@ -185,14 +185,14 @@ impl DecoderHuffmanTree {
     pub fn from_float_probabilities<P, I>(probabilities: I) -> Result<Self, NanError>
     where
-        P: Float + Clone + Add,
+        P: FloatCore + Clone + Add,
         I: IntoIterator,
         I::Item: Borrow<P>,
     {
         Self::try_from_probabilities(
             probabilities
                 .into_iter()
-                .map(|p| NonNanFloat::new(*p.borrow())),
+                .map(|p| NonNanFloatCore::new(*p.borrow())),
         )
     }
@@ -270,11 +270,11 @@ impl DecoderCodebook for DecoderHuffmanTree {
 }
 
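+/// Wrapper around a float that verifies at construction that the wrapped value is
+/// not NaN. This makes a total order possible, which lets us store probabilities in
+/// the `BinaryHeap` used for tree construction (`BinaryHeap` requires `Ord`, but
+/// bare floats are only `PartialOrd` because of NaN).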
 #[derive(PartialOrd, Clone, Copy)]
-struct NonNanFloat<F: Float> {
+struct NonNanFloatCore<F: FloatCore> {
     inner: F,
 }
 
-impl<F: Float> NonNanFloat<F> {
+impl<F: FloatCore> NonNanFloatCore<F> {
     fn new(x: F) -> Result<Self, NanError> {
         if x.is_nan() {
             Err(NanError::NaN)
         } else {
@@ -284,28 +284,28 @@ impl<F: Float> NonNanFloat<F> {
         }
     }
 }
 
-impl<F: Float> PartialEq for NonNanFloat<F> {
+impl<F: FloatCore> PartialEq for NonNanFloatCore<F> {
     fn eq(&self, other: &Self) -> bool {
         self.inner.eq(&other.inner)
     }
 }
 
-impl<F: Float> Eq for NonNanFloat<F> {}
+impl<F: FloatCore> Eq for NonNanFloatCore<F> {}
 
 #[allow(clippy::derive_ord_xor_partial_ord)]
-impl<F: Float> Ord for NonNanFloat<F> {
+impl<F: FloatCore> Ord for NonNanFloatCore<F> {
     fn cmp(&self, other: &Self) -> core::cmp::Ordering {
         self.inner
             .partial_cmp(&other.inner)
-            .expect("NonNanFloat::inner is not NaN.")
+            .expect("NonNanFloatCore::inner is not NaN.")
     }
 }
 
-impl<F: Float> Add for NonNanFloat<F> {
+impl<F: FloatCore> Add for NonNanFloatCore<F> {
     type Output = Self;
 
     fn add(self, rhs: Self) -> Self::Output {
-        NonNanFloat {
+        NonNanFloatCore {
             inner: self.inner + rhs.inner,
         }
     }
@@ -373,7 +373,7 @@ mod tests {
         assert_eq!(tree.nodes, [12, 13, 15, 10, 11, 14, 16, 17, 0]);
         assert_eq!(encode_all_symbols(&tree), ["00", "01", "11", "100", "101"]);
 
         // Let's not test ties of sums in floating point probabilities since they'll depend
         // on rounding errors (but should still be deterministic).
         let tree =
             EncoderHuffmanTree::from_float_probabilities::<f64, _>(&[0.19, 0.2, 0.41, 0.1, 0.1])
@@ -438,7 +438,7 @@ mod tests {
             &EncoderHuffmanTree::from_probabilities::<u32, _>(&[2, 2, 4, 1, 1]),
         );
 
         // Let's not test ties of sums in floating point probabilities since they'll depend
        // on rounding errors (but should still be deterministic).
         let tree =
             DecoderHuffmanTree::from_float_probabilities::<f64, _>(&[0.19, 0.2, 0.41, 0.1, 0.1])
diff --git a/src/symbol/mod.rs b/src/symbol/mod.rs
index f953384..ba30630 100644
--- a/src/symbol/mod.rs
+++ b/src/symbol/mod.rs
@@ -698,7 +698,7 @@ impl Display for SymbolCodeError {
                 f,
                 "The compressed data ended before the current codeword was complete."
             ),
-            Self::InvalidCodeword(err) => write!(f, "Invalid codeword for this codebook: {}", err),
+            Self::InvalidCodeword(err) => write!(f, "Invalid codeword for this codebook: {err}"),
         }
     }
 }
@@ -929,15 +929,12 @@ mod tests {
             })
         }
 
-        let amt;
         #[cfg(not(miri))]
-        {
-            amt = 1000;
-        }
+        let amt = 1000;
+
+        // We use different settings when testing on miri so that the test time stays reasonable.
         #[cfg(miri)]
-        {
-            amt = 100; // miri would take forever if we used `amt = 1000` here.
-        }
+        let amt = 100;
 
         let mut compressed = DefaultQueueEncoder::new();
diff --git a/tests/random_data.rs b/tests/random_data.rs
index cb9c63f..4aef82b 100644
--- a/tests/random_data.rs
+++ b/tests/random_data.rs
@@ -2,7 +2,7 @@
 
 use std::{cmp::max, cmp::min, ops::RangeInclusive};
 
-use num::cast::AsPrimitive;
+use num_traits::AsPrimitive;
 use probability::{
     distribution::{Gaussian, Sample},
     source::{Source, Xorshift128Plus},
@@ -130,13 +130,13 @@ fn compare(
 }
 
 #[test]
-#[cfg_attr(miri, ignore)]
 fn grid() {
     let amts = [
         10,
         100,
+        #[cfg(not(miri))]
         1000,
-        #[cfg(not(debug_assertions))]
+        #[cfg(not(any(miri, debug_assertions)))]
         10000,
     ];
diff --git a/tests/readme.rs b/tests/readme.rs
index ba58495..cf79b95 100644
--- a/tests/readme.rs
+++ b/tests/readme.rs
@@ -1,7 +1,6 @@
 //! This is the example from `README.md`
 
 #[test]
-#[cfg_attr(miri, ignore)]
 fn example() {
     use constriction::stream::{model::DefaultLeakyQuantizer, stack::DefaultAnsCoder, Decode};