diff --git a/Cargo.toml b/Cargo.toml
index cd517e4..ed5299d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,6 +16,7 @@ rust-version = "1.70.0"
 
 [dependencies]
 crc = "3"
+lazy_static = { version = "1.4.0", optional = true }
 
 [dev-dependencies]
 crc = "3"
@@ -25,6 +26,7 @@ rand = "0.8"
 
 [features]
 pmull = [] # deprecated, no longer have any effect.
+vpclmulqdq = ["lazy_static"]
 fake-simd = []
 
 [[bench]]
diff --git a/README.md b/README.md
index df23455..1519b2a 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ crc64fast-nvme
 
 SIMD-accelerated carryless-multiplication [CRC-64/NVME](https://reveng.sourceforge.io/crc-catalogue/all.htm#crc.cat.crc-64-nvme) checksum computation (similar to [crc32fast](https://crates.io/crates/crc32fast) and forked from [crc64fast](https://github.com/tikv/crc64fast) which calculates [CRC-64/XZ](https://reveng.sourceforge.io/crc-catalogue/all.htm#crc.cat.crc-64-xz) [a.k.a `CRC-64/GO-ECMA`]).
 
-`CRC-64/NVME` comes from the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf) (Revision 1.0d, December 2023) and has also been implemented in the [Linux kernel](https://github.com/torvalds/linux/blob/786c8248dbd33a5a7a07f7c6e55a7bfc68d2ca48/lib/crc64.c#L66-L73) (where it's called `CRC-64/Rocksoft`) and [AWS S3's recommended checksum option](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) as `CRC64-NVME`. (Note that the Check value in the spec uses incorrect endianness (Section 5.2.1.3.4, Figure 120, page 83).
+`CRC-64/NVME` comes from the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf) (Revision 1.0d, December 2023) and has also been implemented in the [Linux kernel](https://github.com/torvalds/linux/blob/786c8248dbd33a5a7a07f7c6e55a7bfc68d2ca48/lib/crc64.c#L66-L73) (where it's called `CRC-64/Rocksoft`) and is [AWS S3's recommended checksum option](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) as `CRC64-NVME`. (Note that the Check value in the spec uses incorrect endianness [Section 5.2.1.3.4, Figure 120, page 83].)
 
 SIMD-accelerated carryless-multiplication is based on the Intel [Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction](https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf) paper.
 
@@ -44,14 +44,28 @@ be chosen based on CPU feature at runtime.
 * using PCLMULQDQ + SSE 4.1 on x86/x86_64
 * using PMULL + NEON on AArch64 (64-bit ARM)
 
-| Algorithm              | Throughput (x86_64) | Throughput (aarch64) |
-|:-----------------------|--------------------:|---------------------:|
-| [crc 3.0.1]            | 0.5 GiB/s           | 0.3 GiB/s            |
-| crc64fast-nvme (table) | 2.3 GiB/s           | 1.8 GiB/s            |
-| crc64fast-nvme (simd)  | 28.2 GiB/s          | 20.0 GiB/s           |
+| Algorithm                   | Throughput (x86_64) | Throughput (aarch64) |
+|:----------------------------|--------------------:|---------------------:|
+| [crc 3.0.1]                 | 0.5 GiB/s           | 0.3 GiB/s            |
+| crc64fast-nvme (table)      | 2.3 GiB/s           | 1.8 GiB/s            |
+| crc64fast-nvme (SIMD)       | 28.2 GiB/s          | 20.0 GiB/s           |
+| crc64fast-nvme (VPCLMULQDQ) | 52 GiB/s            | n/a                  |
 
 [crc 3.0.1]: https://docs.rs/crc/3.0.1/crc/index.html
 
+## Experimental "Vector Carry-Less Multiplication of Quadwords" (VPCLMULQDQ) support
+
+Using Rust's support for [AVX512 intrinsics](https://github.com/rust-lang/rust/issues/111137), specifically [VPCLMULQDQ](https://doc.rust-lang.org/src/core/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs.html), we can massively improve throughput on x86_64 processors that support these instructions (Intel Ice Lake+ and AMD Zen 4+).
+
+On an `m7i.8xlarge` EC2 instance (4th-gen Xeon, a.k.a. Sapphire Rapids), throughput approximately _doubles_, from ~26 GiB/s to ~52 GiB/s.
+
+Since these intrinsics are currently marked as unstable features in Rust, you'll need to build with `nightly` and enable the `vpclmulqdq` feature:
+
+```
+rustup toolchain install nightly
+cargo +nightly build --features="vpclmulqdq" -r
+```
+
 ## References
 
 * [crc32-fast](https://crates.io/crates/crc32fast) - Original `crc32` implementation in Rust.
@@ -66,6 +80,7 @@ be chosen based on CPU feature at runtime.
 * [StackOverflow PCLMULQDQ CRC32 question](https://stackoverflow.com/questions/21171733/calculating-constants-for-crc32-using-pclmulqdq) - Insightful question & answer to CRC32 implementation details.
 * [AWS S3 announcement about CRC64-NVME support](https://aws.amazon.com/blogs/aws/introducing-default-data-integrity-protections-for-new-objects-in-amazon-s3/)
 * [AWS S3 docs on checking object integrity using CRC64-NVME](https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+* [Vector Carry-Less Multiplication of Quadwords (VPCLMULQDQ) details](https://en.wikichip.org/wiki/x86/vpclmulqdq)
 
 ## License
diff --git a/src/lib.rs b/src/lib.rs
index 25eaf64..b306ce3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -17,11 +17,22 @@
 //! let checksum = c.sum64();
 //! assert_eq!(checksum, 0xd9160d1fa8e418e3);
 //! ```
+//!
+//! Tracking links for unstable features used here (which require nightly builds):
+//!
+//! - simd_ffi: https://github.com/rust-lang/rust/issues/27731
+//! - link_llvm_intrinsics: https://github.com/rust-lang/rust/issues/29602
+//! - avx512_target_feature: https://github.com/rust-lang/rust/issues/111137
+
+#![cfg_attr(
+    feature = "vpclmulqdq",
+    feature(avx512_target_feature, stdarch_x86_avx512)
+)]
 
 mod pclmulqdq;
 mod table;
 
-type UpdateFn = fn(u64, &[u8]) -> u64;
+type UpdateFn = unsafe fn(u64, &[u8]) -> u64;
 
 /// Represents an in-progress CRC-64 computation.
 #[derive(Clone)]
@@ -52,7 +63,11 @@ impl Digest {
     /// Writes some data into the digest.
     pub fn write(&mut self, bytes: &[u8]) {
-        self.state = (self.computer)(self.state, bytes);
+        // SAFETY: `get_update()` only returns a SIMD implementation after
+        // verifying the required CPU features at runtime.
+        unsafe {
+            self.state = (self.computer)(self.state, bytes);
+        }
     }
 
     /// Computes the current CRC-64/NVME value.
diff --git a/src/pclmulqdq/mod.rs b/src/pclmulqdq/mod.rs
index 3e38f2c..a19c888 100644
--- a/src/pclmulqdq/mod.rs
+++ b/src/pclmulqdq/mod.rs
@@ -7,8 +7,17 @@
 //!
 //! [white paper]: https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
 
+use std::{
+    fmt::Debug,
+    ops::{BitXor, BitXorAssign},
+};
+
+use super::table;
+
+use self::arch::Simd;
+
 #[cfg(not(feature = "fake-simd"))]
-#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
+#[cfg_attr(target_arch = "x86_64", path = "x86_64/mod.rs")]
 #[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
 #[cfg_attr(target_arch = "x86", path = "x86.rs")]
 mod arch;
@@ -16,13 +25,6 @@ mod arch;
 #[cfg(feature = "fake-simd")]
 mod arch;
 
-use self::arch::Simd;
-use super::table;
-use std::{
-    fmt::Debug,
-    ops::{BitXor, BitXorAssign},
-};
-
 /// This trait must be implemented on `self::arch::Simd` to provide the
 /// platform-specific SIMD implementations.
 trait SimdExt: Copy + Debug + BitXor {
@@ -71,24 +73,47 @@ impl BitXorAssign for Simd {
 }
 
 pub fn get_update() -> super::UpdateFn {
+    #[cfg(feature = "vpclmulqdq")]
+    {
+        use arch::vpclmulqdq::*;
+        if Simd256::is_supported() {
+            return update_256_batch;
+        }
+    }
+
     if Simd::is_supported() {
-        update
+        update_128_batch
     } else {
         table::update
     }
 }
 
-fn update(mut state: u64, bytes: &[u8]) -> u64 {
-    let (left, middle, right) = unsafe { bytes.align_to::<[Simd; 8]>() };
+// This function is unsafe because it uses platform-dependent functions.
+unsafe fn update_128_batch(mut state: u64, bytes: &[u8]) -> u64 {
+    let (left, middle, right) = bytes.align_to::<[Simd; 8]>();
     if let Some((first, rest)) = middle.split_first() {
         state = table::update(state, left);
-        state = unsafe { update_simd(state, first, rest) };
+        state = update_simd(state, first, rest);
         table::update(state, right)
     } else {
         table::update(state, bytes)
     }
 }
 
+#[cfg(feature = "vpclmulqdq")]
+#[target_feature(enable = "avx2", enable = "vpclmulqdq")]
+unsafe fn update_256_batch(mut state: u64, bytes: &[u8]) -> u64 {
+    use arch::vpclmulqdq::*;
+    let (left, middle, right) = bytes.align_to::<[[Simd256; 4]; 2]>();
+    if let Some((first, rest)) = middle.split_first() {
+        state = update_128_batch(state, left);
+        state = update_vpclmulqdq(state, first, rest);
+        update_128_batch(state, right)
+    } else {
+        update_128_batch(state, bytes)
+    }
+}
+
 #[cfg_attr(
     any(target_arch = "x86", target_arch = "x86_64"),
     target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")
@@ -112,6 +137,11 @@ unsafe fn update_simd(state: u64, first: &[Simd; 8], rest: &[[Simd; 8]]) -> u64
         }
     }
 
+    fold_tail(x)
+}
+
+#[inline(always)]
+unsafe fn fold_tail(x: [Simd; 8]) -> u64 {
     let coeffs = [
         Simd::new(table::K_895, table::K_959), // fold by distance of 112 bytes
         Simd::new(table::K_767, table::K_831), // fold by distance of 96 bytes
diff --git a/src/pclmulqdq/x86_64.rs b/src/pclmulqdq/x86_64/mod.rs
similarity index 54%
rename from src/pclmulqdq/x86_64.rs
rename to src/pclmulqdq/x86_64/mod.rs
index 63abd80..e09c802 100644
--- a/src/pclmulqdq/x86_64.rs
+++ b/src/pclmulqdq/x86_64/mod.rs
@@ -1,11 +1,14 @@
 // Copyright 2020 TiKV Project Authors. Licensed under MIT or Apache-2.0.
 
-//! x86_64 implementation of the PCLMULQDQ-based CRC calculation.
+//! x86_64 implementation of the PCLMULQDQ-based CRC calculation (and home of the optional VPCLMULQDQ submodule).
 
 #[cfg(target_arch = "x86_64")]
 use std::arch::x86_64::*;
 use std::ops::BitXor;
 
+#[cfg(feature = "vpclmulqdq")]
+pub mod vpclmulqdq;
+
 #[repr(transparent)]
 #[derive(Copy, Clone, Debug)]
 pub struct Simd(__m128i);
@@ -14,20 +17,27 @@ impl super::SimdExt for Simd {
     fn is_supported() -> bool {
         is_x86_feature_detected!("pclmulqdq") // _mm_clmulepi64_si128
             && is_x86_feature_detected!("sse2") // (all other _mm_*)
-            && is_x86_feature_detected!("sse4.1") // _mm_extract_epi64
+            && is_x86_feature_detected!("sse4.1")
     }
 
     #[inline]
     #[target_feature(enable = "sse2")]
     unsafe fn new(high: u64, low: u64) -> Self {
-        Self(_mm_set_epi64x(high as i64, low as i64))
+        // Split each u64 into 32-bit halves; this builds the same value as `_mm_set_epi64x(high, low)` without the 64-bit-only intrinsic
+        let high_low = (high & 0xFFFFFFFF) as i32;
+        let high_high = ((high >> 32) & 0xFFFFFFFF) as i32;
+        let low_low = (low & 0xFFFFFFFF) as i32;
+        let low_high = ((low >> 32) & 0xFFFFFFFF) as i32;
+
+        // Create the 128-bit vector using 32-bit parts
+        Self(_mm_set_epi32(high_high, high_low, low_high, low_low))
     }
 
     #[inline]
     #[target_feature(enable = "sse2", enable = "pclmulqdq")]
     unsafe fn fold_16(self, coeff: Self) -> Self {
-        let h = Self(_mm_clmulepi64_si128(self.0, coeff.0, 0x11));
-        let l = Self(_mm_clmulepi64_si128(self.0, coeff.0, 0x00));
+        let h = Self(_mm_clmulepi64_si128::<0x11>(self.0, coeff.0));
+        let l = Self(_mm_clmulepi64_si128::<0x00>(self.0, coeff.0));
         h ^ l
     }
 
@@ -35,8 +45,8 @@ impl super::SimdExt for Simd {
     #[target_feature(enable = "sse2", enable = "pclmulqdq")]
     unsafe fn fold_8(self, coeff: u64) -> Self {
         let coeff = Self::new(0, coeff);
-        let h = Self(_mm_clmulepi64_si128(self.0, coeff.0, 0x00));
-        let l = Self(_mm_srli_si128(self.0, 8));
+        let h = Self(_mm_clmulepi64_si128::<0x00>(self.0, coeff.0));
+        let l = Self(_mm_srli_si128::<8>(self.0));
         h ^ l
     }
 
@@ -44,11 +54,11 @@ impl super::SimdExt for Simd {
     #[target_feature(enable = "sse2", enable = "sse4.1", enable = "pclmulqdq")]
     unsafe fn barrett(self, poly: u64, mu: u64) -> u64 {
         let polymu = Self::new(poly, mu);
-        let t1 = _mm_clmulepi64_si128(self.0, polymu.0, 0x00);
-        let h = Self(_mm_slli_si128(t1, 8));
-        let l = Self(_mm_clmulepi64_si128(t1, polymu.0, 0x10));
+        let t1 = _mm_clmulepi64_si128::<0x00>(self.0, polymu.0);
+        let h = Self(_mm_slli_si128::<8>(t1));
+        let l = Self(_mm_clmulepi64_si128::<0x10>(t1, polymu.0));
         let reduced = h ^ l ^ self;
-        _mm_extract_epi64(reduced.0, 1) as u64
+        _mm_extract_epi64::<1>(reduced.0) as u64
     }
 }
diff --git a/src/pclmulqdq/x86_64/vpclmulqdq.rs b/src/pclmulqdq/x86_64/vpclmulqdq.rs
new file mode 100644
index 0000000..d25900e
--- /dev/null
+++ b/src/pclmulqdq/x86_64/vpclmulqdq.rs
@@ -0,0 +1,220 @@
+use super::{super::fold_tail, Simd, __cpuid_count, __m256i, _mm256_set_epi64x, _mm256_xor_si256};
+use core::ops::BitXor;
+use lazy_static::lazy_static;
+use std::arch::x86_64::_mm256_clmulepi64_epi128;
+
+#[derive(Clone, Copy, Debug)]
+pub struct Simd256(__m256i);
+
+// Caching the feature detection in a lazy_static takes throughput from ~39 GiB/s to ~52 GiB/s
+lazy_static! {
+    static ref VPCLMULQDQ_SUPPORTED: bool = {
+        let avx2 = is_x86_feature_detected!("avx2");
+        // Rust's runtime feature detection is unreliable for VPCLMULQDQ,
+        // so detect it via CPUID (leaf 7, sub-leaf 0, ECX bit 10) directly.
+        let leaf_7 = unsafe { __cpuid_count(7, 0) };
+        let vpclmulqdq = (leaf_7.ecx & (1u32 << 10)) != 0;
+        avx2 && vpclmulqdq
+    };
+}
+
+impl Simd256 {
+    #[inline]
+    pub fn is_supported() -> bool {
+        *VPCLMULQDQ_SUPPORTED
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    pub unsafe fn new(x3: u64, x2: u64, x1: u64, x0: u64) -> Self {
+        Self(_mm256_set_epi64x(x3 as _, x2 as _, x1 as _, x0 as _))
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    pub unsafe fn to_simd_x8(self4: [Self; 4]) -> [Simd; 8] {
+        core::mem::transmute(self4)
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2", enable = "vpclmulqdq")]
+    pub unsafe fn fold_32(self, coeff: Self) -> Self {
+        let h = _mm256_clmulepi64_epi128(self.0, coeff.0, 0x11);
+        let l = _mm256_clmulepi64_epi128(self.0, coeff.0, 0x00);
+        Self(h) ^ Self(l)
+    }
+}
+
+impl BitXor for Simd256 {
+    type Output = Self;
+
+    #[inline(always)]
+    fn bitxor(self, other: Self) -> Self {
+        Self(unsafe { _mm256_xor_si256(self.0, other.0) })
+    }
+}
+
+#[inline]
+#[target_feature(enable = "avx2", enable = "vpclmulqdq")]
+pub(crate) unsafe fn update_vpclmulqdq(
+    state: u64,
+    first: &[[Simd256; 4]; 2],
+    rest: &[[[Simd256; 4]; 2]],
+) -> u64 {
+    // receive the first 256 bytes of data as two 128-byte halves
+    let (mut x, y) = (first[0], first[1]);
+
+    // XOR the initial CRC value into the low 64 bits of the first lane
+    x[0] = x[0] ^ Simd256::new(0, 0, 0, state);
+
+    // coefficient pair for folding by a distance of 128 bytes
+    let coeff = Simd256::new(
+        crate::table::K_1023,
+        crate::table::K_1087,
+        crate::table::K_1023,
+        crate::table::K_1087,
+    );
+
+    x[0] = x[0].fold_32(coeff) ^ y[0];
+    x[1] = x[1].fold_32(coeff) ^ y[1];
+    x[2] = x[2].fold_32(coeff) ^ y[2];
+    x[3] = x[3].fold_32(coeff) ^ y[3];
+
+    // perform 256-byte folding.
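+    // Each iteration consumes one 256-byte chunk: every 256-bit lane x[i] is
+    // folded forward twice by the 128-byte coefficient pair, once into each
+    // 128-byte half of the chunk, keeping four independent lanes in flight.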
+    for chunk in rest {
+        let chunk = *chunk;
+        x[0] = x[0].fold_32(coeff) ^ chunk[0][0];
+        x[0] = x[0].fold_32(coeff) ^ chunk[1][0];
+        x[1] = x[1].fold_32(coeff) ^ chunk[0][1];
+        x[1] = x[1].fold_32(coeff) ^ chunk[1][1];
+        x[2] = x[2].fold_32(coeff) ^ chunk[0][2];
+        x[2] = x[2].fold_32(coeff) ^ chunk[1][2];
+        x[3] = x[3].fold_32(coeff) ^ chunk[0][3];
+        x[3] = x[3].fold_32(coeff) ^ chunk[1][3];
+    }
+
+    let x = Simd256::to_simd_x8(x);
+    fold_tail(x)
+}
+
+impl PartialEq for Simd256 {
+    fn eq(&self, other: &Self) -> bool {
+        unsafe {
+            use core::mem::transmute;
+            let a: [u128; 2] = transmute(*self);
+            let b: [u128; 2] = transmute(*other);
+            a == b
+        }
+    }
+}
+
+impl Eq for Simd256 {}
+
+#[cfg(target_feature = "avx2")]
+#[test]
+fn test_size_and_alignment() {
+    assert_eq!(std::mem::size_of::<Simd256>(), 32);
+    assert_eq!(std::mem::align_of::<Simd256>(), 32);
+}
+
+#[cfg(target_feature = "avx2")]
+#[test]
+fn test_new() {
+    unsafe {
+        let x = Simd256::new(
+            0xd7c8_11cf_e5c5_c792,
+            0x86e6_5c36_e68b_4804,
+            0xd7c8_11cf_e5c5_c792,
+            0x86e6_5c36_e68b_4804,
+        );
+        let y = Simd256::new(
+            0xd7c8_11cf_e5c5_c792,
+            0x86e6_5c36_e68b_4804,
+            0xd7c8_11cf_e5c5_c792,
+            0x86e6_5c36_e68b_4804,
+        );
+        let z = Simd256::new(
+            0xfa3e_0099_cd5e_d60d,
+            0xad71_9ee6_57d1_498e,
+            0xfa3e_0099_cd5e_d60d,
+            0xad71_9ee6_57d1_498e,
+        );
+        assert_eq!(x, y);
+        assert_ne!(x, z);
+    }
+}
+
+#[cfg(target_feature = "avx2")]
+#[test]
+fn test_xor() {
+    unsafe {
+        let x = Simd256::new(
+            0xe450_87f9_b031_0d47,
+            0x3d72_e92a_96c7_4c63,
+            0xe450_87f9_b031_0d47,
+            0x3d72_e92a_96c7_4c63,
+        );
+        let y = Simd256::new(
+            0x7ed8_ae0a_dfbd_89c0,
+            0x1c9b_dfaa_953e_0ef4,
+            0x7ed8_ae0a_dfbd_89c0,
+            0x1c9b_dfaa_953e_0ef4,
+        );
+        let mut z = x ^ y;
+        assert_eq!(
+            z,
+            Simd256::new(
+                0x9a88_29f3_6f8c_8487,
+                0x21e9_3680_03f9_4297,
+                0x9a88_29f3_6f8c_8487,
+                0x21e9_3680_03f9_4297
+            )
+        );
+        z = z ^ Simd256::new(
+            0x57a2_0f44_c005_b2ea,
+            0x7056_bde9_9303_aa51,
+            0x57a2_0f44_c005_b2ea,
+            0x7056_bde9_9303_aa51,
+        );
+        assert_eq!(
+            z,
+            Simd256::new(
+                0xcd2a_26b7_af89_366d,
+                0x51bf_8b69_90fa_e8c6,
+                0xcd2a_26b7_af89_366d,
+                0x51bf_8b69_90fa_e8c6
+            )
+        );
+    }
+}
+
+#[cfg(all(target_feature = "avx2", target_feature = "vpclmulqdq"))]
+#[test]
+fn test_fold_32() {
+    unsafe {
+        let x = Simd256::new(
+            0xb5f1_2590_5645_0b6c,
+            0x333a_2c49_c361_9e21,
+            0xb5f1_2590_5645_0b6c,
+            0x333a_2c49_c361_9e21,
+        );
+        let f = x.fold_32(Simd256::new(
+            0xbecc_9dd9_038f_c366,
+            0x5ba9_365b_e2e9_5bf5,
+            0xbecc_9dd9_038f_c366,
+            0x5ba9_365b_e2e9_5bf5,
+        ));
+        assert_eq!(
+            f,
+            Simd256::new(
+                0x4f55_42df_ef35_1810,
+                0x0c03_5bd6_70fc_5abd,
+                0x4f55_42df_ef35_1810,
+                0x0c03_5bd6_70fc_5abd
+            )
+        );
+    }
+}
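
Regardless of which backend `get_update()` selects at runtime (table-driven, PCLMULQDQ, or VPCLMULQDQ), the public `Digest` API is unchanged. A minimal usage sketch against the API shown in `src/lib.rs` above, assuming the usual `Digest::new()` constructor from the crate's doc example and the crate name `crc64fast_nvme` (from the package name); the input bytes are illustrative:

```rust
use crc64fast_nvme::Digest;

fn main() {
    // Digest::new() picks the fastest supported implementation at runtime,
    // so the same code transparently uses VPCLMULQDQ when available.
    let mut c = Digest::new();
    c.write(b"some bytes");
    c.write(b"more bytes"); // write() can be called repeatedly to stream data
    println!("CRC-64/NVME: {:#018x}", c.sum64());
}
```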