Use cfg_match! in core #138996

Merged: 1 commit, Mar 27, 2025
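This PR swaps `library/core`'s private copy of `cfg_if!` for the built-in `cfg_match!` macro (gated on `#![feature(cfg_match)]`). The shape of the migration, as a minimal before/after sketch (the `foo` items are placeholders, not code from this diff):

```rust
// Before: cfg_if! chains cfg'd blocks with if/else syntax.
cfg_if! {
    if #[cfg(unix)] {
        fn foo() { /* unix-specific */ }
    } else {
        fn foo() { /* fallback */ }
    }
}

// After: cfg_match! uses match-like arms; the `_` arm is the fallback.
crate::cfg_match! {
    unix => {
        fn foo() { /* unix-specific */ }
    }
    _ => {
        fn foo() { /* fallback */ }
    }
}
```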
28 changes: 16 additions & 12 deletions library/core/src/ffi/primitives.rs
@@ -35,7 +35,7 @@ type_alias! { "c_float.md", c_float = f32; }
type_alias! { "c_double.md", c_double = f64; }

mod c_char_definition {
- cfg_if! {
+ crate::cfg_match! {
// These are the targets on which c_char is unsigned. Usually the
// signedness is the same for all target_os values on a given architecture
// but there are some exceptions (see isSignedCharDefault() in clang).
@@ -105,7 +105,7 @@ mod c_char_definition {
// architecture defaults). As we only have a target for userspace apps so there are no
// special cases for L4Re below.
// https://github.com/rust-lang/rust/pull/132975#issuecomment-2484645240
- if #[cfg(all(
+ all(
not(windows),
not(target_vendor = "apple"),
not(target_os = "vita"),
@@ -122,24 +122,27 @@
target_arch = "s390x",
target_arch = "xtensa",
)
- ))] {
+ ) => {
pub(super) type c_char = u8;
- } else {
- // On every other target, c_char is signed.
+ }
+ // On every other target, c_char is signed.
+ _ => {
pub(super) type c_char = i8;
}
}
}

mod c_long_definition {
- cfg_if! {
- if #[cfg(any(
+ crate::cfg_match! {
+ any(
all(target_pointer_width = "64", not(windows)),
// wasm32 Linux ABI uses 64-bit long
- all(target_arch = "wasm32", target_os = "linux")))] {
+ all(target_arch = "wasm32", target_os = "linux")
+ ) => {
pub(super) type c_long = i64;
pub(super) type c_ulong = u64;
- } else {
+ }
+ _ => {
// The minimal size of `long` in the C standard is 32 bits
pub(super) type c_long = i32;
pub(super) type c_ulong = u32;
@@ -169,11 +172,12 @@ pub type c_ptrdiff_t = isize;
pub type c_ssize_t = isize;

mod c_int_definition {
- cfg_if! {
- if #[cfg(any(target_arch = "avr", target_arch = "msp430"))] {
+ crate::cfg_match! {
+ any(target_arch = "avr", target_arch = "msp430") => {
pub(super) type c_int = i16;
pub(super) type c_uint = u16;
- } else {
+ }
+ _ => {
pub(super) type c_int = i32;
pub(super) type c_uint = u32;
}
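The signedness selected by `c_char_definition` is observable from the alias itself; a quick illustrative check (not part of this PR) that prints `0` where `c_char = u8` (e.g. s390x Linux, per the list above) and `-128` where `c_char = i8`:

```rust
use core::ffi::c_char;

fn main() {
    // u8::MIN == 0 on unsigned-char targets; i8::MIN == -128 elsewhere.
    println!("c_char::MIN = {}", c_char::MIN);
}
```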
77 changes: 0 additions & 77 deletions library/core/src/internal_macros.rs
@@ -120,80 +120,3 @@ macro_rules! impl_fn_for_zst {
)+
}
}
-
- /// A macro for defining `#[cfg]` if-else statements.
- ///
- /// `cfg_if` is similar to the `if/elif` C preprocessor macro by allowing definition of a cascade
- /// of `#[cfg]` cases, emitting the implementation which matches first.
- ///
- /// This allows you to conveniently provide a long list `#[cfg]`'d blocks of code without having to
- /// rewrite each clause multiple times.
- ///
- /// # Example
- ///
- /// ```ignore(cannot-test-this-because-non-exported-macro)
- /// cfg_if! {
- /// if #[cfg(unix)] {
- /// fn foo() { /* unix specific functionality */ }
- /// } else if #[cfg(target_pointer_width = "32")] {
- /// fn foo() { /* non-unix, 32-bit functionality */ }
- /// } else {
- /// fn foo() { /* fallback implementation */ }
- /// }
- /// }
- ///
- /// # fn main() {}
- /// ```
- // This is a copy of `cfg_if!` from the `cfg_if` crate.
- // The recursive invocations should use $crate if this is ever exported.
- macro_rules! cfg_if {
- // match if/else chains with a final `else`
- (
- $(
- if #[cfg( $i_meta:meta )] { $( $i_tokens:tt )* }
- ) else+
- else { $( $e_tokens:tt )* }
- ) => {
- cfg_if! {
- @__items () ;
- $(
- (( $i_meta ) ( $( $i_tokens )* )) ,
- )+
- (() ( $( $e_tokens )* )) ,
- }
- };
-
- // Internal and recursive macro to emit all the items
- //
- // Collects all the previous cfgs in a list at the beginning, so they can be
- // negated. After the semicolon is all the remaining items.
- (@__items ( $( $_:meta , )* ) ; ) => {};
- (
- @__items ( $( $no:meta , )* ) ;
- (( $( $yes:meta )? ) ( $( $tokens:tt )* )) ,
- $( $rest:tt , )*
- ) => {
- // Emit all items within one block, applying an appropriate #[cfg]. The
- // #[cfg] will require all `$yes` matchers specified and must also negate
- // all previous matchers.
- #[cfg(all(
- $( $yes , )?
- not(any( $( $no ),* ))
- ))]
- cfg_if! { @__identity $( $tokens )* }
-
- // Recurse to emit all other items in `$rest`, and when we do so add all
- // our `$yes` matchers to the list of `$no` matchers as future emissions
- // will have to negate everything we just matched as well.
- cfg_if! {
- @__items ( $( $no , )* $( $yes , )? ) ;
- $( $rest , )*
- }
- };
-
- // Internal macro to make __apply work out right for different match types,
- // because of how macros match/expand stuff.
- (@__identity $( $tokens:tt )* ) => {
- $( $tokens )*
- };
- }
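For reference, the `@__items` recursion in the macro removed above makes the emitted blocks mutually exclusive by negating every earlier predicate. Hand-expanding a two-branch invocation gives approximately the following (a sketch, not actual compiler output):

```rust
// cfg_if! { if #[cfg(unix)] { fn foo() { /* a */ } } else { fn foo() { /* b */ } } }
// expands to roughly:

// First arm: its own predicate plus an (empty) negation list.
#[cfg(all(unix, not(any())))]
fn foo() { /* a */ }

// Else arm: no predicate of its own, only the negation of all earlier ones.
#[cfg(all(not(any(unix))))]
fn foo() { /* b */ }
```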
1 change: 1 addition & 0 deletions library/core/src/lib.rs
@@ -100,6 +100,7 @@
#![feature(bigint_helper_methods)]
#![feature(bstr)]
#![feature(bstr_internals)]
+ #![feature(cfg_match)]
#![feature(closure_track_caller)]
#![feature(const_carrying_mul_add)]
#![feature(const_eval_select)]
23 changes: 12 additions & 11 deletions library/core/src/num/f32.rs
@@ -14,7 +14,7 @@
use crate::convert::FloatToInt;
use crate::num::FpCategory;
use crate::panic::const_assert;
- use crate::{intrinsics, mem};
+ use crate::{cfg_match, intrinsics, mem};

/// The radix or base of the internal representation of `f32`.
/// Use [`f32::RADIX`] instead.
@@ -996,21 +996,22 @@ impl f32 {
#[stable(feature = "num_midpoint", since = "1.85.0")]
#[rustc_const_stable(feature = "num_midpoint", since = "1.85.0")]
pub const fn midpoint(self, other: f32) -> f32 {
- cfg_if! {
+ cfg_match! {
// Allow faster implementation that have known good 64-bit float
// implementations. Falling back to the branchy code on targets that don't
// have 64-bit hardware floats or buggy implementations.
// https://github.com/rust-lang/rust/pull/121062#issuecomment-2123408114
- if #[cfg(any(
- target_arch = "x86_64",
- target_arch = "aarch64",
- all(any(target_arch = "riscv32", target_arch = "riscv64"), target_feature = "d"),
- all(target_arch = "arm", target_feature = "vfp2"),
- target_arch = "wasm32",
- target_arch = "wasm64",
- ))] {
+ any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ all(any(target_arch = "riscv32", target_arch = "riscv64"), target_feature = "d"),
+ all(target_arch = "arm", target_feature = "vfp2"),
+ target_arch = "wasm32",
+ target_arch = "wasm64",
+ ) => {
((self as f64 + other as f64) / 2.0) as f32
- } else {
+ }
+ _ => {
const LO: f32 = f32::MIN_POSITIVE * 2.;
const HI: f32 = f32::MAX / 2.;

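The reasoning in the `midpoint` comment above is overflow at the extremes: averaging two large values naively in `f32` can round the intermediate sum to infinity, while the widened `f64` sum stays in range. A small illustrative check (not part of the PR):

```rust
fn main() {
    let a = f32::MAX;
    // The naive average overflows: a + a rounds to +inf in f32.
    assert!(((a + a) / 2.0).is_infinite());
    // Widening to f64 keeps the sum representable, and the result
    // converts back to f32 exactly.
    assert_eq!(((a as f64 + a as f64) / 2.0) as f32, f32::MAX);
}
```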
8 changes: 5 additions & 3 deletions library/core/src/slice/sort/select.rs
@@ -6,6 +6,7 @@
//! for pivot selection. Using this as a fallback ensures O(n) worst case running time with
//! better performance than one would get using heapsort as fallback.

+ use crate::cfg_match;
use crate::mem::{self, SizedTypeProperties};
#[cfg(not(feature = "optimize_for_size"))]
use crate::slice::sort::shared::pivot::choose_pivot;
@@ -41,10 +42,11 @@ where
let min_idx = min_index(v, &mut is_less).unwrap();
v.swap(min_idx, index);
} else {
- cfg_if! {
- if #[cfg(feature = "optimize_for_size")] {
+ cfg_match! {
+ feature = "optimize_for_size" => {
median_of_medians(v, &mut is_less, index);
- } else {
+ }
+ _ => {
partition_at_index_loop(v, index, None, &mut is_less);
}
}
16 changes: 9 additions & 7 deletions library/core/src/slice/sort/stable/mod.rs
@@ -2,12 +2,12 @@

#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::cmp;
- use crate::intrinsics;
use crate::mem::{MaybeUninit, SizedTypeProperties};
#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::slice::sort::shared::smallsort::{
SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left,
};
+ use crate::{cfg_match, intrinsics};

pub(crate) mod merge;

@@ -39,17 +39,18 @@ pub fn sort<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], is_less
return;
}

- cfg_if! {
- if #[cfg(any(feature = "optimize_for_size", target_pointer_width = "16"))] {
+ cfg_match! {
+ any(feature = "optimize_for_size", target_pointer_width = "16") => {
// Unlike driftsort, mergesort only requires len / 2,
// not len - len / 2.
let alloc_len = len / 2;

- cfg_if! {
- if #[cfg(target_pointer_width = "16")] {
+ cfg_match! {
+ target_pointer_width = "16" => {
let mut heap_buf = BufT::with_capacity(alloc_len);
let scratch = heap_buf.as_uninit_slice_mut();
- } else {
+ }
+ _ => {
// For small inputs 4KiB of stack storage suffices, which allows us to avoid
// calling the (de-)allocator. Benchmarks showed this was quite beneficial.
let mut stack_buf = AlignedStorage::<T, 4096>::new();
@@ -65,7 +66,8 @@ pub fn sort<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], is_less
}

tiny::mergesort(v, scratch, is_less);
- } else {
+ }
+ _ => {
// More advanced sorting methods than insertion sort are faster if called in
// a hot loop for small inputs, but for general-purpose code the small
// binary size of insertion sort is more important. The instruction cache in
9 changes: 5 additions & 4 deletions library/core/src/slice/sort/unstable/mod.rs
@@ -1,11 +1,11 @@
//! This module contains the entry points for `slice::sort_unstable`.

- use crate::intrinsics;
use crate::mem::SizedTypeProperties;
#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::slice::sort::shared::find_existing_run;
#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
use crate::slice::sort::shared::smallsort::insertion_sort_shift_left;
+ use crate::{cfg_match, intrinsics};

pub(crate) mod heapsort;
pub(crate) mod quicksort;
@@ -30,10 +30,11 @@ pub fn sort<T, F: FnMut(&T, &T) -> bool>(v: &mut [T], is_less: &mut F) {
return;
}

- cfg_if! {
- if #[cfg(any(feature = "optimize_for_size", target_pointer_width = "16"))] {
+ cfg_match! {
+ any(feature = "optimize_for_size", target_pointer_width = "16") => {
heapsort::heapsort(v, is_less);
- } else {
+ }
+ _ => {
// More advanced sorting methods than insertion sort are faster if called in
// a hot loop for small inputs, but for general-purpose code the small
// binary size of insertion sort is more important. The instruction cache in
9 changes: 5 additions & 4 deletions library/core/src/slice/sort/unstable/quicksort.rs
@@ -9,7 +9,7 @@ use crate::slice::sort::shared::pivot::choose_pivot;
use crate::slice::sort::shared::smallsort::UnstableSmallSortTypeImpl;
#[cfg(not(feature = "optimize_for_size"))]
use crate::slice::sort::unstable::heapsort;
- use crate::{intrinsics, ptr};
+ use crate::{cfg_match, intrinsics, ptr};

/// Sorts `v` recursively.
///
@@ -142,10 +142,11 @@ const fn inst_partition<T, F: FnMut(&T, &T) -> bool>() -> fn(&mut [T], &T, &mut
if size_of::<T>() <= MAX_BRANCHLESS_PARTITION_SIZE {
// Specialize for types that are relatively cheap to copy, where branchless optimizations
// have large leverage e.g. `u64` and `String`.
- cfg_if! {
- if #[cfg(feature = "optimize_for_size")] {
+ cfg_match! {
+ feature = "optimize_for_size" => {
partition_lomuto_branchless_simple::<T, F>
- } else {
+ }
+ _ => {
partition_lomuto_branchless_cyclic::<T, F>
}
}