@@ -474,7 +474,7 @@ pub struct UniformInt<X> {
474
474
}
475
475
476
476
macro_rules! uniform_int_impl {
477
- ( $ty: ty, $unsigned: ident, $u_large: ident) => {
477
+ ( $ty: ty, $unsigned: ident, $u_large: ident, $u_extra_large : ident ) => {
478
478
impl SampleUniform for $ty {
479
479
type Sampler = UniformInt <$ty>;
480
480
}
@@ -577,9 +577,8 @@ macro_rules! uniform_int_impl {
577
577
"UniformSampler::sample_single_inclusive: low > high"
578
578
) ;
579
579
let range = high. wrapping_sub( low) . wrapping_add( 1 ) as $unsigned as $u_large;
580
- // If the above resulted in wrap-around to 0, the range is $ty::MIN..=$ty::MAX,
581
- // and any integer will do.
582
580
if range == 0 {
581
+ // Range is MAX+1 (unrepresentable), so we need a special case
583
582
return rng. gen ( ) ;
584
583
}
585
584
@@ -604,22 +603,183 @@ macro_rules! uniform_int_impl {
604
603
}
605
604
}
606
605
}
606
+
607
/// Sample a single value uniformly from `[low, high]` (inclusive) using the
/// "Debiased Int Mult (t-opt, m-opt)" rejection method (O'Neill).
///
/// # Panics
///
/// Panics if `low > high`.
#[inline]
fn sample_single_inclusive_oneill<R: Rng + ?Sized, B1, B2>(
    low_b: B1, high_b: B2, rng: &mut R,
) -> $ty
where
    B1: SampleBorrow<$ty> + Sized,
    B2: SampleBorrow<$ty> + Sized,
{
    let low = *low_b.borrow();
    let high = *high_b.borrow();
    assert!(
        low <= high,
        "UniformSampler::sample_single_inclusive: low > high"
    );
    // Size of the interval; wraps to 0 exactly when the range covers the
    // whole domain of `$ty` (MAX + 1, unrepresentable in $u_large).
    let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large;
    if range == 0 {
        // Range is MAX+1 (unrepresentable), so we need a special case
        return rng.gen();
    }

    // we use the "Debiased Int Mult (t-opt, m-opt)" rejection sampling method
    // described here https://www.pcg-random.org/posts/bounded-rands.html
    // and here https://github.com/imneme/bounded-rands

    // Widening multiply: the high half is the candidate in [0, range);
    // the low half tells us whether the draw landed in the biased region.
    let (mut hi, mut lo) = rng.gen::<$u_large>().wmul(range);
    if lo < range {
        // Rejection threshold is `(-range) mod range`; use subtraction
        // shortcuts to avoid the modulo when possible.
        let mut threshold = range.wrapping_neg();
        // this shortcut works best with large ranges
        if threshold >= range {
            threshold -= range;
            if threshold >= range {
                threshold %= range;
            }
        }
        // Redraw until the low half clears the threshold, removing bias.
        while lo < threshold {
            let (new_hi, new_lo) = rng.gen::<$u_large>().wmul(range);
            hi = new_hi;
            lo = new_lo;
        }
    }
    // `hi` is uniform in [0, range); shift into [low, high].
    low.wrapping_add(hi as $ty)
}
649
+
650
/// Sample a single value uniformly from `[low, high]` (inclusive) using
/// "Canon's method": one widening multiply plus at most one extra draw of
/// `$u_extra_large` bits to make any remaining bias undetectable — no
/// rejection loop.
///
/// # Panics
///
/// Panics if `low > high`.
#[inline]
fn sample_single_inclusive_canon<R: Rng + ?Sized, B1, B2>(
    low_b: B1, high_b: B2, rng: &mut R,
) -> $ty
where
    B1: SampleBorrow<$ty> + Sized,
    B2: SampleBorrow<$ty> + Sized,
{
    let low = *low_b.borrow();
    let high = *high_b.borrow();
    assert!(
        low <= high,
        "UniformSampler::sample_single_inclusive: low > high"
    );
    // Size of the interval; wraps to 0 exactly when the range covers the
    // whole domain of `$ty` (MAX + 1, unrepresentable).
    let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_extra_large;
    if range == 0 {
        // Range is MAX+1 (unrepresentable), so we need a special case
        return rng.gen();
    }

    // generate a sample using a sensible integer type
    // (high half is the candidate result, low half measures the error)
    let (mut result, lo_order) = rng.gen::<$u_extra_large>().wmul(range);

    // if the sample is biased...
    if lo_order > range.wrapping_neg() {
        // ...generate a new sample with 64 more bits, enough that bias is undetectable
        // NOTE(review): "64" assumes `$u_extra_large` is u64; for the
        // u128 instantiations this is 128 bits — comment is approximate.
        let (new_hi_order, _) =
            (rng.gen::<$u_extra_large>()).wmul(range as $u_extra_large);
        // and adjust if needed: a carry out of the extended low-order
        // sum bumps the result by one.
        result += lo_order
            .checked_add(new_hi_order as $u_extra_large)
            .is_none() as $u_extra_large;
    }

    low.wrapping_add(result as $ty)
}
686
+
687
/// Sample a single value uniformly from `[low, high]` (inclusive): Canon's
/// method combined with Lemire's tighter bias test — since `range` is fixed
/// here, the threshold `(-range) % range` can be computed exactly, so the
/// extra-bits branch is taken less often.
///
/// # Panics
///
/// Panics if `low > high`.
#[inline]
fn sample_inclusive_canon_lemire<R: Rng + ?Sized, B1, B2>(
    low_b: B1, high_b: B2, rng: &mut R,
) -> $ty
where
    B1: SampleBorrow<$ty> + Sized,
    B2: SampleBorrow<$ty> + Sized,
{
    let low = *low_b.borrow();
    let high = *high_b.borrow();
    assert!(
        low <= high,
        "UniformSampler::sample_single_inclusive: low > high"
    );
    // Size of the interval; wraps to 0 exactly when the range covers the
    // whole domain of `$ty` (MAX + 1, unrepresentable).
    let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_extra_large;
    if range == 0 {
        // Range is MAX+1 (unrepresentable), so we need a special case
        return rng.gen();
    }

    // generate a sample using a sensible integer type
    // (high half is the candidate result, low half measures the error)
    let (mut result, lo_order) = rng.gen::<$u_extra_large>().wmul(range);

    // if the sample is biased... (since range won't be changing we can further
    // improve this check with a modulo)
    // NOTE(review): comparison direction (`<`) differs from
    // `sample_single_inclusive_canon` (`>`) — confirm both are intended.
    if lo_order < range.wrapping_neg() % range {
        // ...generate a new sample with 64 more bits, enough that bias is undetectable
        let (new_hi_order, _) =
            (rng.gen::<$u_extra_large>()).wmul(range as $u_extra_large);
        // and adjust if needed: a carry out of the extended low-order
        // sum bumps the result by one.
        result += lo_order
            .checked_add(new_hi_order as $u_extra_large)
            .is_none() as $u_extra_large;
    }

    low.wrapping_add(result as $ty)
}
724
+
725
/// Sample a single value uniformly from `[low, high]` (inclusive) with the
/// classic bitmask rejection method: mask random bits down to the smallest
/// power-of-two cover of the range and retry until a value lands inside.
///
/// # Panics
///
/// Panics if `low > high`.
#[inline]
fn sample_single_inclusive_bitmask<R: Rng + ?Sized, B1, B2>(
    low_b: B1, high_b: B2, rng: &mut R,
) -> $ty
where
    B1: SampleBorrow<$ty> + Sized,
    B2: SampleBorrow<$ty> + Sized,
{
    let low = *low_b.borrow();
    let high = *high_b.borrow();
    assert!(
        low <= high,
        "UniformSampler::sample_single_inclusive: low > high"
    );
    // Size of the interval; wraps to 0 exactly when the range covers the
    // whole domain of `$ty` (MAX + 1, unrepresentable).
    let mut range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large;
    if range == 0 {
        // Range is MAX+1 (unrepresentable), so we need a special case
        return rng.gen();
    }

    // the old impl use a mix of methods for different integer sizes, we only use
    // the lz method here for a better comparison.

    let mut mask = $u_large::max_value();
    // After this decrement `range` is the largest acceptable offset.
    range -= 1;
    // `| 1` keeps the shift amount below the bit width when `range` is
    // now 0 (a full-width shift would overflow).
    mask >>= (range | 1).leading_zeros();
    loop {
        // Masked draw is uniform over [0, mask]; accept only in-range
        // values — acceptance probability is at least 1/2.
        let x = rng.gen::<$u_large>() & mask;
        if x <= range {
            return low.wrapping_add(x as $ty);
        }
    }
}
607
758
}
608
759
} ;
609
760
}
610
-
611
- uniform_int_impl ! { i8 , u8 , u32 }
612
- uniform_int_impl ! { i16 , u16 , u32 }
613
- uniform_int_impl ! { i32 , u32 , u32 }
614
- uniform_int_impl ! { i64 , u64 , u64 }
615
- uniform_int_impl ! { i128 , u128 , u128 }
616
- uniform_int_impl ! { isize , usize , usize }
617
- uniform_int_impl ! { u8 , u8 , u32 }
618
- uniform_int_impl ! { u16 , u16 , u32 }
619
- uniform_int_impl ! { u32 , u32 , u32 }
620
- uniform_int_impl ! { u64 , u64 , u64 }
621
- uniform_int_impl ! { usize , usize , usize }
622
- uniform_int_impl ! { u128 , u128 , u128 }
761
// Instantiate uniform sampling for every integer type. Macro arguments:
// (target type, its unsigned equivalent, the "large" working type used by
// the widening-multiply samplers, the "extra large" type used by the
// Canon variants for the extra low-order bits).
uniform_int_impl! { i8, u8, u32, u64 }
uniform_int_impl! { i16, u16, u32, u64 }
uniform_int_impl! { i32, u32, u32, u64 }
uniform_int_impl! { i64, u64, u64, u64 }
uniform_int_impl! { i128, u128, u128, u128 }
uniform_int_impl! { u8, u8, u32, u64 }
uniform_int_impl! { u16, u16, u32, u64 }
uniform_int_impl! { u32, u32, u32, u64 }
uniform_int_impl! { u64, u64, u64, u64 }
uniform_int_impl! { u128, u128, u128, u128 }
// isize/usize: on 16- and 32-bit targets promote the extra-large type to
// u64; on wider targets usize itself is already wide enough.
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32",))]
mod isize_int_impls {
    use super::*;
    uniform_int_impl! { isize, usize, usize, u64 }
    uniform_int_impl! { usize, usize, usize, u64 }
}
#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32",)))]
mod isize_int_impls {
    use super::*;
    uniform_int_impl! { isize, usize, usize, usize }
    uniform_int_impl! { usize, usize, usize, usize }
}
623
783
624
784
#[ cfg( feature = "simd_support" ) ]
625
785
macro_rules! uniform_simd_int_impl {
0 commit comments