diff --git a/stl/inc/algorithm b/stl/inc/algorithm
index b925caf9900..d41b0095ae6 100644
--- a/stl/inc/algorithm
+++ b/stl/inc/algorithm
@@ -6178,6 +6178,60 @@ namespace ranges {
 #endif // _HAS_CXX20
 #endif // _HAS_CXX17
 
+// Batched random integer generation for shuffle optimization.
+// From Nevin Brackett-Rozinsky and Daniel Lemire, "Batched Ranged Random Integer Generation",
+// Software: Practice and Experience 55(1), 2025.
+//
+// This algorithm extracts multiple bounded random integers from a single 64-bit random word,
+// using only multiplication (no division) in the common case.
+
+// Check if a URNG has full 64-bit range [0, 2^64 - 1].
+// Batched random generation is only beneficial for such RNGs.
+template <class _Urng>
+constexpr bool _Urng_has_full_64bit_range =
+    is_same_v<_Invoke_result_t<_Urng&>, uint64_t> && (_Urng::min)() == 0 && (_Urng::max)() == _Max_limit<uint64_t>();
+
+template <class _Diff, class _Urng>
+struct _Batched_rng_from_urng {
+    _Urng& _Ref;
+
+    explicit _Batched_rng_from_urng(_Urng& _Func) noexcept : _Ref(_Func) {}
+
+    // Generate _K bounded random indices from a single 64-bit random word.
+    // Uses backward Fisher-Yates bounds: _N, _N-1, ..., _N-_K+1.
+    // _Results[j] is uniform in [0, _N-j).
+    //
+    // _Bound is a conservative upper bound on the product of the _K bounds,
+    // used to skip the threshold computation on the fast path. Returns the
+    // (possibly tightened) bound for the caller to pass to the next call.
+ uint64_t _Batch(_Diff* _Results, uint64_t _N, uint64_t _K, uint64_t _Bound) { + uint64_t _Rand = static_cast(_Ref()); + uint64_t _High; + for (uint64_t _J = 0; _J < _K; ++_J) { + _Rand = _Base128::_UMul128(_Rand, _N - _J, _High); + _Results[_J] = static_cast<_Diff>(_High); + } + if (_Rand < _Bound) { + _Bound = _N; + for (uint64_t _J = 1; _J < _K; ++_J) { + _Bound *= _N - _J; + } + const uint64_t _Threshold = (0 - _Bound) % _Bound; + while (_Rand < _Threshold) { + _Rand = static_cast(_Ref()); + for (uint64_t _J = 0; _J < _K; ++_J) { + _Rand = _Base128::_UMul128(_Rand, _N - _J, _High); + _Results[_J] = static_cast<_Diff>(_High); + } + } + } + return _Bound; + } + + _Batched_rng_from_urng(const _Batched_rng_from_urng&) = delete; + _Batched_rng_from_urng& operator=(const _Batched_rng_from_urng&) = delete; +}; + template class _Rng_from_urng_v2 { // wrap a URNG as an RNG public: @@ -6521,11 +6575,106 @@ void _Random_shuffle1(_RanIt _First, _RanIt _Last, _RngFn& _RngFunc) { } } +// Batched shuffle implementation for 64-bit URNGs with full range. +// Uses backward Fisher-Yates (Durstenfeld) with batched random generation +// to reduce URNG calls. Adapted from Lemire's cpp_batched_random. +template +void _Random_shuffle_batched(_RanIt _First, _RanIt _Last, _Urng& _Func) { + _STD _Adl_verify_range(_First, _Last); + auto _UFirst = _STD _Get_unwrapped(_First); + const auto _ULast = _STD _Get_unwrapped(_Last); + + using _Diff = _Iter_diff_t<_RanIt>; + auto _Remaining = static_cast(_ULast - _UFirst); + if (_Remaining <= 1) { + return; + } + + _Batched_rng_from_urng<_Diff, _Urng> _BatchedRng(_Func); + _Diff _Offsets[7]; + + // Process one element at a time while bounds are too large for batching. + for (; _Remaining > (uint64_t{1} << 30); --_Remaining) { + _BatchedRng._Batch(_Offsets, _Remaining, 1, _Remaining); + swap(*(_UFirst + _Remaining - 1), *(_UFirst + _Offsets[0])); // intentional ADL + } + + // Batch of 2: product of 2 consecutive bounds fits in 64 bits. 
+ { + uint64_t _Bound = uint64_t{1} << 60; + for (; _Remaining > (uint64_t{1} << 19); _Remaining -= 2) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 2, _Bound); + for (uint64_t _J = 0; _J < 2; ++_J) { + swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL + } + } + } + + // Batch of 3. + { + uint64_t _Bound = uint64_t{1} << 57; + for (; _Remaining > (uint64_t{1} << 14); _Remaining -= 3) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 3, _Bound); + for (uint64_t _J = 0; _J < 3; ++_J) { + swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL + } + } + } + + // Batch of 4. + { + uint64_t _Bound = uint64_t{1} << 56; + for (; _Remaining > (uint64_t{1} << 11); _Remaining -= 4) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 4, _Bound); + for (uint64_t _J = 0; _J < 4; ++_J) { + swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL + } + } + } + + // Batch of 5. + { + uint64_t _Bound = uint64_t{1} << 55; + for (; _Remaining > (uint64_t{1} << 9); _Remaining -= 5) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 5, _Bound); + for (uint64_t _J = 0; _J < 5; ++_J) { + swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL + } + } + } + + // Batch of 6. + { + uint64_t _Bound = uint64_t{1} << 54; + for (; _Remaining > 6; _Remaining -= 6) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 6, _Bound); + for (uint64_t _J = 0; _J < 6; ++_J) { + swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL + } + } + } + + // Final: remaining <= 6 elements, handle in one batch. 
+    if (_Remaining > 1) {
+        const auto _K = _Remaining - 1;
+        _BatchedRng._Batch(_Offsets, _Remaining, _K, 720);
+        for (uint64_t _J = 0; _J < _K; ++_J) {
+            swap(*(_UFirst + _Remaining - _J - 1), *(_UFirst + _Offsets[_J])); // intentional ADL
+        }
+    }
+}
+
 _EXPORT_STD template <class _RanIt, class _Urng>
 void shuffle(_RanIt _First, _RanIt _Last, _Urng&& _Func) { // shuffle [_First, _Last) using URNG _Func
     using _Urng0 = remove_reference_t<_Urng>;
-    _Rng_from_urng_v2<_Iter_diff_t<_RanIt>, _Urng0> _RngFunc(_Func);
-    _STD _Random_shuffle1(_First, _Last, _RngFunc);
+
+    // Use batched shuffle when the URNG produces full 64-bit range values.
+    if constexpr (_Urng_has_full_64bit_range<_Urng0>) {
+        _STD _Random_shuffle_batched(_First, _Last, _Func);
+    } else {
+        _Rng_from_urng_v2<_Iter_diff_t<_RanIt>, _Urng0> _RngFunc(_Func);
+        _STD _Random_shuffle1(_First, _Last, _RngFunc);
+    }
 }
 
 #if _HAS_CXX20
@@ -6537,20 +6686,37 @@ namespace ranges {
         _STATIC_CALL_OPERATOR _It operator()(_It _First, _Se _Last, _Urng&& _Func) _CONST_CALL_OPERATOR {
             _STD _Adl_verify_range(_First, _Last);
-            _Rng_from_urng_v2<iter_difference_t<_It>, remove_reference_t<_Urng>> _RngFunc(_Func);
-            auto _UResult = _Shuffle_unchecked(
-                _RANGES _Unwrap_iter<_Se>(_STD move(_First)), _RANGES _Unwrap_sent<_It>(_STD move(_Last)), _RngFunc);
+            using _Urng0 = remove_reference_t<_Urng>;
+            using _Diff  = iter_difference_t<_It>;
 
-            _STD _Seek_wrapped(_First, _STD move(_UResult));
+            // Use batched shuffle when the URNG produces full 64-bit range values.
+            if constexpr (_Urng_has_full_64bit_range<_Urng0>) {
+                auto _UResult = _Shuffle_unchecked_batched(
+                    _RANGES _Unwrap_iter<_Se>(_STD move(_First)), _RANGES _Unwrap_sent<_It>(_STD move(_Last)), _Func);
+                _STD _Seek_wrapped(_First, _STD move(_UResult));
+            } else {
+                _Rng_from_urng_v2<_Diff, _Urng0> _RngFunc(_Func);
+                auto _UResult = _Shuffle_unchecked(_RANGES _Unwrap_iter<_Se>(_STD move(_First)),
+                    _RANGES _Unwrap_sent<_It>(_STD move(_Last)), _RngFunc);
+                _STD _Seek_wrapped(_First, _STD move(_UResult));
+            }
             return _First;
         }
 
         template <random_access_range _Rng, class _Urng>
             requires permutable<iterator_t<_Rng>> && uniform_random_bit_generator<remove_cvref_t<_Urng>>
         _STATIC_CALL_OPERATOR borrowed_iterator_t<_Rng> operator()(_Rng&& _Range, _Urng&& _Func) _CONST_CALL_OPERATOR {
-            _Rng_from_urng_v2<range_difference_t<_Rng>, remove_reference_t<_Urng>> _RngFunc(_Func);
+            using _Urng0 = remove_reference_t<_Urng>;
+            using _Diff  = range_difference_t<_Rng>;
 
-            return _RANGES _Rewrap_iterator(_Range, _Shuffle_unchecked(_Ubegin(_Range), _Uend(_Range), _RngFunc));
+            // Use batched shuffle when the URNG produces full 64-bit range values.
+            if constexpr (_Urng_has_full_64bit_range<_Urng0>) {
+                return _RANGES _Rewrap_iterator(
+                    _Range, _Shuffle_unchecked_batched(_Ubegin(_Range), _Uend(_Range), _Func));
+            } else {
+                _Rng_from_urng_v2<_Diff, _Urng0> _RngFunc(_Func);
+                return _RANGES _Rewrap_iterator(_Range, _Shuffle_unchecked(_Ubegin(_Range), _Uend(_Range), _RngFunc));
+            }
         }
 
     private:
@@ -6578,6 +6744,104 @@ namespace ranges {
         }
         return _Target;
     }
+
+    // Batched shuffle implementation for ranges.
+    // Uses backward Fisher-Yates (Durstenfeld) with batched random generation.
+    template <class _It, class _Se, class _Urng>
+    _NODISCARD static _It _Shuffle_unchecked_batched(_It _First, const _Se _Last, _Urng& _Func) {
+        _STL_INTERNAL_STATIC_ASSERT(random_access_iterator<_It>);
+        _STL_INTERNAL_STATIC_ASSERT(sentinel_for<_Se, _It>);
+        _STL_INTERNAL_STATIC_ASSERT(permutable<_It>);
+
+        using _Diff       = iter_difference_t<_It>;
+        const auto _Count = static_cast<_Diff>(_Last - _First);
+        auto _Remaining   = static_cast<uint64_t>(_Count);
+        if (_Remaining <= 1) {
+            return _First + _Count;
+        }
+
+        _Batched_rng_from_urng<_Diff, _Urng> _BatchedRng(_Func);
+        _Diff _Offsets[7];
+
+        // Process one element at a time while bounds are too large for batching.
+        for (; _Remaining > (uint64_t{1} << 30); --_Remaining) {
+            _BatchedRng._Batch(_Offsets, _Remaining, 1, _Remaining);
+            _RANGES iter_swap(
+                _First + static_cast<_Diff>(_Remaining - 1), _First + static_cast<_Diff>(_Offsets[0]));
+        }
+
+        // Batch of 2.
+        {
+            uint64_t _Bound = uint64_t{1} << 60;
+            for (; _Remaining > (uint64_t{1} << 19); _Remaining -= 2) {
+                _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 2, _Bound);
+                for (uint64_t _J = 0; _J < 2; ++_J) {
+                    _RANGES iter_swap(_First + static_cast<_Diff>(_Remaining - _J - 1),
+                        _First + static_cast<_Diff>(_Offsets[_J]));
+                }
+            }
+        }
+
+        // Batch of 3.
+        {
+            uint64_t _Bound = uint64_t{1} << 57;
+            for (; _Remaining > (uint64_t{1} << 14); _Remaining -= 3) {
+                _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 3, _Bound);
+                for (uint64_t _J = 0; _J < 3; ++_J) {
+                    _RANGES iter_swap(_First + static_cast<_Diff>(_Remaining - _J - 1),
+                        _First + static_cast<_Diff>(_Offsets[_J]));
+                }
+            }
+        }
+
+        // Batch of 4.
+        {
+            uint64_t _Bound = uint64_t{1} << 56;
+            for (; _Remaining > (uint64_t{1} << 11); _Remaining -= 4) {
+                _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 4, _Bound);
+                for (uint64_t _J = 0; _J < 4; ++_J) {
+                    _RANGES iter_swap(_First + static_cast<_Diff>(_Remaining - _J - 1),
+                        _First + static_cast<_Diff>(_Offsets[_J]));
+                }
+            }
+        }
+
+        // Batch of 5.
+ { + uint64_t _Bound = uint64_t{1} << 55; + for (; _Remaining > (uint64_t{1} << 9); _Remaining -= 5) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 5, _Bound); + for (uint64_t _J = 0; _J < 5; ++_J) { + _RANGES iter_swap(_First + static_cast<_Diff>(_Remaining - _J - 1), + _First + static_cast<_Diff>(_Offsets[_J])); + } + } + } + + // Batch of 6. + { + uint64_t _Bound = uint64_t{1} << 54; + for (; _Remaining > 6; _Remaining -= 6) { + _Bound = _BatchedRng._Batch(_Offsets, _Remaining, 6, _Bound); + for (uint64_t _J = 0; _J < 6; ++_J) { + _RANGES iter_swap(_First + static_cast<_Diff>(_Remaining - _J - 1), + _First + static_cast<_Diff>(_Offsets[_J])); + } + } + } + + // Final: remaining <= 6 elements, handle in one batch. + if (_Remaining > 1) { + const auto _K = _Remaining - 1; + _BatchedRng._Batch(_Offsets, _Remaining, _K, 720); + for (uint64_t _J = 0; _J < _K; ++_J) { + _RANGES iter_swap( + _First + static_cast<_Diff>(_Remaining - _J - 1), _First + static_cast<_Diff>(_Offsets[_J])); + } + } + + return _First + _Count; + } }; _EXPORT_STD inline constexpr _Shuffle_fn shuffle; diff --git a/tests/std/tests/P0896R4_ranges_alg_shuffle/test.cpp b/tests/std/tests/P0896R4_ranges_alg_shuffle/test.cpp index 6c2bb00b8a3..f0d3a5257fc 100644 --- a/tests/std/tests/P0896R4_ranges_alg_shuffle/test.cpp +++ b/tests/std/tests/P0896R4_ranges_alg_shuffle/test.cpp @@ -6,15 +6,18 @@ #include #include #include +#include #include #include #include +#include #include using namespace std; const unsigned int seed = random_device{}(); mt19937 gen{seed}; +mt19937_64 gen64{seed}; // 64-bit generator for batched random path // Validate dangling story static_assert(same_as{}, gen)), ranges::dangling>); @@ -72,8 +75,307 @@ void test_urbg() { // COMPILE-ONLY ranges::shuffle(arr, RandGen{}); } +// Test that shuffle produces a valid permutation for various sizes. +// This exercises both the batched path (for 64-bit RNGs) and the fallback path. 
+void test_shuffle_permutation() {
+    // Test with 64-bit generator (batched random path)
+    {
+        vector<int> v(100);
+        iota(v.begin(), v.end(), 0);
+        vector<int> original = v;
+
+        shuffle(v.begin(), v.end(), gen64);
+
+        // Verify it's still a permutation
+        vector<int> sorted_v = v;
+        sort(sorted_v.begin(), sorted_v.end());
+        assert(sorted_v == original);
+    }
+
+    // Test with ranges::shuffle and 64-bit generator
+    {
+        vector<int> v(100);
+        iota(v.begin(), v.end(), 0);
+        vector<int> original = v;
+
+        ranges::shuffle(v, gen64);
+
+        // Verify it's still a permutation
+        vector<int> sorted_v = v;
+        sort(sorted_v.begin(), sorted_v.end());
+        assert(sorted_v == original);
+    }
+
+    // Test with 32-bit generator (non-batched path)
+    {
+        vector<int> v(100);
+        iota(v.begin(), v.end(), 0);
+        vector<int> original = v;
+
+        shuffle(v.begin(), v.end(), gen);
+
+        // Verify it's still a permutation
+        vector<int> sorted_v = v;
+        sort(sorted_v.begin(), sorted_v.end());
+        assert(sorted_v == original);
+    }
+
+    // Test with ranges::shuffle and 32-bit generator
+    {
+        vector<int> v(100);
+        iota(v.begin(), v.end(), 0);
+        vector<int> original = v;
+
+        ranges::shuffle(v, gen);
+
+        // Verify it's still a permutation
+        vector<int> sorted_v = v;
+        sort(sorted_v.begin(), sorted_v.end());
+        assert(sorted_v == original);
+    }
+}
+
+// Test edge cases for shuffle
+void test_shuffle_edge_cases() {
+    // Empty range
+    {
+        vector<int> v;
+        shuffle(v.begin(), v.end(), gen64);
+        assert(v.empty());
+    }
+
+    // Single element
+    {
+        vector<int> v = {42};
+        shuffle(v.begin(), v.end(), gen64);
+        assert(v.size() == 1);
+        assert(v[0] == 42);
+    }
+
+    // Two elements
+    {
+        vector<int> v = {1, 2};
+        vector<int> original = v;
+        shuffle(v.begin(), v.end(), gen64);
+        sort(v.begin(), v.end());
+        assert(v == original);
+    }
+
+    // Three elements (odd count, tests batching boundary)
+    {
+        vector<int> v = {1, 2, 3};
+        vector<int> original = v;
+        shuffle(v.begin(), v.end(), gen64);
+        sort(v.begin(), v.end());
+        assert(v == original);
+    }
+
+    // Four elements (even count)
+    {
+        vector<int> v = {1, 2, 3, 4};
+        vector<int> original = v;
+        shuffle(v.begin(), v.end(), gen64);
+        sort(v.begin(), v.end());
+        assert(v == original);
+    }
+
+    // Large array to ensure batching is effective
+    {
+        vector<int> v(10000);
+        iota(v.begin(), v.end(), 0);
+        vector<int> original = v;
+        shuffle(v.begin(), v.end(), gen64);
+        sort(v.begin(), v.end());
+        assert(v == original);
+    }
+}
+
+// Regression test: consecutive shuffles with the same URNG must produce different results.
+// The batched random implementation had a bug where _Unsigned128 brace-initialization zeroed the high word
+// of the multiplication result, making all random indices 0 and the shuffle deterministic.
+void test_gh_6112() {
+    // Test std::shuffle with mt19937_64 (exercises the batched random path)
+    {
+        mt19937_64 urng(6);
+        vector<int> v(100);
+
+        iota(v.begin(), v.end(), 0);
+        shuffle(v.begin(), v.end(), urng);
+        const auto first_shuffle = v;
+
+        iota(v.begin(), v.end(), 0);
+        shuffle(v.begin(), v.end(), urng);
+
+        assert(v != first_shuffle); // "should be vanishingly impossible" for these to be equal
+    }
+
+    // Test ranges::shuffle with mt19937_64
+    {
+        mt19937_64 urng(6);
+        vector<int> v(100);
+
+        iota(v.begin(), v.end(), 0);
+        ranges::shuffle(v, urng);
+        const auto first_shuffle = v;
+
+        iota(v.begin(), v.end(), 0);
+        ranges::shuffle(v, urng);
+
+        assert(v != first_shuffle);
+    }
+}
+
+// Shuffle quality tests adapted from Lemire's cpp_batched_random test suite.
+// These verify that the shuffle produces a proper uniform random permutation,
+// not just a valid permutation.
+
+// Every element must be able to reach every position over many trials.
+void test_everyone_can_move_everywhere() {
+    constexpr size_t size   = 64;
+    constexpr size_t trials = size * size; // 4096 trials; probability of missing any (pos,val): ~e^-64
+
+    // Test std::shuffle with mt19937_64 (batched path)
+    {
+        mt19937_64 urng(42);
+        vector<int> input(size);
+        vector<char> seen(size * size, 0); // seen[position * size + value]
+
+        for (size_t trial = 0; trial < trials; ++trial) {
+            iota(input.begin(), input.end(), 0);
+            shuffle(input.begin(), input.end(), urng);
+            for (size_t i = 0; i < size; ++i) {
+                seen[i * size + static_cast<size_t>(input[i])] = 1;
+            }
+        }
+
+        for (size_t pos = 0; pos < size; ++pos) {
+            for (size_t val = 0; val < size; ++val) {
+                assert(seen[pos * size + val]);
+            }
+        }
+    }
+
+    // Test ranges::shuffle with mt19937_64 (batched path)
+    {
+        mt19937_64 urng(42);
+        vector<int> input(size);
+        vector<char> seen(size * size, 0);
+
+        for (size_t trial = 0; trial < trials; ++trial) {
+            iota(input.begin(), input.end(), 0);
+            ranges::shuffle(input, urng);
+            for (size_t i = 0; i < size; ++i) {
+                seen[i * size + static_cast<size_t>(input[i])] = 1;
+            }
+        }
+
+        for (size_t pos = 0; pos < size; ++pos) {
+            for (size_t val = 0; val < size; ++val) {
+                assert(seen[pos * size + val]);
+            }
+        }
+    }
+}
+
+// Check that the distribution of values across positions is roughly uniform.
+// Uses the "relative gap" metric: (max_count - min_count) / mean_count < 0.6.
+void test_uniformity() {
+    constexpr size_t size   = 32;
+    constexpr size_t trials = size * size * 16; // 16384 trials; expected count per cell = 512
+
+    mt19937_64 urng(42);
+    vector<int> input(size);
+    vector<size_t> count(size * size, 0); // count[position * size + value]
+
+    for (size_t trial = 0; trial < trials; ++trial) {
+        iota(input.begin(), input.end(), 0);
+        shuffle(input.begin(), input.end(), urng);
+        for (size_t i = 0; i < size; ++i) {
+            ++count[i * size + static_cast<size_t>(input[i])];
+        }
+    }
+
+    size_t overall_min = SIZE_MAX;
+    size_t overall_max = 0;
+    size_t total       = 0;
+
+    for (size_t cell = 0; cell < size * size; ++cell) {
+        total += count[cell];
+        if (count[cell] > overall_max) {
+            overall_max = count[cell];
+        }
+        if (count[cell] < overall_min) {
+            overall_min = count[cell];
+        }
+    }
+
+    const double mean         = static_cast<double>(total) / static_cast<double>(size * size);
+    const double relative_gap = static_cast<double>(overall_max - overall_min) / mean;
+
+    assert(relative_gap < 0.6);
+}
+
+// Every distinct pair of values must be able to appear at the first two positions.
+void test_any_possible_pair_at_start() {
+    constexpr size_t size   = 32;
+    constexpr size_t trials = size * size * size; // 32768 trials; expected count per pair ~33
+
+    mt19937_64 urng(42);
+    vector<int> input(size);
+    vector<char> seen(size * size, 0); // seen[first * size + second]
+
+    for (size_t trial = 0; trial < trials; ++trial) {
+        iota(input.begin(), input.end(), 0);
+        shuffle(input.begin(), input.end(), urng);
+        seen[static_cast<size_t>(input[0]) * size + static_cast<size_t>(input[1])] = 1;
+    }
+
+    for (size_t i = 0; i < size; ++i) {
+        for (size_t j = 0; j < size; ++j) {
+            if (i == j) {
+                assert(!seen[i * size + j]); // same value can't occupy both positions
+            } else {
+                assert(seen[i * size + j]); // every distinct pair must appear
+            }
+        }
+    }
+}
+
+// Every distinct pair of values must be able to appear at the last two positions.
+void test_any_possible_pair_at_end() {
+    constexpr size_t size   = 32;
+    constexpr size_t trials = size * size * size;
+
+    mt19937_64 urng(42);
+    vector<int> input(size);
+    vector<char> seen(size * size, 0);
+
+    for (size_t trial = 0; trial < trials; ++trial) {
+        iota(input.begin(), input.end(), 0);
+        shuffle(input.begin(), input.end(), urng);
+        seen[static_cast<size_t>(input[size - 2]) * size + static_cast<size_t>(input[size - 1])] = 1;
+    }
+
+    for (size_t i = 0; i < size; ++i) {
+        for (size_t j = 0; j < size; ++j) {
+            if (i == j) {
+                assert(!seen[i * size + j]);
+            } else {
+                assert(seen[i * size + j]);
+            }
+        }
+    }
+}
+
 int main() {
     printf("Using seed: %u\n", seed);
     test_random();
+    test_shuffle_permutation();
+    test_shuffle_edge_cases();
+    test_gh_6112();
+    test_everyone_can_move_everywhere();
+    test_uniformity();
+    test_any_possible_pair_at_start();
+    test_any_possible_pair_at_end();
 }