Update vendored deps

2022-08-29 13:59:48 -04:00
parent 73f3dde770
commit 7a950b49eb
723 changed files with 86515 additions and 54105 deletions

View File

@@ -16,7 +16,6 @@
# ABSL random-number generation libraries.
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
@@ -101,8 +100,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":seed_gen_exception",
"//absl/container:inlined_vector",
"//absl/random/internal:nonsecure_base",
"//absl/base:config",
"//absl/random/internal:pool_urbg",
"//absl/random/internal:salted_seed_seq",
"//absl/random/internal:seed_material",
@@ -129,6 +127,7 @@ cc_library(
name = "mock_distributions",
testonly = 1,
hdrs = ["mock_distributions.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":distributions",
":mocking_bit_gen",
@@ -184,6 +183,9 @@ cc_test(
copts = ABSL_TEST_COPTS,
flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_wasm",
],
deps = [
":distributions",
":random",
@@ -236,6 +238,9 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_wasm", # Does not converge on WASM.
],
deps = [
":distributions",
":random",
@@ -430,6 +435,9 @@ cc_test(
srcs = ["mocking_bit_gen_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_wasm",
],
deps = [
":bit_gen_ref",
":mock_distributions",
@@ -445,6 +453,9 @@ cc_test(
srcs = ["mock_distributions_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_wasm",
],
deps = [
":mock_distributions",
":mocking_bit_gen",
@@ -459,6 +470,9 @@ cc_test(
srcs = ["examples_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_wasm",
],
deps = [
":random",
"@com_google_googletest//:gtest_main",

View File

@@ -63,8 +63,7 @@ absl_source_set("seed_sequences") {
sources = [ "seed_sequences.cc" ]
deps = [
":seed_gen_exception",
"//third_party/abseil-cpp/absl/container:inlined_vector",
"//third_party/abseil-cpp/absl/random/internal:nonsecure_base",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/random/internal:pool_urbg",
"//third_party/abseil-cpp/absl/random/internal:salted_seed_seq",
"//third_party/abseil-cpp/absl/random/internal:seed_material",

View File

@@ -121,6 +121,7 @@ absl_cc_library(
absl::variant
GTest::gmock
GTest::gtest
PUBLIC
TESTONLY
)
@@ -222,8 +223,8 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::inlined_vector
absl::random_internal_nonsecure_base
absl::random_internal_pool_urbg
absl::random_internal_salted_seed_seq
absl::random_internal_seed_material
@@ -726,7 +727,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::core_headers
absl::optional
absl::inlined_vector
absl::random_internal_pool_urbg
absl::random_internal_salted_seed_seq
absl::random_internal_seed_material
@@ -1210,5 +1211,6 @@ absl_cc_test(
absl::random_internal_wide_multiply
absl::bits
absl::int128
GTest::gmock
GTest::gtest_main
)

View File

@@ -138,16 +138,16 @@ bool bernoulli_distribution::Generate(double p,
// 64 bits.
//
// Second, `c` is constructed by first casting explicitly to a signed
// integer and then converting implicitly to an unsigned integer of the same
// integer and then casting explicitly to an unsigned integer of the same
// size. This is done because the hardware conversion instructions produce
// signed integers from double; if taken as a uint64_t the conversion would
// be wrong for doubles greater than 2^63 (not relevant in this use-case).
// If converted directly to an unsigned integer, the compiler would end up
// emitting code to handle such large values that are not relevant due to
// the known bounds on `c`. To avoid these extra instructions this
// implementation converts first to the signed type and then use the
// implicit conversion to unsigned (which is a no-op).
const uint64_t c = static_cast<int64_t>(p * kP32);
// implementation converts first to the signed type and then convert to
// unsigned (which is a no-op).
const uint64_t c = static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
const uint32_t v = fast_u32(g);
// FAST PATH: this path fails with probability 1/2^32. Note that simply
// returning v <= c would approximate P very well (up to an absolute error
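Note: a minimal standalone sketch of the two-step cast described in the comment above; the names kP32 and p mirror the diff, but the concrete values here are illustrative only.

#include <cstdint>

// p is a probability in [0, 1] and kP32 is 2^32, so p * kP32 is at most 2^32,
// well below 2^63, which makes the signed intermediate cast safe.
constexpr double kP32 = 4294967296.0;  // 2^32
constexpr double p = 0.25;             // illustrative value
// Cast to a signed 64-bit integer first (the form hardware conversion
// instructions produce), then cast to unsigned, a no-op for values in [0, 2^63).
constexpr uint64_t c = static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
static_assert(c == 1073741824u, "0.25 * 2^32");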

View File

@@ -45,16 +45,26 @@ namespace {
template <typename IntType>
class BetaDistributionInterfaceTest : public ::testing::Test {};
// double-double arithmetic is not supported well by either GCC or Clang; see
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
// https://bugs.llvm.org/show_bug.cgi?id=49131, and
// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests
// with double doubles until compiler support is better.
using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
TYPED_TEST_CASE(BetaDistributionInterfaceTest, RealTypes);
constexpr bool ShouldExerciseLongDoubleTests() {
// long double arithmetic is not supported well by either GCC or Clang on
// most platforms specifically not when implemented in terms of double-double;
// see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
// https://bugs.llvm.org/show_bug.cgi?id=49131, and
// https://bugs.llvm.org/show_bug.cgi?id=49132.
// So a conservative choice here is to disable long-double tests pretty much
// everywhere except on x64 but only if long double is not implemented as
// double-double.
#if defined(__i686__) && defined(__x86_64__)
return !absl::numeric_internal::IsDoubleDouble();
#else
return false;
#endif
}
using RealTypes = std::conditional<ShouldExerciseLongDoubleTests(),
::testing::Types<float, double, long double>,
::testing::Types<float, double>>::type;
TYPED_TEST_SUITE(BetaDistributionInterfaceTest, RealTypes);
TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) {
// The threshold for whether std::exp(1/a) is finite.
@@ -431,13 +441,13 @@ std::string ParamName(
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_CASE_P(
INSTANTIATE_TEST_SUITE_P(
TestSampleStatisticsCombinations, BetaDistributionTest,
::testing::Combine(::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4),
::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4)),
ParamName);
INSTANTIATE_TEST_CASE_P(
INSTANTIATE_TEST_SUITE_P(
TestSampleStatistics_SelectedPairs, BetaDistributionTest,
::testing::Values(std::make_pair(0.5, 1000), std::make_pair(1000, 0.5),
std::make_pair(900, 1000), std::make_pair(10000, 20000),

View File

@@ -24,6 +24,10 @@
#ifndef ABSL_RANDOM_BIT_GEN_REF_H_
#define ABSL_RANDOM_BIT_GEN_REF_H_
#include <limits>
#include <type_traits>
#include <utility>
#include "absl/base/internal/fast_type_id.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"

View File

@@ -373,7 +373,7 @@ RealType Gaussian(URBG&& urbg, // NOLINT(runtime/references)
template <typename IntType, typename URBG>
IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references)
IntType lo, IntType hi, IntType base = 2) {
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::LogUniform<IntType, URBG>(...)");
@@ -403,7 +403,7 @@ IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references)
template <typename IntType, typename URBG>
IntType Poisson(URBG&& urbg, // NOLINT(runtime/references)
double mean = 1.0) {
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Poisson<IntType, URBG>(...)");
@@ -435,7 +435,7 @@ template <typename IntType, typename URBG>
IntType Zipf(URBG&& urbg, // NOLINT(runtime/references)
IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
double v = 1.0) {
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Zipf<IntType, URBG>(...)");

View File

@@ -220,6 +220,7 @@ TEST_F(RandomDistributionsTest, UniformNoBounds) {
absl::Uniform<uint16_t>(gen);
absl::Uniform<uint32_t>(gen);
absl::Uniform<uint64_t>(gen);
absl::Uniform<absl::uint128>(gen);
}
TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {

View File

@@ -58,7 +58,7 @@ using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
TYPED_TEST_CASE(ExponentialDistributionTypedTest, RealTypes);
TYPED_TEST_SUITE(ExponentialDistributionTypedTest, RealTypes);
TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) {
using param_type =
@@ -343,8 +343,8 @@ std::string ParamName(const ::testing::TestParamInfo<Param>& info) {
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_CASE_P(All, ExponentialDistributionTests,
::testing::ValuesIn(GenParams()), ParamName);
INSTANTIATE_TEST_SUITE_P(All, ExponentialDistributionTests,
::testing::ValuesIn(GenParams()), ParamName);
// NOTE: absl::exponential_distribution is not guaranteed to be stable.
TEST(ExponentialDistributionTest, StabilityTest) {

View File

@@ -54,7 +54,7 @@ using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
TYPED_TEST_CASE(GaussianDistributionInterfaceTest, RealTypes);
TYPED_TEST_SUITE(GaussianDistributionInterfaceTest, RealTypes);
TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) {
using param_type =

View File

@@ -107,6 +107,8 @@ void TestPoisson(URBG* gen) {
absl::Poisson<int64_t>(*gen);
absl::Poisson<uint64_t>(*gen);
absl::Poisson<uint64_t>(URBG());
absl::Poisson<absl::int128>(*gen);
absl::Poisson<absl::uint128>(*gen);
}
template <typename URBG>
@@ -126,6 +128,8 @@ void TestZipf(URBG* gen) {
absl::Zipf<int64_t>(*gen, 1 << 10);
absl::Zipf<uint64_t>(*gen, 1 << 10);
absl::Zipf<uint64_t>(URBG(), 1 << 10);
absl::Zipf<absl::int128>(*gen, 1 << 10);
absl::Zipf<absl::uint128>(*gen, 1 << 10);
}
template <typename URBG>
@@ -146,6 +150,8 @@ void TestLogNormal(URBG* gen) {
absl::LogUniform<int64_t>(*gen, 0, 1 << 10);
absl::LogUniform<uint64_t>(*gen, 0, 1 << 10);
absl::LogUniform<uint64_t>(URBG(), 0, 1 << 10);
absl::LogUniform<absl::int128>(*gen, 0, 1 << 10);
absl::LogUniform<absl::uint128>(*gen, 0, 1 << 10);
}
template <typename URBG>

View File

@@ -14,8 +14,6 @@
# limitations under the License.
#
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
# Internal-only implementation classes for Abseil Random
load(
"//absl:copts/configure_copts.bzl",
@@ -37,7 +35,11 @@ cc_library(
hdrs = ["traits.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"],
deps = [
"//absl/base:config",
"//absl/numeric:bits",
"//absl/numeric:int128",
],
)
cc_library(
@@ -60,6 +62,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":traits",
"//absl/base:config",
"//absl/meta:type_traits",
],
@@ -219,8 +222,8 @@ cc_library(
":salted_seed_seq",
":seed_material",
"//absl/base:core_headers",
"//absl/container:inlined_vector",
"//absl/meta:type_traits",
"//absl/types:optional",
"//absl/types:span",
],
)
@@ -499,6 +502,7 @@ cc_test(
cc_library(
name = "mock_helpers",
hdrs = ["mock_helpers.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:fast_type_id",
"//absl/types:optional",
@@ -509,6 +513,7 @@ cc_library(
name = "mock_overload_set",
testonly = 1,
hdrs = ["mock_overload_set.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":mock_helpers",
"//absl/random:mocking_bit_gen",
@@ -676,6 +681,7 @@ cc_library(
":traits",
"//absl/base:config",
"//absl/meta:type_traits",
"//absl/numeric:int128",
],
)
@@ -689,6 +695,7 @@ cc_test(
"benchmark",
"no_test_ios_x86_64",
"no_test_loonix", # Crashing.
"no_test_wasm",
],
deps = [
":nanobenchmark",

View File

@@ -6,7 +6,11 @@ import("//third_party/abseil-cpp/absl.gni")
absl_source_set("traits") {
public = [ "traits.h" ]
deps = [ "//third_party/abseil-cpp/absl/base:config" ]
deps = [
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/numeric:bits",
"//third_party/abseil-cpp/absl/numeric:int128",
]
}
absl_source_set("distribution_caller") {
@@ -21,6 +25,7 @@ absl_source_set("distribution_caller") {
absl_source_set("fast_uniform_bits") {
public = [ "fast_uniform_bits.h" ]
deps = [
":traits",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
@@ -127,8 +132,8 @@ absl_source_set("nonsecure_base") {
":salted_seed_seq",
":seed_material",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/container:inlined_vector",
"//third_party/abseil-cpp/absl/meta:type_traits",
"//third_party/abseil-cpp/absl/types:optional",
"//third_party/abseil-cpp/absl/types:span",
]
}
@@ -265,5 +270,6 @@ absl_source_set("uniform_helper") {
":traits",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/meta:type_traits",
"//third_party/abseil-cpp/absl/numeric:int128",
]
}

View File

@@ -125,7 +125,8 @@ double ChiSquareValue(int dof, double p) {
const double variance = 2.0 / (9 * dof);
// Cannot use this method if the variance is 0.
if (variance != 0) {
return std::pow(z * std::sqrt(variance) + mean, 3.0) * dof;
double term = z * std::sqrt(variance) + mean;
return dof * (term * term * term);
}
}
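Note: a small sketch contrasting the two ways of cubing shown in this hunk. The definition of mean is not visible in the hunk; the value below follows the Wilson-Hilferty form 1 - 2/(9*dof) and is an assumption.

#include <cmath>

double CubeBothWays(double dof, double z) {
  const double variance = 2.0 / (9 * dof);   // as in the hunk above
  const double mean = 1.0 - variance;        // assumed, not shown in the hunk
  const double term = z * std::sqrt(variance) + mean;
  const double via_pow = std::pow(term, 3.0) * dof;   // previous form (libm call)
  const double via_mul = dof * (term * term * term);  // new form, plain multiplies
  return via_pow - via_mul;                  // ~0 up to rounding error
}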

View File

@@ -18,6 +18,7 @@
#define ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_
#include <utility>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/internal/fast_type_id.h"
@@ -32,6 +33,8 @@ namespace random_internal {
// to intercept such calls.
template <typename URBG>
struct DistributionCaller {
static_assert(!std::is_pointer<URBG>::value,
"You must pass a reference, not a pointer.");
// SFINAE to detect whether the URBG type includes a member matching
// bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
//

View File

@@ -74,7 +74,7 @@ class ExplicitSeedSeq {
template <typename OutIterator>
void generate(OutIterator begin, OutIterator end) {
for (size_t index = 0; begin != end; begin++) {
*begin = state_.empty() ? 0 : little_endian::FromHost32(state_[index++]);
*begin = state_.empty() ? 0 : state_[index++];
if (index >= state_.size()) {
index = 0;
}

View File

@@ -24,6 +24,8 @@
namespace {
using ::absl::random_internal::ExplicitSeedSeq;
template <typename Sseq>
bool ConformsToInterface() {
// Check that the SeedSequence can be default-constructed.
@@ -64,14 +66,14 @@ TEST(SeedSequences, CheckInterfaces) {
EXPECT_TRUE(ConformsToInterface<std::seed_seq>());
// Abseil classes
EXPECT_TRUE(ConformsToInterface<absl::random_internal::ExplicitSeedSeq>());
EXPECT_TRUE(ConformsToInterface<ExplicitSeedSeq>());
}
TEST(ExplicitSeedSeq, DefaultConstructorGeneratesZeros) {
const size_t kNumBlocks = 128;
uint32_t outputs[kNumBlocks];
absl::random_internal::ExplicitSeedSeq seq;
ExplicitSeedSeq seq;
seq.generate(outputs, &outputs[kNumBlocks]);
for (uint32_t& seed : outputs) {
@@ -87,8 +89,7 @@ TEST(ExplicitSeeqSeq, SeedMaterialIsForwardedIdentically) {
for (uint32_t& seed : seed_material) {
seed = urandom();
}
absl::random_internal::ExplicitSeedSeq seq(seed_material,
&seed_material[kNumBlocks]);
ExplicitSeedSeq seq(seed_material, &seed_material[kNumBlocks]);
// Check that output is same as seed-material provided to constructor.
{
@@ -133,17 +134,14 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
for (uint32_t& entry : entropy) {
entry = urandom();
}
absl::random_internal::ExplicitSeedSeq seq_from_entropy(std::begin(entropy),
std::end(entropy));
ExplicitSeedSeq seq_from_entropy(std::begin(entropy), std::end(entropy));
// Copy constructor.
{
absl::random_internal::ExplicitSeedSeq seq_copy(seq_from_entropy);
ExplicitSeedSeq seq_copy(seq_from_entropy);
EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
std::vector<uint32_t> seeds_1;
seeds_1.resize(1000, 0);
std::vector<uint32_t> seeds_2;
seeds_2.resize(1000, 1);
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 1);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
seq_copy.generate(seeds_2.begin(), seeds_2.end());
@@ -155,13 +153,10 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
for (uint32_t& entry : entropy) {
entry = urandom();
}
absl::random_internal::ExplicitSeedSeq another_seq(std::begin(entropy),
std::end(entropy));
ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
std::vector<uint32_t> seeds_1;
seeds_1.resize(1000, 0);
std::vector<uint32_t> seeds_2;
seeds_2.resize(1000, 0);
std::vector<uint32_t> seeds_1(1000, 0);
std::vector<uint32_t> seeds_2(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
@@ -170,7 +165,15 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
// Apply the assignment-operator.
// GCC 12 has a false-positive -Wstringop-overflow warning here.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
another_seq = seq_from_entropy;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
// Re-generate seeds.
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
@@ -182,15 +185,13 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
// Move constructor.
{
// Get seeds from seed-sequence constructed from entropy.
std::vector<uint32_t> seeds_1;
seeds_1.resize(1000, 0);
std::vector<uint32_t> seeds_1(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
// Apply move-constructor move the sequence to another instance.
absl::random_internal::ExplicitSeedSeq moved_seq(
std::move(seq_from_entropy));
std::vector<uint32_t> seeds_2;
seeds_2.resize(1000, 1);
std::vector<uint32_t> seeds_2(1000, 1);
moved_seq.generate(seeds_2.begin(), seeds_2.end());
// Verify that seeds produced by moved-instance are the same as original.
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
@@ -202,3 +203,35 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
EXPECT_THAT(seeds_1, Each(Eq(0)));
}
}
TEST(ExplicitSeedSeq, StdURBGGoldenTests) {
// Verify that for std::- URBG instances the results are stable across
// platforms (these should have deterministic output).
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::minstd_rand rng(seed_sequence);
std::minstd_rand::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(579252, 43785881, 464353103, 1501811174));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937 rng(seed_sequence);
std::mt19937::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values, testing::ElementsAre(138416803, 151130212, 33817739,
138416803));
}
{
ExplicitSeedSeq seed_sequence{12, 34, 56};
std::mt19937_64 rng(seed_sequence);
std::mt19937_64::result_type values[4] = {rng(), rng(), rng(), rng()};
EXPECT_THAT(values,
testing::ElementsAre(19738651785169348, 1464811352364190456,
18054685302720800, 19738651785169348));
}
}

View File

@@ -22,6 +22,7 @@
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -98,7 +99,7 @@ class FastUniformBits {
result_type operator()(URBG& g); // NOLINT(runtime/references)
private:
static_assert(std::is_unsigned<UIntType>::value,
static_assert(IsUnsigned<UIntType>::value,
"Class-template FastUniformBits<> must be parameterized using "
"an unsigned type.");

View File

@@ -50,10 +50,10 @@ struct GenerateSignedTag {};
// inputs, otherwise it never returns 0.
//
// When a value in U(0,1) is required, use:
// Uniform64ToReal<double, PositiveValueT, true>;
// GenerateRealFromBits<double, PositiveValueT, true>;
//
// When a value in U(-1,1) is required, use:
// Uniform64ToReal<double, SignedValueT, false>;
// GenerateRealFromBits<double, SignedValueT, false>;
//
// This generates more distinct values than the mathematical equivalent
// `U(0, 1) * 2.0 - 1.0`.

View File

@@ -18,6 +18,7 @@
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/internal/fast_type_id.h"
#include "absl/types/optional.h"

View File

@@ -17,28 +17,82 @@
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <random>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/pool_urbg.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// RandenPoolSeedSeq is a custom seed sequence type where generate() fills the
// provided buffer via the RandenPool entropy source.
class RandenPoolSeedSeq {
private:
struct ContiguousTag {};
struct BufferTag {};
// Generate random unsigned values directly into the buffer.
template <typename Contiguous>
void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
const size_t n = std::distance(begin, end);
auto* a = &(*begin);
RandenPool<uint8_t>::Fill(
absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
}
// Construct a buffer of size n and fill it with values, then copy
// those values into the seed iterators.
template <typename RandomAccessIterator>
void generate_impl(BufferTag, RandomAccessIterator begin,
RandomAccessIterator end) {
const size_t n = std::distance(begin, end);
absl::InlinedVector<uint32_t, 8> data(n, 0);
RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
std::copy(std::begin(data), std::end(data), begin);
}
public:
using result_type = uint32_t;
size_t size() { return 0; }
template <typename OutIterator>
void param(OutIterator) const {}
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
// RandomAccessIterator must be assignable from uint32_t
if (begin != end) {
using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
// ContiguousTag indicates the common case of a known contiguous buffer,
// which allows directly filling the buffer. In C++20,
// std::contiguous_iterator_tag provides a mechanism for testing this
// capability, however until Abseil's support requirements allow us to
// assume C++20, limit checks to a few common cases.
using TagType = absl::conditional_t<
(std::is_pointer<RandomAccessIterator>::value ||
std::is_same<RandomAccessIterator,
typename std::vector<U>::iterator>::value),
ContiguousTag, BufferTag>;
generate_impl(TagType{}, begin, end);
}
}
};
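Note: a hedged usage sketch of the class above, mirroring the unit tests added later in this commit; both a raw uint32_t array (pointer iterators) and std::vector iterators take the ContiguousTag path.

#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/random/internal/nonsecure_base.h"

void FillSeedWords() {
  absl::random_internal::RandenPoolSeedSeq seq;
  uint32_t words[4] = {0, 0, 0, 0};
  seq.generate(std::begin(words), std::end(words));  // pointer: ContiguousTag path
  std::vector<uint32_t> v(4, 0);
  seq.generate(v.begin(), v.end());                  // vector iterator: also contiguous
}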
// Each instance of NonsecureURBGBase<URBG> will be seeded by variates produced
// by a thread-unique URBG-instance.
template <typename URBG>
template <typename URBG, typename Seeder = RandenPoolSeedSeq>
class NonsecureURBGBase {
public:
using result_type = typename URBG::result_type;
@@ -85,49 +139,6 @@ class NonsecureURBGBase {
}
private:
// Seeder is a custom seed sequence type where generate() fills the provided
// buffer via the RandenPool entropy source.
struct Seeder {
using result_type = uint32_t;
size_t size() { return 0; }
template <typename OutIterator>
void param(OutIterator) const {}
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
if (begin != end) {
// begin, end must be random access iterators assignable from uint32_t.
generate_impl(
std::integral_constant<bool, sizeof(*begin) == sizeof(uint32_t)>{},
begin, end);
}
}
// Commonly, generate is invoked with a pointer to a buffer which
// can be cast to a uint32_t.
template <typename RandomAccessIterator>
void generate_impl(std::integral_constant<bool, true>,
RandomAccessIterator begin, RandomAccessIterator end) {
auto buffer = absl::MakeSpan(begin, end);
auto target = absl::MakeSpan(reinterpret_cast<uint32_t*>(buffer.data()),
buffer.size());
RandenPool<uint32_t>::Fill(target);
}
// The non-uint32_t case should be uncommon, and involves an extra copy,
// filling the uint32_t buffer and then mixing into the output.
template <typename RandomAccessIterator>
void generate_impl(std::integral_constant<bool, false>,
RandomAccessIterator begin, RandomAccessIterator end) {
const size_t n = std::distance(begin, end);
absl::InlinedVector<uint32_t, 8> data(n, 0);
RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
std::copy(std::begin(data), std::end(data), begin);
}
};
static URBG ConstructURBG() {
Seeder seeder;
return URBG(seeder);

View File

@@ -15,6 +15,7 @@
#include "absl/random/internal/nonsecure_base.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <random>
@@ -192,54 +193,35 @@ TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) {
}
}
// This is a PRNG-compatible type specifically designed to test
// that NonsecureURBGBase::Seeder can correctly handle iterators
// to arbitrary non-uint32_t size types.
template <typename T>
struct SeederTestEngine {
using result_type = T;
TEST(RandenPoolSeedSeqTest, SeederWorksForU32) {
absl::random_internal::RandenPoolSeedSeq seeder;
static constexpr result_type(min)() {
return (std::numeric_limits<result_type>::min)();
}
static constexpr result_type(max)() {
return (std::numeric_limits<result_type>::max)();
}
template <class SeedSequence,
typename = typename absl::enable_if_t<
!std::is_same<SeedSequence, SeederTestEngine>::value>>
explicit SeederTestEngine(SeedSequence&& seq) {
seed(seq);
}
SeederTestEngine(const SeederTestEngine&) = default;
SeederTestEngine& operator=(const SeederTestEngine&) = default;
SeederTestEngine(SeederTestEngine&&) = default;
SeederTestEngine& operator=(SeederTestEngine&&) = default;
result_type operator()() { return state[0]; }
template <class SeedSequence>
void seed(SeedSequence&& seq) {
std::fill(std::begin(state), std::end(state), T(0));
seq.generate(std::begin(state), std::end(state));
}
T state[2];
};
TEST(NonsecureURBGBase, SeederWorksForU32) {
using U32 =
absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint32_t>>;
U32 x;
EXPECT_NE(0, x());
uint32_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
TEST(NonsecureURBGBase, SeederWorksForU64) {
using U64 =
absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint64_t>>;
TEST(RandenPoolSeedSeqTest, SeederWorksForU64) {
absl::random_internal::RandenPoolSeedSeq seeder;
U64 x;
EXPECT_NE(0, x());
uint64_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForS32) {
absl::random_internal::RandenPoolSeedSeq seeder;
int32_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForVector) {
absl::random_internal::RandenPoolSeedSeq seeder;
std::vector<uint32_t> state(2);
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}

View File

@@ -262,7 +262,7 @@ struct pcg_xsl_rr_128_64 {
uint64_t rotate = h >> 58u;
uint64_t s = Uint128Low64(state) ^ h;
#endif
return rotr(s, rotate);
return rotr(s, static_cast<int>(rotate));
}
};

View File

@@ -43,10 +43,8 @@ class Randen {
// Generate updates the randen sponge. The outer portion of the sponge
// (kCapacityBytes .. kStateBytes) may be consumed as PRNG state.
template <typename T, size_t N>
void Generate(T (&state)[N]) const {
static_assert(N * sizeof(T) == kStateBytes,
"Randen::Generate() requires kStateBytes of state");
// REQUIRES: state points to kStateBytes of state.
inline void Generate(void* state) const {
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
// HW AES Dispatch.
if (has_crypto_) {
@@ -65,13 +63,9 @@ class Randen {
// Absorb incorporates additional seed material into the randen sponge. After
// absorb returns, Generate must be called before the state may be consumed.
template <typename S, size_t M, typename T, size_t N>
void Absorb(const S (&seed)[M], T (&state)[N]) const {
static_assert(M * sizeof(S) == RandenTraits::kSeedBytes,
"Randen::Absorb() requires kSeedBytes of seed");
static_assert(N * sizeof(T) == RandenTraits::kStateBytes,
"Randen::Absorb() requires kStateBytes of state");
// REQUIRES: seed points to kSeedBytes of seed.
// REQUIRES: state points to kStateBytes of state.
inline void Absorb(const void* seed, void* state) const {
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
// HW AES Dispatch.
if (has_crypto_) {

View File

@@ -24,6 +24,11 @@
#include "absl/random/internal/platform.h"
#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
(__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
#define ABSL_HAVE_GETAUXVAL
#endif
#if defined(ABSL_ARCH_X86_64)
#define ABSL_INTERNAL_USE_X86_CPUID
#elif defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
@@ -31,7 +36,7 @@
#if defined(__ANDROID__)
#define ABSL_INTERNAL_USE_ANDROID_GETAUXVAL
#define ABSL_INTERNAL_USE_GETAUXVAL
#elif defined(__linux__)
#elif defined(__linux__) && defined(ABSL_HAVE_GETAUXVAL)
#define ABSL_INTERNAL_USE_LINUX_GETAUXVAL
#define ABSL_INTERNAL_USE_GETAUXVAL
#endif
@@ -40,7 +45,6 @@
#if defined(ABSL_INTERNAL_USE_X86_CPUID)
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h> // NOLINT(build/include_order)
#pragma intrinsic(__cpuid)
#else
// MSVC-equivalent __cpuid intrinsic function.
static void __cpuid(int cpu_info[4], int info_type) {

View File

@@ -42,7 +42,7 @@ namespace random_internal {
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
template <typename T>
class alignas(16) randen_engine {
class alignas(8) randen_engine {
public:
// C++11 URBG interface:
using result_type = T;
@@ -58,7 +58,8 @@ class alignas(16) randen_engine {
return (std::numeric_limits<result_type>::max)();
}
explicit randen_engine(result_type seed_value = 0) { seed(seed_value); }
randen_engine() : randen_engine(0) {}
explicit randen_engine(result_type seed_value) { seed(seed_value); }
template <class SeedSequence,
typename = typename absl::enable_if_t<
@@ -67,17 +68,27 @@ class alignas(16) randen_engine {
seed(seq);
}
randen_engine(const randen_engine&) = default;
// alignment requirements dictate custom copy and move constructors.
randen_engine(const randen_engine& other)
: next_(other.next_), impl_(other.impl_) {
std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
}
randen_engine& operator=(const randen_engine& other) {
next_ = other.next_;
impl_ = other.impl_;
std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
return *this;
}
// Returns random bits from the buffer in units of result_type.
result_type operator()() {
// Refill the buffer if needed (unlikely).
auto* begin = state();
if (next_ >= kStateSizeT) {
next_ = kCapacityT;
impl_.Generate(state_);
impl_.Generate(begin);
}
return little_endian::ToHost(state_[next_++]);
return little_endian::ToHost(begin[next_++]);
}
template <class SeedSequence>
@@ -92,9 +103,10 @@ class alignas(16) randen_engine {
void seed(result_type seed_value = 0) {
next_ = kStateSizeT;
// Zeroes the inner state and fills the outer state with seed_value to
// mimics behaviour of reseed
std::fill(std::begin(state_), std::begin(state_) + kCapacityT, 0);
std::fill(std::begin(state_) + kCapacityT, std::end(state_), seed_value);
// mimic the behaviour of reseed
auto* begin = state();
std::fill(begin, begin + kCapacityT, 0);
std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value);
}
// Inserts entropy into (part of) the state. Calling this periodically with
@@ -105,7 +117,6 @@ class alignas(16) randen_engine {
using sequence_result_type = typename SeedSequence::result_type;
static_assert(sizeof(sequence_result_type) == 4,
"SeedSequence::result_type must be 32-bit");
constexpr size_t kBufferSize =
Randen::kSeedBytes / sizeof(sequence_result_type);
alignas(16) sequence_result_type buffer[kBufferSize];
@@ -119,8 +130,15 @@ class alignas(16) randen_engine {
if (entropy_size < kBufferSize) {
// ... and only request that many values, or 256-bits, when unspecified.
const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size;
std::fill(std::begin(buffer) + requested_entropy, std::end(buffer), 0);
seq.generate(std::begin(buffer), std::begin(buffer) + requested_entropy);
std::fill(buffer + requested_entropy, buffer + kBufferSize, 0);
seq.generate(buffer, buffer + requested_entropy);
#ifdef ABSL_IS_BIG_ENDIAN
// Randen expects the seed buffer to be in Little Endian; reverse it on
// Big Endian platforms.
for (sequence_result_type& e : buffer) {
e = absl::little_endian::FromHost(e);
}
#endif
// The Randen paper suggests preferentially initializing even-numbered
// 128-bit vectors of the randen state (there are 16 such vectors).
// The seed data is merged into the state offset by 128-bits, which
@@ -139,9 +157,9 @@ class alignas(16) randen_engine {
std::swap(buffer[--dst], buffer[--src]);
}
} else {
seq.generate(std::begin(buffer), std::end(buffer));
seq.generate(buffer, buffer + kBufferSize);
}
impl_.Absorb(buffer, state_);
impl_.Absorb(buffer, state());
// Generate will be called when operator() is called
next_ = kStateSizeT;
@@ -152,9 +170,10 @@ class alignas(16) randen_engine {
count -= step;
constexpr uint64_t kRateT = kStateSizeT - kCapacityT;
auto* begin = state();
while (count > 0) {
next_ = kCapacityT;
impl_.Generate(state_);
impl_.Generate(*reinterpret_cast<result_type(*)[kStateSizeT]>(begin));
step = std::min<uint64_t>(kRateT, count);
count -= step;
}
@@ -162,9 +181,9 @@ class alignas(16) randen_engine {
}
bool operator==(const randen_engine& other) const {
const auto* begin = state();
return next_ == other.next_ &&
std::equal(std::begin(state_), std::end(state_),
std::begin(other.state_));
std::equal(begin, begin + kStateSizeT, other.state());
}
bool operator!=(const randen_engine& other) const {
@@ -178,11 +197,12 @@ class alignas(16) randen_engine {
using numeric_type =
typename random_internal::stream_format_type<result_type>::type;
auto saver = random_internal::make_ostream_state_saver(os);
for (const auto& elem : engine.state_) {
auto* it = engine.state();
for (auto* end = it + kStateSizeT; it < end; ++it) {
// In the case that `elem` is `uint8_t`, it must be cast to something
// larger so that it prints as an integer rather than a character. For
// simplicity, apply the cast all circumstances.
os << static_cast<numeric_type>(little_endian::FromHost(elem))
os << static_cast<numeric_type>(little_endian::FromHost(*it))
<< os.fill();
}
os << engine.next_;
@@ -208,7 +228,7 @@ class alignas(16) randen_engine {
if (is.fail()) {
return is;
}
std::memcpy(engine.state_, state, sizeof(engine.state_));
std::memcpy(engine.state(), state, sizeof(state));
engine.next_ = next;
return is;
}
@@ -219,9 +239,21 @@ class alignas(16) randen_engine {
static constexpr size_t kCapacityT =
Randen::kCapacityBytes / sizeof(result_type);
// First kCapacityT are `inner', the others are accessible random bits.
alignas(16) result_type state_[kStateSizeT];
size_t next_; // index within state_
// Returns the state array pointer, which is aligned to 16 bytes.
// The first kCapacityT are the `inner' sponge; the remainder are available.
result_type* state() {
return reinterpret_cast<result_type*>(
(reinterpret_cast<uintptr_t>(&raw_state_) & 0xf) ? (raw_state_ + 8)
: raw_state_);
}
const result_type* state() const {
return const_cast<randen_engine*>(this)->state();
}
// raw state array, manually aligned in state(). This overallocates
// by 8 bytes since C++ does not guarantee extended heap alignment.
alignas(8) char raw_state_[Randen::kStateBytes + 8];
size_t next_; // index within state()
Randen impl_;
};
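Note: a minimal standalone sketch of the manual 16-byte alignment trick used by state() above; the buffer size is illustrative, the real class sizes it from Randen::kStateBytes.

#include <cstdint>

struct AlignedSketch {
  // Over-allocate by 8 bytes; when the 8-byte-aligned buffer does not already
  // sit on a 16-byte boundary, bump the pointer by 8 to reach one.
  alignas(8) char raw[256 + 8];
  char* state() {
    return (reinterpret_cast<uintptr_t>(raw) & 0xf) ? raw + 8 : raw;
  }
};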

View File

@@ -31,7 +31,7 @@
// a hardware accelerated implementation of randen, or whether it
// will contain stubs that exit the process.
#if ABSL_HAVE_ACCELERATED_AES
// The following plaforms have implemented RandenHwAws.
// The following plaforms have implemented RandenHwAes.
#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32) || \
defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
defined(ABSL_ARCH_AARCH64)
@@ -211,7 +211,7 @@ inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
#elif defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
// On x86 we rely on the aesni instructions
#include <wmmintrin.h>
#include <immintrin.h>
namespace {

View File

@@ -27,44 +27,39 @@ namespace {
using absl::random_internal::RandenHwAes;
using absl::random_internal::RandenTraits;
// Local state parameters.
constexpr size_t kSeedBytes =
RandenTraits::kStateBytes - RandenTraits::kCapacityBytes;
constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t);
constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t);
struct alignas(16) randen {
uint64_t state[kStateSizeT];
uint32_t seed[kSeedSizeT];
};
TEST(RandenHwAesTest, Default) {
EXPECT_TRUE(absl::random_internal::CPUSupportsRandenHwAes());
constexpr uint64_t kGolden[] = {
0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
0x026ff374c101da7e, 0x811ef0821c3de851,
constexpr uint8_t kGolden[] = {
0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
0x82, 0xf0, 0x1e, 0x81,
};
alignas(16) randen d;
memset(d.state, 0, sizeof(d.state));
RandenHwAes::Generate(RandenHwAes::GetKeys(), d.state);
alignas(16) uint8_t state[RandenTraits::kStateBytes];
std::memset(state, 0, sizeof(state));
uint64_t* id = d.state;
for (const auto& elem : kGolden) {
auto a = absl::StrFormat("%#x", elem);
auto b = absl::StrFormat("%#x", *id++);
EXPECT_EQ(a, b);
}
RandenHwAes::Generate(RandenHwAes::GetKeys(), state);
EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
} // namespace

View File

@@ -395,6 +395,23 @@ inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
}
}
// Enables native loads in the round loop by pre-swapping.
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
absl::uint128* state) {
#ifdef ABSL_IS_BIG_ENDIAN
for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
uint64_t new_lo = absl::little_endian::ToHost64(
static_cast<uint64_t>(state[block] >> 64));
uint64_t new_hi = absl::little_endian::ToHost64(
static_cast<uint64_t>((state[block] << 64) >> 64));
state[block] = (static_cast<absl::uint128>(new_hi) << 64) | new_lo;
}
#else
// Avoid warning about unused variable.
(void)state;
#endif
}
} // namespace
namespace absl {
@@ -439,8 +456,12 @@ void RandenSlow::Generate(const void* keys_void, void* state_void) {
const absl::uint128 prev_inner = state[0];
SwapEndian(state);
Permute(state, keys);
SwapEndian(state);
// Ensure backtracking resistance.
*state ^= prev_inner;
}

View File

@@ -25,40 +25,37 @@ namespace {
using absl::random_internal::RandenSlow;
using absl::random_internal::RandenTraits;
// Local state parameters.
constexpr size_t kSeedBytes =
RandenTraits::kStateBytes - RandenTraits::kCapacityBytes;
constexpr size_t kStateSizeT = RandenTraits::kStateBytes / sizeof(uint64_t);
constexpr size_t kSeedSizeT = kSeedBytes / sizeof(uint32_t);
struct alignas(16) randen {
uint64_t state[kStateSizeT];
uint32_t seed[kSeedSizeT];
};
TEST(RandenSlowTest, Default) {
constexpr uint64_t kGolden[] = {
0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
0x026ff374c101da7e, 0x811ef0821c3de851,
constexpr uint8_t kGolden[] = {
0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
0x82, 0xf0, 0x1e, 0x81,
};
alignas(16) randen d;
std::memset(d.state, 0, sizeof(d.state));
RandenSlow::Generate(RandenSlow::GetKeys(), d.state);
alignas(16) uint8_t state[RandenTraits::kStateBytes];
std::memset(state, 0, sizeof(state));
uint64_t* id = d.state;
for (const auto& elem : kGolden) {
EXPECT_EQ(absl::little_endian::FromHost64(elem), *id++);
}
RandenSlow::Generate(RandenSlow::GetKeys(), state);
EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
} // namespace

View File

@@ -23,9 +23,6 @@ namespace {
using absl::random_internal::Randen;
// Local state parameters.
constexpr size_t kStateSizeT = Randen::kStateBytes / sizeof(uint64_t);
TEST(RandenTest, CopyAndMove) {
static_assert(std::is_copy_constructible<Randen>::value,
"Randen must be copy constructible");
@@ -41,30 +38,38 @@ TEST(RandenTest, CopyAndMove) {
}
TEST(RandenTest, Default) {
constexpr uint64_t kGolden[] = {
0x6c6534090ee6d3ee, 0x044e2b9b9d5333c6, 0xc3c14f134e433977,
0xdda9f47cd90410ee, 0x887bf3087fd8ca10, 0xf0b780f545c72912,
0x15dbb1d37696599f, 0x30ec63baff3c6d59, 0xb29f73606f7f20a6,
0x02808a316f49a54c, 0x3b8feaf9d5c8e50e, 0x9cbf605e3fd9de8a,
0xc970ae1a78183bbb, 0xd8b2ffd356301ed5, 0xf4b327fe0fc73c37,
0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d, 0xd5af05dd3eff9556,
0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c, 0x58d3575834956d42,
0xed1ef4c26b87b840, 0x8eef32a23e0b2df3, 0x497cabf3431154fc,
0x4e24370570029a8b, 0xd88b5749f090e5ea, 0xc651a582a970692f,
0x78fcec2cbb6342f5, 0x463cb745612f55db, 0x352ee4ad1816afe3,
0x026ff374c101da7e, 0x811ef0821c3de851,
constexpr uint8_t kGolden[] = {
0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
0x82, 0xf0, 0x1e, 0x81,
};
alignas(16) uint64_t state[kStateSizeT];
alignas(16) uint8_t state[Randen::kStateBytes];
std::memset(state, 0, sizeof(state));
Randen r;
r.Generate(state);
auto id = std::begin(state);
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, *id++);
}
EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
} // namespace

View File

@@ -22,6 +22,7 @@
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
@@ -65,15 +66,19 @@ class SaltedSeedSeq {
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
// The common case is that generate is called with ContiguousIterators
// to uint arrays. Such contiguous memory regions may be optimized,
// which we detect here.
using tag = absl::conditional_t<
(std::is_pointer<RandomAccessIterator>::value &&
std::is_same<absl::decay_t<decltype(*begin)>, uint32_t>::value),
using TagType = absl::conditional_t<
(std::is_same<U, uint32_t>::value &&
(std::is_pointer<RandomAccessIterator>::value ||
std::is_same<RandomAccessIterator,
typename std::vector<U>::iterator>::value)),
ContiguousAndUint32Tag, DefaultTag>;
if (begin != end) {
generate_impl(begin, end, tag{});
generate_impl(TagType{}, begin, end, std::distance(begin, end));
}
}
@@ -89,8 +94,15 @@ class SaltedSeedSeq {
struct DefaultTag {};
// Generate which requires the iterators are contiguous pointers to uint32_t.
void generate_impl(uint32_t* begin, uint32_t* end, ContiguousAndUint32Tag) {
generate_contiguous(absl::MakeSpan(begin, end));
// Fills the initial seed buffer the underlying SSeq::generate() call,
// then mixes in the salt material.
template <typename Contiguous>
void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end,
size_t n) {
seq_->generate(begin, end);
const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
auto span = absl::Span<uint32_t>(&*begin, n);
MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span);
}
// The uncommon case for generate is that it is called with iterators over
@@ -98,27 +110,13 @@ class SaltedSeedSeq {
// case we allocate a temporary 32-bit buffer and then copy-assign back
// to the initial inputs.
template <typename RandomAccessIterator>
void generate_impl(RandomAccessIterator begin, RandomAccessIterator end,
DefaultTag) {
return generate_and_copy(std::distance(begin, end), begin);
}
// Fills the initial seed buffer the underlying SSeq::generate() call,
// mixing in the salt material.
void generate_contiguous(absl::Span<uint32_t> buffer) {
seq_->generate(buffer.begin(), buffer.end());
const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), buffer);
}
// Allocates a seed buffer of `n` elements, generates the seed, then
// copies the result into the `out` iterator.
template <typename Iterator>
void generate_and_copy(size_t n, Iterator out) {
// Allocate a temporary buffer, generate, and then copy.
void generate_impl(DefaultTag, RandomAccessIterator begin,
RandomAccessIterator, size_t n) {
// Allocates a seed buffer of `n` elements, generates the seed, then
// copies the result into the `out` iterator.
absl::InlinedVector<uint32_t, 8> data(n, 0);
generate_contiguous(absl::MakeSpan(data.data(), data.size()));
std::copy(data.begin(), data.end(), out);
generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n);
std::copy(data.begin(), data.end(), begin);
}
// Because [rand.req.seedseq] is not required to be copy-constructible,

View File

@@ -20,6 +20,8 @@
#include <type_traits>
#include "absl/base/config.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -59,6 +61,31 @@ class is_widening_convertible {
rank<A>() <= rank<B>();
};
template <typename T>
struct IsIntegral : std::is_integral<T> {};
template <>
struct IsIntegral<absl::int128> : std::true_type {};
template <>
struct IsIntegral<absl::uint128> : std::true_type {};
template <typename T>
struct MakeUnsigned : std::make_unsigned<T> {};
template <>
struct MakeUnsigned<absl::int128> {
using type = absl::uint128;
};
template <>
struct MakeUnsigned<absl::uint128> {
using type = absl::uint128;
};
template <typename T>
struct IsUnsigned : std::is_unsigned<T> {};
template <>
struct IsUnsigned<absl::int128> : std::false_type {};
template <>
struct IsUnsigned<absl::uint128> : std::true_type {};
// unsigned_bits<N>::type returns the unsigned int type with the indicated
// number of bits.
template <size_t N>
@@ -81,19 +108,40 @@ struct unsigned_bits<64> {
using type = uint64_t;
};
#ifdef ABSL_HAVE_INTRINSIC_INT128
template <>
struct unsigned_bits<128> {
using type = __uint128_t;
using type = absl::uint128;
};
// 256-bit wrapper for wide multiplications.
struct U256 {
uint128 hi;
uint128 lo;
};
template <>
struct unsigned_bits<256> {
using type = U256;
};
#endif
template <typename IntType>
struct make_unsigned_bits {
using type = typename unsigned_bits<std::numeric_limits<
typename std::make_unsigned<IntType>::type>::digits>::type;
using type = typename unsigned_bits<
std::numeric_limits<typename MakeUnsigned<IntType>::type>::digits>::type;
};
template <typename T>
int BitWidth(T v) {
// Workaround for bit_width not supporting int128.
// Don't hardcode `64` to make sure this code does not trigger compiler
// warnings in smaller types.
constexpr int half_bits = sizeof(T) * 8 / 2;
if (sizeof(T) == 16 && (v >> half_bits) != 0) {
return bit_width(static_cast<uint64_t>(v >> half_bits)) + half_bits;
} else {
return bit_width(static_cast<uint64_t>(v));
}
}
} // namespace random_internal
ABSL_NAMESPACE_END
} // namespace absl
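Note: a hedged sketch of how the 128-bit-aware traits above behave, assuming they live in absl/random/internal/traits.h as the includes elsewhere in this commit suggest; the values are chosen only for illustration.

#include <cstdint>
#include <type_traits>
#include "absl/numeric/int128.h"
#include "absl/random/internal/traits.h"

namespace ri = absl::random_internal;

static_assert(ri::IsIntegral<absl::uint128>::value, "128-bit types now count");
static_assert(!ri::IsUnsigned<absl::int128>::value, "int128 is signed");
static_assert(std::is_same<ri::MakeUnsigned<absl::int128>::type,
                           absl::uint128>::value, "");

int WidthExamples() {
  // BitWidth splits 128-bit inputs into two 64-bit bit_width calls.
  int a = ri::BitWidth(uint32_t{5});             // 3
  int b = ri::BitWidth(absl::uint128{1} << 64);  // 65
  return a + b;
}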

View File

@@ -100,7 +100,7 @@ using uniform_inferred_return_t =
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_integral<IntType>,
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
@@ -131,7 +131,7 @@ uniform_lower_bound(Tag, NumType a, NumType) {
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_integral<IntType>,
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
@@ -153,7 +153,7 @@ uniform_upper_bound(Tag, FloatType, FloatType b) {
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_integral<IntType>,
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
std::is_same<Tag, IntervalOpenClosedTag>>>::value,
IntType>
@@ -201,7 +201,7 @@ is_uniform_range_valid(FloatType a, FloatType b) {
}
template <typename IntType>
absl::enable_if_t<std::is_integral<IntType>::value, bool>
absl::enable_if_t<IsIntegral<IntType>::value, bool>
is_uniform_range_valid(IntType a, IntType b) {
return a <= b;
}
@@ -210,7 +210,7 @@ is_uniform_range_valid(IntType a, IntType b) {
// or absl::uniform_real_distribution depending on the NumType parameter.
template <typename NumType>
using UniformDistribution =
typename std::conditional<std::is_integral<NumType>::value,
typename std::conditional<IsIntegral<NumType>::value,
absl::uniform_int_distribution<NumType>,
absl::uniform_real_distribution<NumType>>::type;
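Note: switching these helpers from std::is_integral to random_internal::IsIntegral is what lets the public API accept 128-bit integers, as exercised by the distributions test earlier in this commit. A short sketch:

#include "absl/numeric/int128.h"
#include "absl/random/random.h"

absl::uint128 Example() {
  absl::BitGen gen;
  // Now valid: uniform over the full uint128 range (no explicit bounds).
  return absl::Uniform<absl::uint128>(gen);
}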

View File

@@ -34,43 +34,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// Helper object to multiply two 64-bit values to a 128-bit value.
// MultiplyU64ToU128 multiplies two 64-bit values to a 128-bit value.
// If an intrinsic is available, it is used, otherwise use native 32-bit
// multiplies to construct the result.
inline absl::uint128 MultiplyU64ToU128(uint64_t a, uint64_t b) {
#if defined(ABSL_HAVE_INTRINSIC_INT128)
return absl::uint128(static_cast<__uint128_t>(a) * b);
#elif defined(ABSL_INTERNAL_USE_UMUL128)
// uint64_t * uint64_t => uint128 multiply using imul intrinsic on MSVC.
uint64_t high = 0;
const uint64_t low = _umul128(a, b, &high);
return absl::MakeUint128(high, low);
#else
// uint128(a) * uint128(b) in emulated mode computes a full 128-bit x 128-bit
// multiply. However there are many cases where that is not necessary, and it
// is only necessary to support a 64-bit x 64-bit = 128-bit multiply. This is
// for those cases.
const uint64_t a00 = static_cast<uint32_t>(a);
const uint64_t a32 = a >> 32;
const uint64_t b00 = static_cast<uint32_t>(b);
const uint64_t b32 = b >> 32;
const uint64_t c00 = a00 * b00;
const uint64_t c32a = a00 * b32;
const uint64_t c32b = a32 * b00;
const uint64_t c64 = a32 * b32;
const uint32_t carry =
static_cast<uint32_t>(((c00 >> 32) + static_cast<uint32_t>(c32a) +
static_cast<uint32_t>(c32b)) >>
32);
return absl::MakeUint128(c64 + (c32a >> 32) + (c32b >> 32) + carry,
c00 + (c32a << 32) + (c32b << 32));
#endif
}
// wide_multiply<T> multiplies two N-bit values to a 2N-bit result.
template <typename UIntType>
struct wide_multiply {
@@ -82,27 +45,49 @@ struct wide_multiply {
return static_cast<result_type>(a) * b;
}
static input_type hi(result_type r) { return r >> kN; }
static input_type lo(result_type r) { return r; }
static input_type hi(result_type r) {
return static_cast<input_type>(r >> kN);
}
static input_type lo(result_type r) { return static_cast<input_type>(r); }
static_assert(std::is_unsigned<UIntType>::value,
"Class-template wide_multiply<> argument must be unsigned.");
};
#ifndef ABSL_HAVE_INTRINSIC_INT128
template <>
struct wide_multiply<uint64_t> {
using input_type = uint64_t;
using result_type = absl::uint128;
// MultiplyU128ToU256 multiplies two 128-bit values to a 256-bit value.
inline U256 MultiplyU128ToU256(uint128 a, uint128 b) {
const uint128 a00 = static_cast<uint64_t>(a);
const uint128 a64 = a >> 64;
const uint128 b00 = static_cast<uint64_t>(b);
const uint128 b64 = b >> 64;
static result_type multiply(uint64_t a, uint64_t b) {
return MultiplyU64ToU128(a, b);
const uint128 c00 = a00 * b00;
const uint128 c64a = a00 * b64;
const uint128 c64b = a64 * b00;
const uint128 c128 = a64 * b64;
const uint64_t carry =
static_cast<uint64_t>(((c00 >> 64) + static_cast<uint64_t>(c64a) +
static_cast<uint64_t>(c64b)) >>
64);
return {c128 + (c64a >> 64) + (c64b >> 64) + carry,
c00 + (c64a << 64) + (c64b << 64)};
}
template <>
struct wide_multiply<uint128> {
using input_type = uint128;
using result_type = U256;
static result_type multiply(input_type a, input_type b) {
return MultiplyU128ToU256(a, b);
}
static uint64_t hi(result_type r) { return absl::Uint128High64(r); }
static uint64_t lo(result_type r) { return absl::Uint128Low64(r); }
static input_type hi(result_type r) { return r.hi; }
static input_type lo(result_type r) { return r.lo; }
};
#endif
} // namespace random_internal
ABSL_NAMESPACE_END

View File

@@ -14,52 +14,106 @@
#include "absl/random/internal/wide_multiply.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
using absl::random_internal::MultiplyU64ToU128;
using absl::random_internal::MultiplyU128ToU256;
using absl::random_internal::U256;
namespace {
TEST(WideMultiplyTest, MultiplyU64ToU128Test) {
constexpr uint64_t k1 = 1;
constexpr uint64_t kMax = ~static_cast<uint64_t>(0);
U256 LeftShift(U256 v, int s) {
if (s == 0) {
return v;
} else if (s < 128) {
return {(v.hi << s) | (v.lo >> (128 - s)), v.lo << s};
} else {
return {v.lo << (s - 128), 0};
}
}
EXPECT_EQ(absl::uint128(0), MultiplyU64ToU128(0, 0));
MATCHER_P2(Eq256, hi, lo, "") { return arg.hi == hi && arg.lo == lo; }
MATCHER_P(Eq256, v, "") { return arg.hi == v.hi && arg.lo == v.lo; }
// Max uint64_t
EXPECT_EQ(MultiplyU64ToU128(kMax, kMax),
absl::MakeUint128(0xfffffffffffffffe, 0x0000000000000001));
EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(kMax, 1));
EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(1, kMax));
TEST(WideMultiplyTest, MultiplyU128ToU256Test) {
using absl::uint128;
constexpr uint128 k1 = 1;
constexpr uint128 kMax = ~static_cast<uint128>(0);
EXPECT_THAT(MultiplyU128ToU256(0, 0), Eq256(0, 0));
// Max uint128_t
EXPECT_THAT(MultiplyU128ToU256(kMax, kMax), Eq256(kMax << 1, 1));
EXPECT_THAT(MultiplyU128ToU256(kMax, 1), Eq256(0, kMax));
EXPECT_THAT(MultiplyU128ToU256(1, kMax), Eq256(0, kMax));
for (int i = 0; i < 64; ++i) {
EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
MultiplyU64ToU128(kMax, k1 << i));
EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
MultiplyU64ToU128(k1 << i, kMax));
SCOPED_TRACE(i);
EXPECT_THAT(MultiplyU128ToU256(kMax, k1 << i),
Eq256(LeftShift({0, kMax}, i)));
EXPECT_THAT(MultiplyU128ToU256(k1 << i, kMax),
Eq256(LeftShift({0, kMax}, i)));
}
// 1-bit x 1-bit.
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
MultiplyU64ToU128(k1 << i, k1 << j));
EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
MultiplyU64ToU128(k1 << i, k1 << j));
EXPECT_THAT(MultiplyU128ToU256(k1 << i, k1 << j),
Eq256(LeftShift({0, 1}, i + j)));
}
}
// Verified multiplies
EXPECT_EQ(MultiplyU64ToU128(0xffffeeeeddddcccc, 0xbbbbaaaa99998888),
absl::MakeUint128(0xbbbb9e2692c5dddc, 0xc28f7531048d2c60));
EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfedcba9876543210),
absl::MakeUint128(0x0121fa00ad77d742, 0x2236d88fe5618cf0));
EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfdb97531eca86420),
absl::MakeUint128(0x0120ae99d26725fc, 0xce197f0ecac319e0));
EXPECT_EQ(MultiplyU64ToU128(0x97a87f4f261ba3f2, 0xfedcba9876543210),
absl::MakeUint128(0x96fbf1a8ae78d0ba, 0x5a6dd4b71f278320));
EXPECT_EQ(MultiplyU64ToU128(0xfedcba9876543210, 0xfdb97531eca86420),
absl::MakeUint128(0xfc98c6981a413e22, 0x342d0bbf48948200));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xc502da0d6ea99fe8, 0xfa3c9141a1f50912),
absl::MakeUint128(0x96bcf1ac37f97bd6, 0x27e2cdeb5fb2299e)),
Eq256(absl::MakeUint128(0x740113d838f96a64, 0x22e8cfa4d71f89ea),
absl::MakeUint128(0x19184a345c62e993, 0x237871b630337b1c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x6f29e670cee07230, 0xc3d8e6c3e4d86759),
absl::MakeUint128(0x3227d29fa6386db1, 0x231682bb1e4b764f)),
Eq256(absl::MakeUint128(0x15c779d9d5d3b07c, 0xd7e6c827f0c81cbe),
absl::MakeUint128(0xf88e3914f7fa287a, 0x15b79975137dea77)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xafb77107215646e1, 0x3b844cb1ac5769e7),
absl::MakeUint128(0x1ff7b2d888b62479, 0x92f758ae96fcba0b)),
Eq256(absl::MakeUint128(0x15f13b70181f6985, 0x2adb36bbabce7d02),
absl::MakeUint128(0x6c470d72e13aad04, 0x63fba3f5841762ed)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xd85d5558d67ac905, 0xf88c70654dae19b1),
absl::MakeUint128(0x17252c6727db3738, 0x399ff658c511eedc)),
Eq256(absl::MakeUint128(0x138fcdaf8b0421ee, 0x1b465ddf2a0d03f6),
absl::MakeUint128(0x8f573ba68296860f, 0xf327d2738741a21c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x46f0421a37ff6bee, 0xa61df89f09d140b1),
absl::MakeUint128(0x3d712ec9f37ca2e1, 0x9658a2cba47ef4b1)),
Eq256(absl::MakeUint128(0x11069cc48ee7c95d, 0xd35fb1c7aa91c978),
absl::MakeUint128(0xbe2f4a6de874b015, 0xd2f7ac1b76746e61)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x730d27c72d58fa49, 0x3ebeda7498f8827c),
absl::MakeUint128(0xa2c959eca9f503af, 0x189c687eb842bbd8)),
Eq256(absl::MakeUint128(0x4928d0ea356ba022, 0x1546d34a2963393),
absl::MakeUint128(0x7481531e1e0a16d1, 0xdd8025015cf6aca0)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x6ca41020f856d2f1, 0xb9b0838c04a7f4aa),
absl::MakeUint128(0x9cf41d28a8396f54, 0x1d681695e377ffe6)),
Eq256(absl::MakeUint128(0x429b92934d9be6f1, 0xea182877157c1e7),
absl::MakeUint128(0x7135c23f0a4a475, 0xc1adc366f4a126bc)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x57472833797c332, 0x6c79272fdec4687a),
absl::MakeUint128(0xb5f022ea3838e46b, 0x16face2f003e27a6)),
Eq256(absl::MakeUint128(0x3e072e0962b3400, 0x5d9fe8fdc3d0e1f4),
absl::MakeUint128(0x7dc0df47cedafd62, 0xbe6501f1acd2551c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xf0fb4198322eb1c2, 0xfe7f5f31f3885938),
absl::MakeUint128(0xd99012b71bb7aa31, 0xac7a6f9eb190789)),
Eq256(absl::MakeUint128(0xcccc998cf075ca01, 0x642d144322fb873a),
absl::MakeUint128(0xc79dc12b69d91ed4, 0xa83459132ce046f8)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xb5c04120848cdb47, 0x8aa62a827bf52635),
absl::MakeUint128(0x8d07a359be2f1380, 0x467bb90d59da0dea)),
Eq256(absl::MakeUint128(0x64205019d139a9ce, 0x99425c5fb6e7a977),
absl::MakeUint128(0xd3e99628a9e5fca7, 0x9c7824cb7279d72)));
}
} // namespace

View File

@@ -69,10 +69,8 @@ class log_uniform_int_distribution {
if (base_ == 2) {
// Determine where the first set bit is on range(), giving a log2(range)
// value which can be used to construct bounds.
log_range_ =
(std::min)(bit_width(range()),
static_cast<unsigned_type>(
std::numeric_limits<unsigned_type>::digits));
log_range_ = (std::min)(random_internal::BitWidth(range()),
std::numeric_limits<unsigned_type>::digits);
} else {
// NOTE: Computing the logN(x) introduces error from 2 sources:
// 1. Conversion of int to double loses precision for values >=
@@ -83,7 +81,7 @@ class log_uniform_int_distribution {
//
// Thus a result which should equal K may equal K +/- epsilon,
// which can eliminate some values depending on where the bounds fall.
const double inv_log_base = 1.0 / std::log(base_);
const double inv_log_base = 1.0 / std::log(static_cast<double>(base_));
const double log_range = std::log(static_cast<double>(range()) + 0.5);
log_range_ = static_cast<int>(std::ceil(inv_log_base * log_range));
}
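To make the branch above concrete: for base 2 the value comes straight from the bit width of `range()`, which is exact integer arithmetic, while for other bases it goes through `std::log` and can land on either side of an exact power of the base, as the NOTE warns. A small illustrative sketch (the local `BitWidth` below is a stand-in for `random_internal::BitWidth`; values shown are examples only):

```cpp
// Illustrates the two log_range computations above (not the library's code).
#include <cmath>
#include <cstdint>
#include <cstdio>

int BitWidth(uint64_t v) {  // stand-in for random_internal::BitWidth
  int w = 0;
  while (v != 0) { ++w; v >>= 1; }
  return w;
}

int LogRange(uint64_t range, uint64_t base) {
  if (base == 2) return BitWidth(range);
  const double inv_log_base = 1.0 / std::log(static_cast<double>(base));
  const double log_range = std::log(static_cast<double>(range) + 0.5);
  return static_cast<int>(std::ceil(inv_log_base * log_range));
}

int main() {
  std::printf("%d\n", LogRange(1023, 2));  // 10: 1023 occupies 10 bits
  std::printf("%d\n", LogRange(999, 10));  // 3: ceil(log10(999.5))
}
```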
@@ -113,7 +111,7 @@ class log_uniform_int_distribution {
unsigned_type range_; // max - min
int log_range_; // ceil(logN(range_))
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::log_uniform_int_distribution<> must be "
"parameterized using an integral type.");
};
@@ -139,7 +137,7 @@ class log_uniform_int_distribution {
template <typename URBG>
result_type operator()(URBG& g, // NOLINT(runtime/references)
const param_type& p) {
return (p.min)() + Generate(g, p);
return static_cast<result_type>((p.min)() + Generate(g, p));
}
result_type(min)() const { return (param_.min)(); }
@@ -193,8 +191,8 @@ log_uniform_int_distribution<IntType>::Generate(
? (std::numeric_limits<unsigned_type>::max)()
: (static_cast<unsigned_type>(1) << e) - 1;
} else {
const double r = std::pow(p.base(), d);
const double s = (r * p.base()) - 1.0;
const double r = std::pow(static_cast<double>(p.base()), d);
const double s = (r * static_cast<double>(p.base())) - 1.0;
base_e =
(r > static_cast<double>((std::numeric_limits<unsigned_type>::max)()))
@@ -211,7 +209,8 @@ log_uniform_int_distribution<IntType>::Generate(
const unsigned_type hi = (top_e >= p.range()) ? p.range() : top_e;
// choose uniformly over [lo, hi]
return absl::uniform_int_distribution<result_type>(lo, hi)(g);
return absl::uniform_int_distribution<result_type>(
static_cast<result_type>(lo), static_cast<result_type>(hi))(g);
}
template <typename CharT, typename Traits, typename IntType>

View File

@@ -42,7 +42,7 @@ class LogUniformIntDistributionTypeTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, //
uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(LogUniformIntDistributionTypeTest, IntTypes);
TYPED_TEST_SUITE(LogUniformIntDistributionTypeTest, IntTypes);
TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) {
using param_type =

View File

@@ -87,7 +87,7 @@ class BitGenRef;
//
// ON_CALL(absl::MockUniform<int>(), Call(bitgen, testing::_, testing::_))
// .WillByDefault([] (int low, int high) {
// return (low + high) / 2;
// return low + (high - low) / 2;
// });
//
// EXPECT_EQ(absl::Uniform<int>(gen, 0, 10), 5);
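A side note on the documentation fix above (general C++ guidance, not something the diff itself states): `(low + high) / 2` can overflow a signed int when both bounds are large, whereas `low + (high - low) / 2` stays in range for any `low <= high`. A tiny illustration:

```cpp
// Why the documented default action uses low + (high - low) / 2:
// (low + high) can overflow int when both bounds are near INT_MAX,
// which is undefined behavior; the rewritten form stays in range.
#include <climits>
#include <cstdio>

int Midpoint(int low, int high) {
  return low + (high - low) / 2;  // safe for any low <= high
}

int main() {
  std::printf("%d\n", Midpoint(0, 10));                 // 5
  std::printf("%d\n", Midpoint(INT_MAX - 2, INT_MAX));  // INT_MAX - 1
}
```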

View File

@@ -26,6 +26,7 @@
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/generate_real.h"
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -80,7 +81,7 @@ class poisson_distribution {
double log_k_;
int split_;
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::poisson_distribution<> must be "
"parameterized using an integral type.");
};
@@ -133,7 +134,8 @@ template <typename IntType>
poisson_distribution<IntType>::param_type::param_type(double mean)
: mean_(mean), split_(0) {
assert(mean >= 0);
assert(mean <= (std::numeric_limits<result_type>::max)());
assert(mean <=
static_cast<double>((std::numeric_limits<result_type>::max)()));
// As a defensive measure, avoid large values of the mean. The rejection
// algorithm used does not support very large values well. It may be worth
// changing algorithms to better deal with these cases.
@@ -222,8 +224,9 @@ poisson_distribution<IntType>::operator()(
// clang-format on
const double lhs = 2.0 * std::log(u) + p.log_k_ + s;
if (lhs < rhs) {
return x > (max)() ? (max)()
: static_cast<result_type>(x); // f(x)/k >= u^2
return x > static_cast<double>((max)())
? (max)()
: static_cast<result_type>(x); // f(x)/k >= u^2
}
}
}

View File

@@ -73,7 +73,7 @@ class PoissonDistributionInterfaceTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(PoissonDistributionInterfaceTest, IntTypes);
TYPED_TEST_SUITE(PoissonDistributionInterfaceTest, IntTypes);
TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) {
using param_type = typename absl::poisson_distribution<TypeParam>::param_type;

View File

@@ -28,6 +28,7 @@
#include <iterator>
#include <random>
#include "absl/base/config.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/random/seed_gen_exception.h"

View File

@@ -97,7 +97,7 @@ class uniform_int_distribution {
result_type lo_;
unsigned_type range_;
static_assert(std::is_integral<result_type>::value,
static_assert(random_internal::IsIntegral<result_type>::value,
"Class-template absl::uniform_int_distribution<> must be "
"parameterized using an integral type.");
}; // param_type
@@ -125,7 +125,7 @@ class uniform_int_distribution {
template <typename URBG>
result_type operator()(
URBG& gen, const param_type& param) { // NOLINT(runtime/references)
return param.a() + Generate(gen, param.range());
return static_cast<result_type>(param.a() + Generate(gen, param.range()));
}
result_type a() const { return param_.a(); }

View File

@@ -73,12 +73,12 @@ class uniform_real_distribution {
: lo_(lo), hi_(hi), range_(hi - lo) {
// [rand.dist.uni.real] preconditions 2 & 3
assert(lo <= hi);
// NOTE: For integral types, we can promote the range to an unsigned type,
// which gives full width of the range. However for real (fp) types, this
// is not possible, so value generation cannot use the full range of the
// real type.
assert(range_ <= (std::numeric_limits<result_type>::max)());
assert(std::isfinite(range_));
}
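The added `std::isfinite(range_)` assertion matches the comment above: for floating-point result types the span `hi - lo` must itself be a finite value of the type, since there is no unsigned-promotion trick as in the integral case. A brief sketch of the case being rejected:

```cpp
// Demonstrates the precondition the assert above protects: for doubles,
// lowest()..max() spans roughly 2 * max(), which is not representable and
// evaluates to infinity, so isfinite(range) is the right check.
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  const double lo = std::numeric_limits<double>::lowest();
  const double hi = std::numeric_limits<double>::max();
  const double range = hi - lo;  // overflows to +inf
  std::printf("finite: %d\n", std::isfinite(range) ? 1 : 0);  // finite: 0
}
```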
result_type a() const { return lo_; }

View File

@@ -78,62 +78,74 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
GTEST_SKIP()
<< "Skipping the test because we detected x87 floating-point semantics";
#endif
using DistributionType = absl::uniform_real_distribution<TypeParam>;
using real_type = TypeParam;
using param_type = typename DistributionType::param_type;
using param_type =
typename absl::uniform_real_distribution<TypeParam>::param_type;
constexpr const real_type kMax = std::numeric_limits<real_type>::max();
constexpr const real_type kMin = std::numeric_limits<real_type>::min();
constexpr const real_type kEpsilon =
std::numeric_limits<real_type>::epsilon();
constexpr const real_type kLowest =
std::numeric_limits<real_type>::lowest(); // -max
constexpr const TypeParam a{1152921504606846976};
const real_type kDenormMax = std::nextafter(kMin, real_type{0});
const real_type kOneMinusE =
std::nextafter(real_type{1}, real_type{0}); // 1 - epsilon
constexpr const real_type kTwo60{1152921504606846976}; // 2^60
constexpr int kCount = 1000;
absl::InsecureBitGen gen;
for (const auto& param : {
param_type(),
param_type(TypeParam(2.0), TypeParam(2.0)), // Same
param_type(TypeParam(-0.1), TypeParam(0.1)),
param_type(TypeParam(0.05), TypeParam(0.12)),
param_type(TypeParam(-0.05), TypeParam(0.13)),
param_type(TypeParam(-0.05), TypeParam(-0.02)),
param_type(real_type{0}, real_type{1}),
param_type(real_type(-0.1), real_type(0.1)),
param_type(real_type(0.05), real_type(0.12)),
param_type(real_type(-0.05), real_type(0.13)),
param_type(real_type(-0.05), real_type(-0.02)),
// range = 0
param_type(real_type(2.0), real_type(2.0)), // Same
// double range = 0
// 2^60 , 2^60 + 2^6
param_type(a, TypeParam(1152921504606847040)),
param_type(kTwo60, real_type(1152921504606847040)),
// 2^60 , 2^60 + 2^7
param_type(a, TypeParam(1152921504606847104)),
param_type(kTwo60, real_type(1152921504606847104)),
// double range = 2^8
// 2^60 , 2^60 + 2^8
param_type(a, TypeParam(1152921504606847232)),
param_type(kTwo60, real_type(1152921504606847232)),
// float range = 0
// 2^60 , 2^60 + 2^36
param_type(a, TypeParam(1152921573326323712)),
param_type(kTwo60, real_type(1152921573326323712)),
// 2^60 , 2^60 + 2^37
param_type(a, TypeParam(1152921642045800448)),
param_type(kTwo60, real_type(1152921642045800448)),
// float range = 2^38
// 2^60 , 2^60 + 2^38
param_type(a, TypeParam(1152921779484753920)),
param_type(kTwo60, real_type(1152921779484753920)),
// Limits
param_type(0, std::numeric_limits<TypeParam>::max()),
param_type(std::numeric_limits<TypeParam>::lowest(), 0),
param_type(0, std::numeric_limits<TypeParam>::epsilon()),
param_type(-std::numeric_limits<TypeParam>::epsilon(),
std::numeric_limits<TypeParam>::epsilon()),
param_type(std::numeric_limits<TypeParam>::epsilon(),
2 * std::numeric_limits<TypeParam>::epsilon()),
param_type(0, kMax),
param_type(kLowest, 0),
param_type(0, kMin),
param_type(0, kEpsilon),
param_type(-kEpsilon, kEpsilon),
param_type(0, kOneMinusE),
param_type(0, kDenormMax),
}) {
// Validate parameters.
const auto a = param.a();
const auto b = param.b();
absl::uniform_real_distribution<TypeParam> before(a, b);
DistributionType before(a, b);
EXPECT_EQ(before.a(), param.a());
EXPECT_EQ(before.b(), param.b());
{
absl::uniform_real_distribution<TypeParam> via_param(param);
DistributionType via_param(param);
EXPECT_EQ(via_param, before);
}
std::stringstream ss;
ss << before;
absl::uniform_real_distribution<TypeParam> after(TypeParam(1.0),
TypeParam(3.1));
DistributionType after(real_type(1.0), real_type(3.1));
EXPECT_NE(before.a(), after.a());
EXPECT_NE(before.b(), after.b());
@@ -168,7 +180,7 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
}
}
if (!std::is_same<TypeParam, long double>::value) {
if (!std::is_same<real_type, long double>::value) {
// static_cast<double>(long double) can overflow.
std::string msg = absl::StrCat("Range: ", static_cast<double>(sample_min),
", ", static_cast<double>(sample_max));
@@ -182,33 +194,52 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
#pragma warning(disable:4756) // Constant arithmetic overflow.
#endif
TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) {
using DistributionType = absl::uniform_real_distribution<TypeParam>;
using real_type = TypeParam;
#if GTEST_HAS_DEATH_TEST
// Hi < Lo
EXPECT_DEBUG_DEATH(
{ absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0); }, "");
EXPECT_DEBUG_DEATH({ DistributionType dist(10.0, 1.0); }, "");
// Hi - Lo > numeric_limits<>::max()
EXPECT_DEBUG_DEATH(
{
absl::uniform_real_distribution<TypeParam> dist(
std::numeric_limits<TypeParam>::lowest(),
std::numeric_limits<TypeParam>::max());
DistributionType dist(std::numeric_limits<real_type>::lowest(),
std::numeric_limits<real_type>::max());
},
"");
// kEpsilon guarantees that max + kEpsilon = inf.
const auto kEpsilon = std::nexttoward(
(std::numeric_limits<real_type>::max() -
std::nexttoward(std::numeric_limits<real_type>::max(), 0.0)) /
2,
std::numeric_limits<real_type>::max());
EXPECT_DEBUG_DEATH(
{
DistributionType dist(-kEpsilon, std::numeric_limits<real_type>::max());
},
"");
EXPECT_DEBUG_DEATH(
{
DistributionType dist(std::numeric_limits<real_type>::lowest(),
kEpsilon);
},
"");
#endif // GTEST_HAS_DEATH_TEST
#if defined(NDEBUG)
// opt-mode, for invalid parameters, will generate a garbage value,
// but should not enter an infinite loop.
absl::InsecureBitGen gen;
{
absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0);
DistributionType dist(10.0, 1.0);
auto x = dist(gen);
EXPECT_FALSE(std::isnan(x)) << x;
}
{
absl::uniform_real_distribution<TypeParam> dist(
std::numeric_limits<TypeParam>::lowest(),
std::numeric_limits<TypeParam>::max());
DistributionType dist(std::numeric_limits<real_type>::lowest(),
std::numeric_limits<real_type>::max());
auto x = dist(gen);
// Infinite result.
EXPECT_FALSE(std::isfinite(x)) << x;
@@ -220,6 +251,8 @@ TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) {
#endif
TYPED_TEST(UniformRealDistributionTest, TestMoments) {
using DistributionType = absl::uniform_real_distribution<TypeParam>;
constexpr int kSize = 1000000;
std::vector<double> values(kSize);
@@ -228,7 +261,7 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) {
// implementation.
absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6};
absl::uniform_real_distribution<TypeParam> dist;
DistributionType dist;
for (int i = 0; i < kSize; i++) {
values[i] = dist(rng);
}
@@ -242,9 +275,10 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) {
}
TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
using DistributionType = absl::uniform_real_distribution<TypeParam>;
using param_type = typename DistributionType::param_type;
using absl::random_internal::kChiSquared;
using param_type =
typename absl::uniform_real_distribution<TypeParam>::param_type;
constexpr size_t kTrials = 100000;
constexpr int kBuckets = 50;
@@ -269,7 +303,7 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
const double factor = kBuckets / (max_val - min_val);
std::vector<int32_t> counts(kBuckets, 0);
absl::uniform_real_distribution<TypeParam> dist(param);
DistributionType dist(param);
for (size_t i = 0; i < kTrials; i++) {
auto x = dist(rng);
auto bucket = static_cast<size_t>((x - min_val) * factor);
@@ -297,8 +331,11 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
}
TYPED_TEST(UniformRealDistributionTest, StabilityTest) {
using DistributionType = absl::uniform_real_distribution<TypeParam>;
using real_type = TypeParam;
// absl::uniform_real_distribution stability relies only on
// random_internal::RandU64ToDouble and random_internal::RandU64ToFloat.
// random_internal::GenerateRealFromBits.
absl::random_internal::sequence_urbg urbg(
{0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
@@ -307,9 +344,9 @@ TYPED_TEST(UniformRealDistributionTest, StabilityTest) {
std::vector<int> output(12);
absl::uniform_real_distribution<TypeParam> dist;
DistributionType dist;
std::generate(std::begin(output), std::end(output), [&] {
return static_cast<int>(TypeParam(1000000) * dist(urbg));
return static_cast<int>(real_type(1000000) * dist(urbg));
});
EXPECT_THAT(

View File

@@ -23,13 +23,14 @@
#include <type_traits>
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/traits.h"
#include "absl/random/uniform_real_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// absl::zipf_distribution produces random integer-values in the range [0, k],
// distributed according to the discrete probability function:
// distributed according to the unnormalized discrete probability function:
//
// P(x) = (v + x) ^ -q
//
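As a reference note (not part of the header's text), the weights above are unnormalized; over the support {0, 1, ..., k} the normalized probability is

\[
  P(x) \;=\; \frac{(v + x)^{-q}}{\sum_{y=0}^{k} (v + y)^{-q}},
  \qquad x \in \{0, 1, \dots, k\}.
\]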
@@ -94,7 +95,7 @@ class zipf_distribution {
double hxm_; // h(k + 0.5)
double hx0_minus_hxm_; // h(x0) - h(k + 0.5)
static_assert(std::is_integral<IntType>::value,
static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::zipf_distribution<> must be "
"parameterized using an integral type.");
};
@@ -221,7 +222,7 @@ zipf_distribution<IntType>::operator()(
const double u = p.hxm_ + v * p.hx0_minus_hxm_;
const double x = p.hinv(u);
k = rint(x); // std::floor(x + 0.5);
if (k > p.k()) continue; // reject k > max_k
if (k > static_cast<double>(p.k())) continue; // reject k > max_k
if (k - x <= p.s_) break;
const double h = p.h(k + 0.5);
const double r = p.pow_negative_q(p.v_ + k);

View File

@@ -44,7 +44,7 @@ class ZipfDistributionTypedTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(ZipfDistributionTypedTest, IntTypes);
TYPED_TEST_SUITE(ZipfDistributionTypedTest, IntTypes);
TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) {
using param_type = typename absl::zipf_distribution<TypeParam>::param_type;