diff --git a/hecl/extern/boo b/hecl/extern/boo
index 4c0c01f84..fa45c6750 160000
--- a/hecl/extern/boo
+++ b/hecl/extern/boo
@@ -1 +1 @@
-Subproject commit 4c0c01f84f530cfbd6752d0047a6455e8d1886c4
+Subproject commit fa45c6750a0d9d876341017a7e2b4915afa90369
diff --git a/hecl/include/hecl/BitVector.hpp b/hecl/include/hecl/BitVector.hpp
new file mode 100644
index 000000000..557e9e841
--- /dev/null
+++ b/hecl/include/hecl/BitVector.hpp
@@ -0,0 +1,614 @@
+//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HECL_LLVM_ADT_BITVECTOR_H
+#define HECL_LLVM_ADT_BITVECTOR_H
+
+#include "MathExtras.hpp"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+namespace hecl {
+namespace llvm {
+
+class BitVector {
+  typedef unsigned long BitWord;
+
+  enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
+
+  static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
+                "Unsupported word size");
+
+  BitWord *Bits;     // Actual bits.
+  unsigned Size;     // Size of bitvector in bits.
+  unsigned Capacity; // Number of BitWords allocated in the Bits array.
+
+public:
+  typedef unsigned size_type;
+  // Encapsulation of a single bit.
+  class reference {
+    friend class BitVector;
+
+    BitWord *WordRef;
+    unsigned BitPos;
+
+    reference(); // Undefined
+
+  public:
+    reference(BitVector &b, unsigned Idx) {
+      WordRef = &b.Bits[Idx / BITWORD_SIZE];
+      BitPos = Idx % BITWORD_SIZE;
+    }
+
+    reference(const reference&) = default;
+
+    reference &operator=(reference t) {
+      *this = bool(t);
+      return *this;
+    }
+
+    reference& operator=(bool t) {
+      if (t)
+        *WordRef |= BitWord(1) << BitPos;
+      else
+        *WordRef &= ~(BitWord(1) << BitPos);
+      return *this;
+    }
+
+    operator bool() const {
+      return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
+    }
+  };
+
+
+  /// BitVector default ctor - Creates an empty bitvector.
+  BitVector() : Size(0), Capacity(0) {
+    Bits = nullptr;
+  }
+
+  /// BitVector ctor - Creates a bitvector of specified number of bits. All
+  /// bits are initialized to the specified value.
+  explicit BitVector(unsigned s, bool t = false) : Size(s) {
+    Capacity = NumBitWords(s);
+    Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+    init_words(Bits, Capacity, t);
+    if (t)
+      clear_unused_bits();
+  }
+
+  /// BitVector copy ctor.
+  BitVector(const BitVector &RHS) : Size(RHS.size()) {
+    if (Size == 0) {
+      Bits = nullptr;
+      Capacity = 0;
+      return;
+    }
+
+    Capacity = NumBitWords(RHS.size());
+    Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+    std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
+  }
+
+  BitVector(BitVector &&RHS)
+      : Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
+    RHS.Bits = nullptr;
+    RHS.Size = RHS.Capacity = 0;
+  }
+
+  ~BitVector() {
+    std::free(Bits);
+  }
+
+  /// empty - Tests whether there are no bits in this bitvector.
+  bool empty() const { return Size == 0; }
+
+  /// size - Returns the number of bits in this bitvector.
+  size_type size() const { return Size; }
+
+  /// count - Returns the number of bits which are set.
+  size_type count() const {
+    unsigned NumBits = 0;
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      NumBits += countPopulation(Bits[i]);
+    return NumBits;
+  }
+
+  /// any - Returns true if any bit is set.
+  bool any() const {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      if (Bits[i] != 0)
+        return true;
+    return false;
+  }
+
+  /// all - Returns true if all bits are set.
+  bool all() const {
+    for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
+      if (Bits[i] != ~0UL)
+        return false;
+
+    // If bits remain check that they are ones. The unused bits are always zero.
+    if (unsigned Remainder = Size % BITWORD_SIZE)
+      return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
+
+    return true;
+  }
+
+  /// none - Returns true if none of the bits are set.
+  bool none() const {
+    return !any();
+  }
+
+  /// find_first - Returns the index of the first set bit, -1 if none
+  /// of the bits are set.
+  int find_first() const {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    return -1;
+  }
+
+  /// find_next - Returns the index of the next set bit following the
+  /// "Prev" bit. Returns -1 if the next set bit is not found.
+  int find_next(unsigned Prev) const {
+    ++Prev;
+    if (Prev >= Size)
+      return -1;
+
+    unsigned WordPos = Prev / BITWORD_SIZE;
+    unsigned BitPos = Prev % BITWORD_SIZE;
+    BitWord Copy = Bits[WordPos];
+    // Mask off previous bits.
+    Copy &= ~0UL << BitPos;
+
+    if (Copy != 0)
+      return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+    // Check subsequent words.
+    for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i)
+      if (Bits[i] != 0)
+        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+    return -1;
+  }
+
+  /// find_first_contiguous - Returns the index of the first contiguous
+  /// set of bits of "Length", -1 if no contiguous bits found.
+  int find_first_contiguous(unsigned Length) const {
+    for (int idx = find_first(); idx != -1; idx = find_next(idx)) {
+      if (idx + Length > size())
+        return -1;
+      bool good = true;
+      for (unsigned i = 0; i < Length; ++i) {
+        int ThisIdx = idx + i;
+        if (!test(ThisIdx)) {
+          good = false;
+          idx = ThisIdx;
+          break;
+        }
+      }
+      if (good)
+        return idx;
+    }
+    return -1;
+  }
+
+  /// clear - Clear all bits.
+  void clear() {
+    Size = 0;
+  }
+
+  /// resize - Grow or shrink the bitvector.
+  void resize(unsigned N, bool t = false) {
+    if (N > Capacity * BITWORD_SIZE) {
+      unsigned OldCapacity = Capacity;
+      grow(N);
+      init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t);
+    }
+
+    // Set any old unused bits that are now included in the BitVector. This
+    // may set bits that are not included in the new vector, but we will clear
+    // them back out below.
+    if (N > Size)
+      set_unused_bits(t);
+
+    // Update the size, and clear out any bits that are now unused
+    unsigned OldSize = Size;
+    Size = N;
+    if (t || N < OldSize)
+      clear_unused_bits();
+  }
+
+  void reserve(unsigned N) {
+    if (N > Capacity * BITWORD_SIZE)
+      grow(N);
+  }
+
+  // Set, reset, flip
+  BitVector &set() {
+    init_words(Bits, Capacity, true);
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &set(unsigned Idx) {
+    assert(Bits && "Bits never allocated");
+    Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  /// set - Efficiently set a range of bits in [I, E)
+  BitVector &set(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to set backwards range!");
+    assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] |= Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] |= PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = ~0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] |= PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &reset() {
+    init_words(Bits, Capacity, false);
+    return *this;
+  }
+
+  BitVector &reset(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
+    return *this;
+  }
+
+  /// reset - Efficiently reset a range of bits in [I, E)
+  BitVector &reset(unsigned I, unsigned E) {
+    assert(I <= E && "Attempted to reset backwards range!");
+    assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+    if (I == E) return *this;
+
+    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+      BitWord EMask = 1UL << (E % BITWORD_SIZE);
+      BitWord IMask = 1UL << (I % BITWORD_SIZE);
+      BitWord Mask = EMask - IMask;
+      Bits[I / BITWORD_SIZE] &= ~Mask;
+      return *this;
+    }
+
+    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+    Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+    I = alignTo(I, BITWORD_SIZE);
+
+    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+      Bits[I / BITWORD_SIZE] = 0UL;
+
+    BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+    if (I < E)
+      Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+
+    return *this;
+  }
+
+  BitVector &flip() {
+    for (unsigned i = 0; i < NumBitWords(size()); ++i)
+      Bits[i] = ~Bits[i];
+    clear_unused_bits();
+    return *this;
+  }
+
+  BitVector &flip(unsigned Idx) {
+    Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
+    return *this;
+  }
+
+  // Indexing.
+  reference operator[](unsigned Idx) {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    return reference(*this, Idx);
+  }
+
+  bool operator[](unsigned Idx) const {
+    assert (Idx < Size && "Out-of-bounds Bit access.");
+    BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
+    return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
+  }
+
+  bool test(unsigned Idx) const {
+    return (*this)[Idx];
+  }
+
+  /// Test if any common bits are set.
+  bool anyCommon(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords = NumBitWords(RHS.size());
+    for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+      if (Bits[i] & RHS.Bits[i])
+        return true;
+    return false;
+  }
+
+  // Comparison operators.
+  bool operator==(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if (Bits[i] != RHS.Bits[i])
+        return false;
+
+    // Verify that any extra words are all zeros.
+    if (i != ThisWords) {
+      for (; i != ThisWords; ++i)
+        if (Bits[i])
+          return false;
+    } else if (i != RHSWords) {
+      for (; i != RHSWords; ++i)
+        if (RHS.Bits[i])
+          return false;
+    }
+    return true;
+  }
+
+  bool operator!=(const BitVector &RHS) const {
+    return !(*this == RHS);
+  }
+
+  /// Intersection, union, disjoint union.
+  BitVector &operator&=(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= RHS.Bits[i];
+
+    // Any bits that are just in this bitvector become zero, because they aren't
+    // in the RHS bit vector. Any words only in RHS are ignored because they
+    // are already zero in the LHS.
+    for (; i != ThisWords; ++i)
+      Bits[i] = 0;
+
+    return *this;
+  }
+
+  /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+  BitVector &reset(const BitVector &RHS) {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      Bits[i] &= ~RHS.Bits[i];
+    return *this;
+  }
+
+  /// test - Check if (This - RHS) is zero.
+  /// This is the same as reset(RHS) and any().
+  bool test(const BitVector &RHS) const {
+    unsigned ThisWords = NumBitWords(size());
+    unsigned RHSWords = NumBitWords(RHS.size());
+    unsigned i;
+    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+      if ((Bits[i] & ~RHS.Bits[i]) != 0)
+        return true;
+
+    for (; i != ThisWords ; ++i)
+      if (Bits[i] != 0)
+        return true;
+
+    return false;
+  }
+
+  BitVector &operator|=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] |= RHS.Bits[i];
+    return *this;
+  }
+
+  BitVector &operator^=(const BitVector &RHS) {
+    if (size() < RHS.size())
+      resize(RHS.size());
+    for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+      Bits[i] ^= RHS.Bits[i];
+    return *this;
+  }
+
+  // Assignment operator.
+  const BitVector &operator=(const BitVector &RHS) {
+    if (this == &RHS) return *this;
+
+    Size = RHS.size();
+    unsigned RHSWords = NumBitWords(Size);
+    if (Size <= Capacity * BITWORD_SIZE) {
+      if (Size)
+        std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
+      clear_unused_bits();
+      return *this;
+    }
+
+    // Grow the bitvector to have enough elements.
+    Capacity = RHSWords;
+    assert(Capacity > 0 && "negative capacity?");
+    BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+    std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
+
+    // Destroy the old bits.
+    std::free(Bits);
+    Bits = NewBits;
+
+    return *this;
+  }
+
+  const BitVector &operator=(BitVector &&RHS) {
+    if (this == &RHS) return *this;
+
+    std::free(Bits);
+    Bits = RHS.Bits;
+    Size = RHS.Size;
+    Capacity = RHS.Capacity;
+
+    RHS.Bits = nullptr;
+    RHS.Size = RHS.Capacity = 0;
+
+    return *this;
+  }
+
+  void swap(BitVector &RHS) {
+    std::swap(Bits, RHS.Bits);
+    std::swap(Size, RHS.Size);
+    std::swap(Capacity, RHS.Capacity);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Portable bit mask operations.
+  //===--------------------------------------------------------------------===//
+  //
+  // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+  // fixed word size makes it easier to work with literal bit vector constants
+  // in portable code.
+  //
+  // The LSB in each word is the lowest numbered bit. The size of a portable
+  // bit mask is always a whole multiple of 32 bits. If no bit mask size is
+  // given, the bit mask is assumed to cover the entire BitVector.
+
+  /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+  /// This computes "*this |= Mask".
+  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, false>(Mask, MaskWords);
+  }
+
+  /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+  /// Don't resize. This computes "*this &= ~Mask".
+  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, false>(Mask, MaskWords);
+  }
+
+  /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+  /// Don't resize. This computes "*this |= ~Mask".
+  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<true, true>(Mask, MaskWords);
+  }
+
+  /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+  /// Don't resize. This computes "*this &= Mask".
+  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+    applyMask<false, true>(Mask, MaskWords);
+  }
+
+private:
+  unsigned NumBitWords(unsigned S) const {
+    return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
+  }
+
+  // Set the unused bits in the high words.
+  void set_unused_bits(bool t = true) {
+    // Set high words first.
+    unsigned UsedWords = NumBitWords(Size);
+    if (Capacity > UsedWords)
+      init_words(&Bits[UsedWords], (Capacity-UsedWords), t);
+
+    // Then set any stray high bits of the last used word.
+    unsigned ExtraBits = Size % BITWORD_SIZE;
+    if (ExtraBits) {
+      BitWord ExtraBitMask = ~0UL << ExtraBits;
+      if (t)
+        Bits[UsedWords-1] |= ExtraBitMask;
+      else
+        Bits[UsedWords-1] &= ~ExtraBitMask;
+    }
+  }
+
+  // Clear the unused bits in the high words.
+  void clear_unused_bits() {
+    set_unused_bits(false);
+  }
+
+  void grow(unsigned NewSize) {
+    Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
+    assert(Capacity > 0 && "realloc-ing zero space");
+    Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
+
+    clear_unused_bits();
+  }
+
+  void init_words(BitWord *B, unsigned NumWords, bool t) {
+    memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
+  }
+
+  template <bool AddBits, bool InvertMask>
+  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+    static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
+    MaskWords = std::min(MaskWords, (size() + 31) / 32);
+    const unsigned Scale = BITWORD_SIZE / 32;
+    unsigned i;
+    for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+      BitWord BW = Bits[i];
+      // This inner loop should unroll completely when BITWORD_SIZE > 32.
+      for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+        uint32_t M = *Mask++;
+        if (InvertMask) M = ~M;
+        if (AddBits) BW |= BitWord(M) << b;
+        else         BW &= ~(BitWord(M) << b);
+      }
+      Bits[i] = BW;
+    }
+    for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+      uint32_t M = *Mask++;
+      if (InvertMask) M = ~M;
+      if (AddBits) Bits[i] |= BitWord(M) << b;
+      else         Bits[i] &= ~(BitWord(M) << b);
+    }
+    if (AddBits)
+      clear_unused_bits();
+  }
+
+public:
+  /// Return the size (in bytes) of the bit vector.
+  size_t getMemorySize() const { return Capacity * sizeof(BitWord); }
+};
+
+static inline size_t capacity_in_bytes(const BitVector &X) {
+  return X.getMemorySize();
+}
+
+} // end namespace llvm
+} // end namespace hecl
+
+namespace std {
+  /// Implement std::swap in terms of BitVector swap.
+  inline void
+  swap(hecl::llvm::BitVector &LHS, hecl::llvm::BitVector &RHS) {
+    LHS.swap(RHS);
+  }
+} // end namespace std
+
+#endif // HECL_LLVM_ADT_BITVECTOR_H
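For reference, a minimal usage sketch of the vendored BitVector (illustrative only, not part of the patch; assumes the header builds standalone). `find_first_contiguous` is the addition relative to upstream LLVM; the mask operations consume arrays of uint32_t words as documented above:

// Illustrative sketch, not part of this diff.
#include "hecl/BitVector.hpp"
#include <cassert>
#include <cstdint>

int main() {
  hecl::llvm::BitVector bv(128);          // 128 bits, all clear
  bv.set(8, 16);                          // set the range [8, 16)
  assert(bv.count() == 8);
  assert(bv.find_first() == 8);
  assert(bv.find_first_contiguous(8) == 8);

  // Portable mask ops operate on uint32_t words, LSB first.
  const uint32_t mask[] = {0x0000FF00u};  // bits 8..15 of word 0
  bv.clearBitsInMask(mask, 1);            // *this &= ~mask
  assert(bv.none());
  return 0;
}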
diff --git a/hecl/include/hecl/MathExtras.hpp b/hecl/include/hecl/MathExtras.hpp
new file mode 100644
index 000000000..4c927c20e
--- /dev/null
+++ b/hecl/include/hecl/MathExtras.hpp
@@ -0,0 +1,853 @@
+//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful for math stuff.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HECL_LLVM_SUPPORT_MATHEXTRAS_H
+#define HECL_LLVM_SUPPORT_MATHEXTRAS_H
+
+/// \macro LLVM_GNUC_PREREQ
+/// \brief Extend the default __GNUC_PREREQ even if glibc's features.h isn't
+/// available.
+#ifndef LLVM_GNUC_PREREQ
+# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#  define LLVM_GNUC_PREREQ(maj, min, patch)                                    \
+    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >=        \
+     ((maj) << 20) + ((min) << 10) + (patch))
+# elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#  define LLVM_GNUC_PREREQ(maj, min, patch)                                    \
+    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
+# else
+#  define LLVM_GNUC_PREREQ(maj, min, patch) 0
+# endif
+#endif
+
+#include "hecl.hpp"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <type_traits>
+#include <limits>
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef __ANDROID_NDK__
+#include <android/api-level.h>
+#endif
+
+namespace hecl {
+namespace llvm {
+/// \brief The behavior an operation has on an input of 0.
+enum ZeroBehavior {
+  /// \brief The returned value is undefined.
+  ZB_Undefined,
+  /// \brief The returned value is numeric_limits<T>::max()
+  ZB_Max,
+  /// \brief The returned value is numeric_limits<T>::digits
+  ZB_Width
+};
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
+  static std::size_t count(T Val, ZeroBehavior) {
+    if (!Val)
+      return std::numeric_limits<T>::digits;
+    if (Val & 0x1)
+      return 0;
+
+    // Bisection method.
+    std::size_t ZeroBits = 0;
+    T Shift = std::numeric_limits<T>::digits >> 1;
+    T Mask = std::numeric_limits<T>::max() >> Shift;
+    while (Shift) {
+      if ((Val & Mask) == 0) {
+        Val >>= Shift;
+        ZeroBits |= Shift;
+      }
+      Shift >>= 1;
+      Mask >>= Shift;
+    }
+    return ZeroBits;
+  }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct TrailingZerosCounter<T, 4> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 32;
+
+#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_ctz(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanForward(&Index, Val);
+    return Index;
+#endif
+  }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct TrailingZerosCounter<T, 8> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 64;
+
+#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_ctzll(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanForward64(&Index, Val);
+    return Index;
+#endif
+  }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count number of 0's from the least significant bit to the most
+/// stopping at the first 1.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+/// valid arguments.
+template <typename T>
+std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
+  static std::size_t count(T Val, ZeroBehavior) {
+    if (!Val)
+      return std::numeric_limits<T>::digits;
+
+    // Bisection method.
+    std::size_t ZeroBits = 0;
+    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
+      T Tmp = Val >> Shift;
+      if (Tmp)
+        Val = Tmp;
+      else
+        ZeroBits |= Shift;
+    }
+    return ZeroBits;
+  }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct LeadingZerosCounter<T, 4> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 32;
+
+#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_clz(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanReverse(&Index, Val);
+    return Index ^ 31;
+#endif
+  }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct LeadingZerosCounter<T, 8> {
+  static std::size_t count(T Val, ZeroBehavior ZB) {
+    if (ZB != ZB_Undefined && Val == 0)
+      return 64;
+
+#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+    return __builtin_clzll(Val);
+#elif defined(_MSC_VER)
+    unsigned long Index;
+    _BitScanReverse64(&Index, Val);
+    return Index ^ 63;
+#endif
+  }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count number of 0's from the most significant bit to the least
+/// stopping at the first 1.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+/// valid arguments.
+template <typename T>
+std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
+
+/// \brief Get the index of the first set bit starting from the least
+/// significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+/// valid arguments.
+template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
+  if (ZB == ZB_Max && Val == 0)
+    return std::numeric_limits<T>::max();
+
+  return countTrailingZeros(Val, ZB_Undefined);
+}
+
+/// \brief Get the index of the last set bit starting from the least
+/// significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+/// valid arguments.
+template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
+  if (ZB == ZB_Max && Val == 0)
+    return std::numeric_limits<T>::max();
+
+  // Use ^ instead of - because both gcc and llvm can remove the associated ^
+  // in the __builtin_clz intrinsic on x86.
+  return countLeadingZeros(Val, ZB_Undefined) ^
+         (std::numeric_limits<T>::digits - 1);
+}
+
+/// \brief Macro compressed bit reversal table for 256 bits.
+///
+/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+static const unsigned char BitReverseTable256[256] = {
+#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
+#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
+#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
+  R6(0), R6(2), R6(1), R6(3)
+#undef R2
+#undef R4
+#undef R6
+};
+
+/// \brief Reverse the bits in \p Val.
+template <typename T>
+T reverseBits(T Val) {
+  unsigned char in[sizeof(Val)];
+  unsigned char out[sizeof(Val)];
+  std::memcpy(in, &Val, sizeof(Val));
+  for (unsigned i = 0; i < sizeof(Val); ++i)
+    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
+  std::memcpy(&Val, out, sizeof(Val));
+  return Val;
+}
+
+// NOTE: The following support functions use the _32/_64 extensions instead of
+// type overloading so that signed and unsigned integers can be used without
+// ambiguity.
+
+/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
+constexpr inline uint32_t Hi_32(uint64_t Value) {
+  return static_cast<uint32_t>(Value >> 32);
+}
+
+/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
+constexpr inline uint32_t Lo_32(uint64_t Value) {
+  return static_cast<uint32_t>(Value);
+}
+
+/// Make_64 - This functions makes a 64-bit integer from a high / low pair of
+/// 32-bit integers.
+constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
+  return ((uint64_t)High << 32) | (uint64_t)Low;
+}
+
+/// isInt - Checks if an integer fits into the given bit width.
+template <unsigned N> constexpr inline bool isInt(int64_t x) {
+  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+// Template specializations to get better code for common cases.
+template <> constexpr inline bool isInt<8>(int64_t x) {
+  return static_cast<int8_t>(x) == x;
+}
+template <> constexpr inline bool isInt<16>(int64_t x) {
+  return static_cast<int16_t>(x) == x;
+}
+template <> constexpr inline bool isInt<32>(int64_t x) {
+  return static_cast<int32_t>(x) == x;
+}
+
+/// isShiftedInt - Checks if a signed integer is an N bit number shifted
+/// left by S.
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedInt(int64_t x) {
+  static_assert(
+      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
+  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
+  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
+}
+
+/// isUInt - Checks if an unsigned integer fits into the given bit width.
+///
+/// This is written as two functions rather than as simply
+///
+///   return N >= 64 || X < (UINT64_C(1) << N);
+///
+/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
+/// left too many places.
+template <unsigned N>
+constexpr inline typename std::enable_if<(N < 64), bool>::type
+isUInt(uint64_t X) {
+  static_assert(N > 0, "isUInt<0> doesn't make sense");
+  return X < (UINT64_C(1) << (N));
+}
+template <unsigned N>
+constexpr inline typename std::enable_if<N >= 64, bool>::type
+isUInt(uint64_t X) {
+  return true;
+}
+
+// Template specializations to get better code for common cases.
+template <> constexpr inline bool isUInt<8>(uint64_t x) {
+  return static_cast<uint8_t>(x) == x;
+}
+template <> constexpr inline bool isUInt<16>(uint64_t x) {
+  return static_cast<uint16_t>(x) == x;
+}
+template <> constexpr inline bool isUInt<32>(uint64_t x) {
+  return static_cast<uint32_t>(x) == x;
+}
+
+/// Checks if an unsigned integer is an N bit number shifted left by S.
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedUInt(uint64_t x) {
+  static_assert(
+      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
+  static_assert(N + S <= 64,
+                "isShiftedUInt<N, S> with N + S > 64 is too wide.");
+  // Per the two static_asserts above, S must be strictly less than 64. So
+  // 1 << S is not undefined behavior.
+  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
+}
+
+/// Gets the maximum value for a N-bit unsigned integer.
+inline uint64_t maxUIntN(uint64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  // uint64_t(1) << 64 is undefined behavior, so we can't do
+  //   (uint64_t(1) << N) - 1
+  // without checking first that N != 64. But this works and doesn't have a
+  // branch.
+  return UINT64_MAX >> (64 - N);
+}
+
+/// Gets the minimum value for a N-bit signed integer.
+inline int64_t minIntN(int64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  return -(UINT64_C(1)<<(N-1));
+}
+
+/// Gets the maximum value for a N-bit signed integer.
+inline int64_t maxIntN(int64_t N) {
+  assert(N > 0 && N <= 64 && "integer width out of range");
+
+  // This relies on two's complement wraparound when N == 64, so we convert to
+  // int64_t only at the very end to avoid UB.
+  return (UINT64_C(1) << (N - 1)) - 1;
+}
+
+/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
+/// bit width.
+inline bool isUIntN(unsigned N, uint64_t x) {
+  return N >= 64 || x <= maxUIntN(N);
+}
+
+/// isIntN - Checks if an signed integer fits into the given (dynamic)
+/// bit width.
+inline bool isIntN(unsigned N, int64_t x) {
+  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
+}
+
+/// isMask_32 - This function returns true if the argument is a non-empty
+/// sequence of ones starting at the least significant bit with the remainder
+/// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true.
+constexpr inline bool isMask_32(uint32_t Value) {
+  return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isMask_64 - This function returns true if the argument is a non-empty
+/// sequence of ones starting at the least significant bit with the remainder
+/// zero (64 bit version).
+constexpr inline bool isMask_64(uint64_t Value) {
+  return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isShiftedMask_32 - This function returns true if the argument contains a
+/// non-empty sequence of ones with the remainder zero (32 bit version.)
+/// Ex. isShiftedMask_32(0x0000FF00U) == true.
+constexpr inline bool isShiftedMask_32(uint32_t Value) {
+  return Value && isMask_32((Value - 1) | Value);
+}
+
+/// isShiftedMask_64 - This function returns true if the argument contains a
+/// non-empty sequence of ones with the remainder zero (64 bit version.)
+constexpr inline bool isShiftedMask_64(uint64_t Value) {
+  return Value && isMask_64((Value - 1) | Value);
+}
+
+/// isPowerOf2_32 - This function returns true if the argument is a power of
+/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
+constexpr inline bool isPowerOf2_32(uint32_t Value) {
+  return Value && !(Value & (Value - 1));
+}
+
+/// isPowerOf2_64 - This function returns true if the argument is a power of two
+/// > 0 (64 bit edition.)
+constexpr inline bool isPowerOf2_64(uint64_t Value) {
+  return Value && !(Value & (Value - int64_t(1L)));
+}
+
+/// ByteSwap_16 - This function returns a byte-swapped representation of the
+/// 16-bit argument, Value.
+inline uint16_t ByteSwap_16(uint16_t Value) {
+  return hecl::bswap16(Value);
+}
+
+/// ByteSwap_32 - This function returns a byte-swapped representation of the
+/// 32-bit argument, Value.
+inline uint32_t ByteSwap_32(uint32_t Value) {
+  return hecl::bswap32(Value);
+}
+
+/// ByteSwap_64 - This function returns a byte-swapped representation of the
+/// 64-bit argument, Value.
+inline uint64_t ByteSwap_64(uint64_t Value) {
+  return hecl::bswap64(Value);
+}
+
+/// \brief Count the number of ones from the most significant bit to the first
+/// zero bit.
+///
+/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return countLeadingZeros(~Value, ZB);
+}
+
+/// \brief Count the number of ones from the least significant bit to the first
+/// zero bit.
+///
+/// Ex. countTrailingOnes(0x00FF00FF) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return countTrailingZeros(~Value, ZB);
+}
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct PopulationCounter {
+  static unsigned count(T Value) {
+    // Generic version, forward to 32 bits.
+    static_assert(SizeOfT <= 4, "Not implemented!");
+#if __GNUC__ >= 4
+    return __builtin_popcount(Value);
+#else
+    uint32_t v = Value;
+    v = v - ((v >> 1) & 0x55555555);
+    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
+#endif
+  }
+};
+
+template <typename T> struct PopulationCounter<T, 8> {
+  static unsigned count(T Value) {
+#if __GNUC__ >= 4
+    return __builtin_popcountll(Value);
+#else
+    uint64_t v = Value;
+    v = v - ((v >> 1) & 0x5555555555555555ULL);
+    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
+#endif
+  }
+};
+} // namespace detail
+
+/// \brief Count the number of set bits in a value.
+/// Ex. countPopulation(0xF000F000) = 8
+/// Returns 0 if the word is zero.
+template <typename T>
+inline unsigned countPopulation(T Value) {
+  static_assert(std::numeric_limits<T>::is_integer &&
+                    !std::numeric_limits<T>::is_signed,
+                "Only unsigned integral types are allowed.");
+  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
+}
+
+/// Log2 - This function returns the log base 2 of the specified value
+inline double Log2(double Value) {
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
+  return __builtin_log(Value) / __builtin_log(2.0);
+#else
+  return log2(Value);
+#endif
+}
+
+/// Log2_32 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (32 bit edition.)
+/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
+inline unsigned Log2_32(uint32_t Value) {
+  return 31 - countLeadingZeros(Value);
+}
+
+/// Log2_64 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64(uint64_t Value) {
+  return 63 - countLeadingZeros(Value);
+}
+
+/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 32 if the value is zero. (32 bit edition).
+/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
+inline unsigned Log2_32_Ceil(uint32_t Value) {
+  return 32 - countLeadingZeros(Value - 1);
+}
+
+/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 64 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64_Ceil(uint64_t Value) {
+  return 64 - countLeadingZeros(Value - 1);
+}
+
+/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
+/// values using Euclid's algorithm.
+inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
+  while (B) {
+    uint64_t T = B;
+    B = A % B;
+    A = T;
+  }
+  return A;
+}
+
+/// BitsToDouble - This function takes a 64-bit integer and returns the bit
+/// equivalent double.
+inline double BitsToDouble(uint64_t Bits) {
+  union {
+    uint64_t L;
+    double D;
+  } T;
+  T.L = Bits;
+  return T.D;
+}
+
+/// BitsToFloat - This function takes a 32-bit integer and returns the bit
+/// equivalent float.
+inline float BitsToFloat(uint32_t Bits) {
+  union {
+    uint32_t I;
+    float F;
+  } T;
+  T.I = Bits;
+  return T.F;
+}
+
+/// DoubleToBits - This function takes a double and returns the bit
+/// equivalent 64-bit integer. Note that copying doubles around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint64_t DoubleToBits(double Double) {
+  union {
+    uint64_t L;
+    double D;
+  } T;
+  T.D = Double;
+  return T.L;
+}
+
+/// FloatToBits - This function takes a float and returns the bit
+/// equivalent 32-bit integer. Note that copying floats around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint32_t FloatToBits(float Float) {
+  union {
+    uint32_t I;
+    float F;
+  } T;
+  T.F = Float;
+  return T.I;
+}
+
+/// MinAlign - A and B are either alignments or offsets. Return the minimum
+/// alignment that may be assumed after adding the two together.
+constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+  // The largest power of 2 that divides both A and B.
+  //
+  // Replace "-Value" by "1+~Value" in the following commented code to avoid
+  // MSVC warning C4146
+  //    return (A | B) & -(A | B);
+  return (A | B) & (1 + ~(A | B));
+}
+
+/// \brief Aligns \c Addr to \c Alignment bytes, rounding up.
+///
+/// Alignment should be a power of two. This method rounds up, so
+/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
+inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
+  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
+         "Alignment is not a power of two!");
+
+  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
+
+  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
+}
+
+/// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment
+/// bytes, rounding up.
+inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
+  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
+}
+
+/// NextPowerOf2 - Returns the next power of two (in 64-bits)
+/// that is strictly greater than A. Returns zero on overflow.
+inline uint64_t NextPowerOf2(uint64_t A) {
+  A |= (A >> 1);
+  A |= (A >> 2);
+  A |= (A >> 4);
+  A |= (A >> 8);
+  A |= (A >> 16);
+  A |= (A >> 32);
+  return A + 1;
+}
+
+/// Returns the power of two which is less than or equal to the given value.
+/// Essentially, it is a floor operation across the domain of powers of two.
+inline uint64_t PowerOf2Floor(uint64_t A) {
+  if (!A) return 0;
+  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
+}
+
+/// Returns the power of two which is greater than or equal to the given value.
+/// Essentially, it is a ceil operation across the domain of powers of two.
+inline uint64_t PowerOf2Ceil(uint64_t A) {
+  if (!A)
+    return 0;
+  return NextPowerOf2(A - 1);
+}
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
+///
+/// If non-zero \p Skew is specified, the return value will be a minimal
+/// integer that is greater than or equal to \p Value and equal to
+/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
+/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
+///
+/// Examples:
+/// \code
+///   alignTo(5, 8) = 8
+///   alignTo(17, 8) = 24
+///   alignTo(~0LL, 8) = 0
+///   alignTo(321, 255) = 510
+///
+///   alignTo(5, 8, 7) = 7
+///   alignTo(17, 8, 1) = 17
+///   alignTo(~0LL, 8, 3) = 3
+///   alignTo(321, 255, 42) = 552
+/// \endcode
+inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+  assert(Align != 0u && "Align can't be 0.");
+  Skew %= Align;
+  return (Value + Align - 1 - Skew) / Align * Align + Skew;
+}
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
+template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
+  static_assert(Align != 0u, "Align must be non-zero");
+  return (Value + Align - 1) / Align * Align;
+}
+
+/// \c alignTo for contexts where a constant expression is required.
+/// \sa alignTo
+///
+/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
+template <uint64_t Align>
+struct AlignTo {
+  static_assert(Align != 0u, "Align must be non-zero");
+  template <uint64_t Value>
+  struct from_value {
+    static const uint64_t value = (Value + Align - 1) / Align * Align;
+  };
+};
+
+/// Returns the largest uint64_t less than or equal to \p Value and is
+/// \p Skew mod \p Align. \p Align must be non-zero
+inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+  assert(Align != 0u && "Align can't be 0.");
+  Skew %= Align;
+  return (Value - Skew) / Align * Align + Skew;
+}
+
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
+inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
+  return alignTo(Value, Align) - Value;
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
+/// Requires 0 < B <= 32.
+template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
+  static_assert(B > 0, "Bit width can't be 0.");
+  static_assert(B <= 32, "Bit width out of range.");
+  return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+  assert(B > 0 && "Bit width can't be 0.");
+  assert(B <= 32 && "Bit width out of range.");
+  return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
+/// Requires 0 < B <= 64.
+template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
+  static_assert(B > 0, "Bit width can't be 0.");
+  static_assert(B <= 64, "Bit width out of range.");
+  return int64_t(x << (64 - B)) >> (64 - B);
+}
+
+/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+  assert(B > 0 && "Bit width can't be 0.");
+  assert(B <= 64 && "Bit width out of range.");
+  return int64_t(X << (64 - B)) >> (64 - B);
+}
+
+/// Subtract two unsigned integers, X and Y, of type T and return the absolute
+/// value of the result.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+AbsoluteDifference(T X, T Y) {
+  return std::max(X, Y) - std::min(X, Y);
+}
+
+/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
+/// maximum representable value of T on overflow. ResultOverflowed indicates if
+/// the result is larger than the maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+  // Hacker's Delight, p. 29
+  T Z = X + Y;
+  Overflowed = (Z < X || Z < Y);
+  if (Overflowed)
+    return std::numeric_limits<T>::max();
+  else
+    return Z;
+}
+
+/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
+/// maximum representable value of T on overflow. ResultOverflowed indicates if
+/// the result is larger than the maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+
+  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
+  // because it fails for uint16_t (where multiplication can have undefined
+  // behavior due to promotion to int), and requires a division in addition
+  // to the multiplication.
+
+  Overflowed = false;
+
+  // Log2(Z) would be either Log2Z or Log2Z + 1.
+  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
+  // will necessarily be less than Log2Max as desired.
+  int Log2Z = Log2_64(X) + Log2_64(Y);
+  const T Max = std::numeric_limits<T>::max();
+  int Log2Max = Log2_64(Max);
+  if (Log2Z < Log2Max) {
+    return X * Y;
+  }
+  if (Log2Z > Log2Max) {
+    Overflowed = true;
+    return Max;
+  }
+
+  // We're going to use the top bit, and maybe overflow one
+  // bit past it. Multiply all but the bottom bit then add
+  // that on at the end.
+  T Z = (X >> 1) * Y;
+  if (Z & ~(Max >> 1)) {
+    Overflowed = true;
+    return Max;
+  }
+  Z <<= 1;
+  if (X & 1)
+    return SaturatingAdd(Z, Y, ResultOverflowed);
+
+  return Z;
+}
+
+/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
+/// the product. Clamp the result to the maximum representable value of T on
+/// overflow. ResultOverflowed indicates if the result is larger than the
+/// maximum representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
+  bool Dummy;
+  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+
+  T Product = SaturatingMultiply(X, Y, &Overflowed);
+  if (Overflowed)
+    return Product;
+
+  return SaturatingAdd(A, Product, &Overflowed);
+}
+
+/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
+extern const float huge_valf;
+} // End llvm namespace
+} // End hecl namespace
+
+#endif
diff --git a/hecl/lib/CMakeLists.txt b/hecl/lib/CMakeLists.txt
index 77f6e7379..ac90f4726 100644
--- a/hecl/lib/CMakeLists.txt
+++ b/hecl/lib/CMakeLists.txt
@@ -47,6 +47,8 @@ add_library(hecl-common
   ../include/hecl/Database.hpp
   ../include/hecl/Runtime.hpp
   ../include/hecl/ClientProcess.hpp
+  ../include/hecl/BitVector.hpp
+  ../include/hecl/MathExtras.hpp
   ClientProcess.cpp
   atdna_HMDLMeta.cpp
   atdna_Frontend.cpp
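And a small sanity-check sketch for a few of the MathExtras helpers (again illustrative only, not part of the patch; the expected values follow from the doc comments above):

// Illustrative sketch, not part of this diff.
#include "hecl/MathExtras.hpp"
#include <cassert>
#include <cstdint>

int main() {
  using namespace hecl::llvm;
  assert(countTrailingZeros(0x10u) == 4);
  assert(countLeadingZeros(0x10u) == 27);
  assert(countPopulation(0xF000F000u) == 8);
  assert(alignTo(17, 8) == 24);            // next multiple of 8
  assert(alignDown(17, 8) == 16);          // previous multiple of 8
  assert(isPowerOf2_32(0x00100000u));

  bool ov = false;                         // 200 + 100 clamps at uint8_t max
  assert(SaturatingAdd<uint8_t>(200, 100, &ov) == 255 && ov);
  return 0;
}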