diff --git a/gcc/d/expr.cc b/gcc/d/expr.cc index 9a1aad42d..91cb02f1e 100644 --- a/gcc/d/expr.cc +++ b/gcc/d/expr.cc @@ -42,6 +42,20 @@ along with GCC; see the file COPYING3. If not see #include "d-tree.h" +/* Helper function for floating point identity comparison. Compare + only well-defined bits, ignore padding (e.g. for X86 80bit real). */ + +static tree build_float_identity (tree_code code, tree t1, tree t2) +{ + /* For floating-point values, identity is defined as the bits in the + operands being identical. */ + tree tmemcmp = builtin_decl_explicit (BUILT_IN_MEMCMP); + tree size = size_int (TYPE_PRECISION (TREE_TYPE (t1)) / BITS_PER_UNIT); + + tree result = build_call_expr (tmemcmp, 3, build_address (t1), + build_address (t2), size); + return build_boolop (code, result, integer_zero_node); +} /* Implements the visitor interface to build the GCC trees of all Expression AST classes emitted from the D Front-end. @@ -275,19 +289,23 @@ class ExprVisitor : public Visitor this->result_ = d_convert (build_ctype (e->type), build_boolop (code, t1, t2)); } - else if (tb1->isfloating () && tb1->ty != Tvector) + else if (tb1->iscomplex () && tb1->ty != Tvector) { - /* For floating-point values, identity is defined as the bits in the - operands being identical. */ - tree t1 = d_save_expr (build_expr (e->e1)); - tree t2 = d_save_expr (build_expr (e->e2)); - - tree tmemcmp = builtin_decl_explicit (BUILT_IN_MEMCMP); - tree size = size_int (TYPE_PRECISION (TREE_TYPE (t1)) / BITS_PER_UNIT); + tree e1 = d_save_expr (build_expr (e->e1)); + tree e2 = d_save_expr (build_expr (e->e2)); + tree req = build_float_identity (code, real_part (e1), real_part (e2)); + tree ieq = build_float_identity (code, imaginary_part (e1), imaginary_part (e2)); - tree result = build_call_expr (tmemcmp, 3, build_address (t1), - build_address (t2), size); - this->result_ = build_boolop (code, result, integer_zero_node); + if (code == EQ_EXPR) + this->result_ = build_boolop (TRUTH_ANDIF_EXPR, req, ieq); + else + this->result_ = build_boolop (TRUTH_ORIF_EXPR, req, ieq); + } + else if (tb1->isfloating () && tb1->ty != Tvector) + { + tree e1 = d_save_expr (build_expr (e->e1)); + tree e2 = d_save_expr (build_expr (e->e2)); + this->result_ = build_float_identity (code, e1, e2); } else if (tb1->ty == Tstruct) { diff --git a/gcc/testsuite/gdc.dg/runnable.d b/gcc/testsuite/gdc.dg/runnable.d index 4f1ef76e4..2d33d7d10 100644 --- a/gcc/testsuite/gdc.dg/runnable.d +++ b/gcc/testsuite/gdc.dg/runnable.d @@ -1533,6 +1533,27 @@ void test286() assert(0); } +/******************************************/ +// https://bugzilla.gdcproject.org/show_bug.cgi?id=309 + +void test309() +{ + creal f1 = +0.0 + 0.0i; + creal f2 = +0.0 - 0.0i; + creal f3 = -0.0 + 0.0i; + creal f4 = +0.0 + 0.0i; + + assert(f1 !is f2); + assert(f1 !is f3); + assert(f2 !is f3); + assert(f1 is f4); + + assert(!(f1 is f2)); + assert(!(f1 is f3)); + assert(!(f2 is f3)); + assert(!(f1 !is f4)); +} + /******************************************/ void main() @@ -1570,6 +1591,7 @@ void main() test273(); test285(); test286(); + test309(); printf("Success!\n"); } diff --git a/libphobos/libdruntime/Makefile.am b/libphobos/libdruntime/Makefile.am index 2a4d341a5..7596117db 100644 --- a/libphobos/libdruntime/Makefile.am +++ b/libphobos/libdruntime/Makefile.am @@ -79,16 +79,8 @@ ALL_DRUNTIME_COMPILE_DSOURCES += $(DRUNTIME_DSOURCES_GENERATED) ALL_DRUNTIME_SOURCES = $(ALL_DRUNTIME_COMPILE_DSOURCES) $(DRUNTIME_CSOURCES) \ $(DRUNTIME_SSOURCES) -REAL_DRUNTIME_TEST_LOBJECTS = 
$(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.lo) -REAL_DRUNTIME_TEST_OBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.o) -# Workaround issue # -DRUNTIME_TEST_OBJECTS = $(filter-out rt/util/typeinfo.t.o \ - core/internal/convert.t.o, $(REAL_DRUNTIME_TEST_OBJECTS)) \ - rt/util/typeinfo.o core/internal/convert.o - -DRUNTIME_TEST_LOBJECTS = $(filter-out rt/util/typeinfo.t.lo \ - core/internal/convert.t.lo, $(REAL_DRUNTIME_TEST_LOBJECTS)) \ - rt/util/typeinfo.lo core/internal/convert.lo +DRUNTIME_TEST_LOBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.lo) +DRUNTIME_TEST_OBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.o) # Main library build definitions diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in index 585182f59..a7cad64b3 100644 --- a/libphobos/libdruntime/Makefile.in +++ b/libphobos/libdruntime/Makefile.in @@ -658,16 +658,8 @@ ALL_DRUNTIME_COMPILE_DSOURCES = $(DRUNTIME_DSOURCES) $(am__append_1) \ ALL_DRUNTIME_SOURCES = $(ALL_DRUNTIME_COMPILE_DSOURCES) $(DRUNTIME_CSOURCES) \ $(DRUNTIME_SSOURCES) -REAL_DRUNTIME_TEST_LOBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.lo) -REAL_DRUNTIME_TEST_OBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.o) -# Workaround issue # -DRUNTIME_TEST_OBJECTS = $(filter-out rt/util/typeinfo.t.o \ - core/internal/convert.t.o, $(REAL_DRUNTIME_TEST_OBJECTS)) \ - rt/util/typeinfo.o core/internal/convert.o - -DRUNTIME_TEST_LOBJECTS = $(filter-out rt/util/typeinfo.t.lo \ - core/internal/convert.t.lo, $(REAL_DRUNTIME_TEST_LOBJECTS)) \ - rt/util/typeinfo.lo core/internal/convert.lo +DRUNTIME_TEST_LOBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.lo) +DRUNTIME_TEST_OBJECTS = $(ALL_DRUNTIME_COMPILE_DSOURCES:.d=.t.o) @ENABLE_SHARED_TRUE@check_LTLIBRARIES = libgdruntime_t.la toolexeclib_LTLIBRARIES = libgdruntime.la diff --git a/libphobos/libdruntime/core/internal/convert.d b/libphobos/libdruntime/core/internal/convert.d index c4745b6f5..76d6f013b 100644 --- a/libphobos/libdruntime/core/internal/convert.d +++ b/libphobos/libdruntime/core/internal/convert.d @@ -3,7 +3,7 @@ * This module provides functions to converting different values to const(ubyte)[] * * Copyright: Copyright Igor Stepanov 2013-2013. - * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0). + * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). 
* Authors: Igor Stepanov * Source: $(DRUNTIMESRC core/internal/_convert.d) */ @@ -33,7 +33,7 @@ private ubyte[] ctfe_alloc()(size_t n) } } -@trusted pure nothrow +@trusted pure nothrow @nogc const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqual!T == double) || is(Unqual!T == real) || is(Unqual!T == ifloat) || is(Unqual!T == idouble) || is(Unqual!T == ireal)) { @@ -72,7 +72,7 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqua ulong mantissa2 = parsed.mantissa2; off_bytes--; // go back one, since mantissa only stored data in 56 // bits, ie 7 bytes - for(; off_bytes < FloatTraits!T.MANTISSA/8; ++off_bytes) + for (; off_bytes < FloatTraits!T.MANTISSA/8; ++off_bytes) { buff[off_bytes] = cast(ubyte)mantissa2; mantissa2 >>= 8; @@ -114,13 +114,13 @@ const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == float) || is(Unqua } } -@safe pure nothrow +@safe pure nothrow @nogc private Float parse(bool is_denormalized = false, T)(T x) if (is(Unqual!T == ifloat) || is(Unqual!T == idouble) || is(Unqual!T == ireal)) { return parse(x.im); } -@safe pure nothrow +@safe pure nothrow @nogc private Float parse(bool is_denormalized = false, T:real)(T x_) if (floatFormat!T != FloatFormat.Real80) { Unqual!T x = x_; @@ -178,7 +178,7 @@ private Float parse(bool is_denormalized = false, T:real)(T x_) if (floatFormat! } } -@safe pure nothrow +@safe pure nothrow @nogc private Float parse(bool _ = false, T:real)(T x_) if (floatFormat!T == FloatFormat.Real80) { Unqual!T x = x_; @@ -232,6 +232,7 @@ private struct Float private template FloatTraits(T) if (floatFormat!T == FloatFormat.Float) { + enum DATASIZE = 4; enum EXPONENT = 8; enum MANTISSA = 23; enum ZERO = Float(0, 0, 0); @@ -244,6 +245,7 @@ private template FloatTraits(T) if (floatFormat!T == FloatFormat.Float) private template FloatTraits(T) if (floatFormat!T == FloatFormat.Double) { + enum DATASIZE = 8; enum EXPONENT = 11; enum MANTISSA = 52; enum ZERO = Float(0, 0, 0); @@ -256,6 +258,7 @@ private template FloatTraits(T) if (floatFormat!T == FloatFormat.Double) private template FloatTraits(T) if (floatFormat!T == FloatFormat.Real80) { + enum DATASIZE = 10; enum EXPONENT = 15; enum MANTISSA = 64; enum ZERO = Float(0, 0, 0); @@ -268,6 +271,7 @@ private template FloatTraits(T) if (floatFormat!T == FloatFormat.Real80) private template FloatTraits(T) if (floatFormat!T == FloatFormat.DoubleDouble) //Unsupported in CTFE { + enum DATASIZE = 16; enum EXPONENT = 11; enum MANTISSA = 106; enum ZERO = Float(0, 0, 0); @@ -280,6 +284,7 @@ private template FloatTraits(T) if (floatFormat!T == FloatFormat.DoubleDouble) / private template FloatTraits(T) if (floatFormat!T == FloatFormat.Quadruple) { + enum DATASIZE = 16; enum EXPONENT = 15; enum MANTISSA = 112; enum ZERO = Float(0, 0, 0); @@ -291,10 +296,10 @@ private template FloatTraits(T) if (floatFormat!T == FloatFormat.Quadruple) } -@safe pure nothrow +@safe pure nothrow @nogc private real binPow2(int pow) { - static real binPosPow2(int pow) @safe pure nothrow + static real binPosPow2(int pow) @safe pure nothrow @nogc { assert(pow > 0); @@ -319,14 +324,14 @@ private real binPow2(int pow) //Need in CTFE, because CTFE float and double expressions computed more precisely that run-time expressions. 
-@safe pure nothrow +@safe pure nothrow @nogc private ulong shiftrRound(ulong x) { return (x >> 1) + (x & 1); } -@safe pure nothrow -private uint binLog2(T)(T x) +@safe pure nothrow @nogc +private uint binLog2(T)(const T x) { assert(x > 0); int max = 2 ^^ (FloatTraits!T.EXPONENT-1)-1; @@ -353,7 +358,7 @@ private uint binLog2(T)(T x) return max; } -@safe pure nothrow +@safe pure nothrow @nogc private Float denormalizedMantissa(T)(T x, uint sign) if (floatFormat!T == FloatFormat.Real80) { x *= 2.0L^^FloatTraits!T.MANTISSA; @@ -362,7 +367,7 @@ private Float denormalizedMantissa(T)(T x, uint sign) if (floatFormat!T == Float return Float(fl.mantissa >> pow, 0, sign); } -@safe pure nothrow +@safe pure nothrow @nogc private Float denormalizedMantissa(T)(T x, uint sign) if (floatFormat!T == FloatFormat.Float || floatFormat!T == FloatFormat.Double) { @@ -372,7 +377,7 @@ private Float denormalizedMantissa(T)(T x, uint sign) return Float(shiftrRound(mant), 0, sign); } -@safe pure nothrow +@safe pure nothrow @nogc private Float denormalizedMantissa(T)(T x, uint sign) if (floatFormat!T == FloatFormat.Quadruple) { x *= 2.0L^^FloatTraits!T.MANTISSA; @@ -496,8 +501,8 @@ version (unittest) testNumberConvert!("real.min_normal/2UL^^63"); // check subnormal storage edge case for Quadruple testNumberConvert!("real.min_normal/2UL^^56"); - //testNumberConvert!("real.min_normal/19"); // XGDC: ct[0] == 0, rt[0] == 27 - //testNumberConvert!("real.min_normal/17"); // XGDC: ct[0= == 128, rt[0] == 136 + testNumberConvert!("real.min_normal/19"); + testNumberConvert!("real.min_normal/17"); /**Test imaginary values: convert algorithm is same with real values*/ testNumberConvert!("0.0Fi"); @@ -505,8 +510,8 @@ version (unittest) testNumberConvert!("0.0Li"); /**True random values*/ - //testNumberConvert!("-0x9.0f7ee55df77618fp-13829L"); //XGDC: ct[0,1] == [0,96], rt[0,1] == [143,97] - //testNumberConvert!("0x7.36e6e2640120d28p+8797L"); // XGDC: ct[0,1] == [0,24], rt[0,1] == [80,26] + testNumberConvert!("-0x9.0f7ee55df77618fp-13829L"); + testNumberConvert!("0x7.36e6e2640120d28p+8797L"); testNumberConvert!("-0x1.05df6ce4702ccf8p+15835L"); testNumberConvert!("0x9.54bb0d88806f714p-7088L"); @@ -567,22 +572,29 @@ template floatFormat(T) if (is(T:real) || is(T:ireal)) } +package template floatSize(T) if (is(T:real) || is(T:ireal)) +{ + enum floatSize = FloatTraits!(T).DATASIZE; +} + // all toUbyte functions must be evaluable at compile time -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(T[] arr) if (T.sizeof == 1) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const T[] arr) if (T.sizeof == 1) { return cast(const(ubyte)[])arr; } -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(T[] arr) if ((is(typeof(toUbyte(arr[0])) == const(ubyte)[])) && (T.sizeof > 1)) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const T[] arr) if ((is(typeof(toUbyte(arr[0])) == const(ubyte)[])) && (T.sizeof > 1)) { if (__ctfe) { - const(ubyte)[] ret; + ubyte[] ret = ctfe_alloc(T.sizeof * arr.length); + size_t offset = 0; foreach (cur; arr) { - ret ~= toUbyte(cur); + ret[offset .. offset + T.sizeof] = toUbyte(cur)[0 .. 
T.sizeof]; + offset += T.sizeof; } return ret; } @@ -592,14 +604,16 @@ const(ubyte)[] toUbyte(T)(T[] arr) if ((is(typeof(toUbyte(arr[0])) == const(ubyt } } -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(ref T val) if (__traits(isIntegral, T) && !is(T == enum)) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const ref T val) if (__traits(isIntegral, T) && !is(T == enum) && !is(T == __vector)) { static if (T.sizeof == 1) { if (__ctfe) { - return cast(const(ubyte)[])[val]; + ubyte[] result = ctfe_alloc(1); + result[0] = cast(ubyte) val; + return result; } else { @@ -608,7 +622,7 @@ const(ubyte)[] toUbyte(T)(ref T val) if (__traits(isIntegral, T) && !is(T == enu } else if (__ctfe) { - ubyte[T.sizeof] tmp; + ubyte[] tmp = ctfe_alloc(T.sizeof); Unqual!T val_ = val; for (size_t i = 0; i < T.sizeof; ++i) { @@ -618,7 +632,7 @@ const(ubyte)[] toUbyte(T)(ref T val) if (__traits(isIntegral, T) && !is(T == enu tmp[idx] = cast(ubyte)(val_&0xff); val_ >>= 8; } - return tmp[].dup; + return tmp; } else { @@ -626,14 +640,41 @@ const(ubyte)[] toUbyte(T)(ref T val) if (__traits(isIntegral, T) && !is(T == enu } } -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(ref T val) if (is(Unqual!T == cfloat) || is(Unqual!T == cdouble) ||is(Unqual!T == creal)) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == __vector)) +{ + if (!__ctfe) + return (cast(const ubyte*) &val)[0 .. T.sizeof]; + else static if (is(typeof(val[0]) : void)) + assert(0, "Unable to compute byte representation of " ~ T.stringof ~ " at compile time."); + else + { + // This code looks like it should work in CTFE but it segfaults: + // auto a = val.array; + // return toUbyte(a); + alias E = typeof(val[0]); + ubyte[] result = ctfe_alloc(T.sizeof); + for (size_t i = 0, j = 0; i < T.sizeof; i += E.sizeof, ++j) + { + result[i .. i + E.sizeof] = toUbyte(val[j]); + } + return result; + } +} + +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const ref T val) if (is(Unqual!T == cfloat) || is(Unqual!T == cdouble) ||is(Unqual!T == creal)) { if (__ctfe) { auto re = val.re; auto im = val.im; - return (re.toUbyte() ~ im.toUbyte()); + auto a = re.toUbyte(); + auto b = im.toUbyte(); + ubyte[] result = ctfe_alloc(a.length + b.length); + result[0 .. a.length] = a[0 .. a.length]; + result[a.length .. $] = b[0 .. b.length]; + return result; } else { @@ -641,14 +682,13 @@ const(ubyte)[] toUbyte(T)(ref T val) if (is(Unqual!T == cfloat) || is(Unqual!T = } } -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(ref T val) if (is(T == enum) && is(typeof(toUbyte(cast(V)val)) == const(ubyte)[])) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const ref T val) if (is(T V == enum) && is(typeof(toUbyte(cast(const V)val)) == const(ubyte)[])) { if (__ctfe) { static if (is(T V == enum)){} - V e_val = val; - return toUbyte(e_val); + return toUbyte(cast(const V) val); } else { @@ -656,7 +696,16 @@ const(ubyte)[] toUbyte(T)(ref T val) if (is(T == enum) && is(typeof(toUbyte(cast } } -private bool isNonReference(T)() +nothrow pure @safe unittest +{ + // Issue 19008 - check toUbyte works on enums. 
+ enum Month : uint { jan = 1} + Month m = Month.jan; + const bytes = toUbyte(m); + enum ctfe_works = (() => { Month x = Month.jan; return toUbyte(x).length > 0; })(); +} + +package(core.internal) bool isNonReference(T)() { static if (is(T == struct) || is(T == union)) { @@ -664,7 +713,10 @@ private bool isNonReference(T)() } else static if (__traits(isStaticArray, T)) { - return isNonReference!(typeof(T.init[0]))(); + static if (T.length > 0) + return isNonReference!(typeof(T.init[0]))(); + else + return true; } else static if (is(T E == enum)) { @@ -698,12 +750,12 @@ private bool isNonReferenceStruct(T)() if (is(T == struct) || is(T == union)) return true; } -@trusted pure nothrow -const(ubyte)[] toUbyte(T)(ref T val) if (is(T == struct) || is(T == union)) +@trusted pure nothrow @nogc +const(ubyte)[] toUbyte(T)(const ref T val) if (is(T == struct) || is(T == union)) { if (__ctfe) { - ubyte[T.sizeof] bytes; + ubyte[] bytes = ctfe_alloc(T.sizeof); foreach (key, cur; val.tupleof) { alias CUR_TYPE = typeof(cur); @@ -722,7 +774,7 @@ const(ubyte)[] toUbyte(T)(ref T val) if (is(T == struct) || is(T == union)) assert(0, "Unable to compute byte representation of "~typeof(CUR_TYPE).stringof~" field at compile time"); } } - return bytes[].dup; + return bytes; } else { diff --git a/libphobos/libdruntime/core/internal/hash.d b/libphobos/libdruntime/core/internal/hash.d index 1805860a0..4d216efc8 100644 --- a/libphobos/libdruntime/core/internal/hash.d +++ b/libphobos/libdruntime/core/internal/hash.d @@ -3,16 +3,236 @@ * This module provides functions to uniform calculating hash values for different types * * Copyright: Copyright Igor Stepanov 2013-2013. - * License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0). + * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). * Authors: Igor Stepanov * Source: $(DRUNTIMESRC core/internal/_hash.d) */ module core.internal.hash; import core.internal.convert; +import core.internal.traits : allSatisfy, Unconst; + +// If true ensure that positive zero and negative zero have the same hash. +// Historically typeid(float).getHash did this but hashOf(float) did not. +private enum floatCoalesceZeroes = true; +// If true ensure that all NaNs of the same floating point type have the same hash. +// Historically typeid(float).getHash didn't do this but hashOf(float) did. +private enum floatCoalesceNaNs = true; + +// If either of the above are true then no struct or array that contains the +// representation of a floating point number may be hashed with `bytesHash`. + +@nogc nothrow pure @safe unittest +{ + static if (floatCoalesceZeroes) + assert(hashOf(+0.0) == hashOf(-0.0)); // Same hash for +0.0 and -0.0. + static if (floatCoalesceNaNs) + assert(hashOf(double.nan) == hashOf(-double.nan)); // Same hash for different NaN. +} + +private enum hasCallableToHash(T) = __traits(compiles, + { + size_t hash = ((T* x) => (*x).toHash())(null); + }); + +@nogc nothrow pure @safe unittest +{ + static struct S { size_t toHash() { return 4; } } + assert(hasCallableToHash!S); + assert(!hasCallableToHash!(shared const S)); +} + +private enum isFinalClassWithAddressBasedHash(T) = __traits(isFinalClass, T) + // Use __traits(compiles, ...) in case there are multiple overloads of `toHash`. 
+ && __traits(compiles, {static assert(&Object.toHash is &T.toHash);}); + +@nogc nothrow pure @safe unittest +{ + static class C1 {} + final static class C2 : C1 {} + final static class C3 : C1 { override size_t toHash() const nothrow { return 1; }} + static assert(!isFinalClassWithAddressBasedHash!Object); + static assert(!isFinalClassWithAddressBasedHash!C1); + static assert(isFinalClassWithAddressBasedHash!C2); + static assert(!isFinalClassWithAddressBasedHash!C3); +} + +private template isCppClassWithoutHash(T) +{ + static if (!is(T == class) && !is(T == interface)) + enum isCppClassWithoutHash = false; + else + { + import core.internal.traits : Unqual; + enum bool isCppClassWithoutHash = __traits(getLinkage, T) == "C++" + && !is(Unqual!T : Object) && !hasCallableToHash!T; + } +} + +/+ +Is it valid to calculate a hash code for T based on the bits of its +representation? Always false for interfaces, dynamic arrays, and +associative arrays. False for all classes except final classes that do +not override `toHash`. + +Note: according to the spec as of +https://github.com/dlang/dlang.org/commit/d66eff16491b0664c0fc00ba80a7aa291703f1f2 +the contents of unnamed paddings between fields is undefined. Currently +this hashing implementation assumes that the padding contents (if any) +for all instances of `T` are the same. The correctness of this +assumption is yet to be verified. ++/ +private template canBitwiseHash(T) +{ + static if (is(T EType == enum)) + enum canBitwiseHash = .canBitwiseHash!EType; + else static if (__traits(isFloating, T)) + enum canBitwiseHash = !(floatCoalesceZeroes || floatCoalesceNaNs); + else static if (__traits(isScalar, T)) + enum canBitwiseHash = true; + else static if (is(T == class)) + { + enum canBitwiseHash = isFinalClassWithAddressBasedHash!T || isCppClassWithoutHash!T; + } + else static if (is(T == interface)) + { + enum canBitwiseHash = isCppClassWithoutHash!T; + } + else static if (is(T == struct)) + { + static if (hasCallableToHash!T || __traits(isNested, T)) + enum canBitwiseHash = false; + else + enum canBitwiseHash = allSatisfy!(.canBitwiseHash, typeof(T.tupleof)); + } + else static if (is(T == union)) + { + // Right now we always bytewise hash unions that lack callable `toHash`. + enum canBitwiseHash = !hasCallableToHash!T; + } + else static if (is(T E : E[])) + { + static if (__traits(isStaticArray, T)) + enum canBitwiseHash = (T.length == 0) || .canBitwiseHash!E; + else + enum canBitwiseHash = false; + } + else static if (__traits(isAssociativeArray, T)) + { + enum canBitwiseHash = false; + } + else + { + static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)), + "Internal error: unanticipated type "~T.stringof); + enum canBitwiseHash = true; + } +} + +private template UnqualUnsigned(T) if (__traits(isIntegral, T) && !is(T == __vector)) +{ + static if (T.sizeof == ubyte.sizeof) alias UnqualUnsigned = ubyte; + else static if (T.sizeof == ushort.sizeof) alias UnqualUnsigned = ushort; + else static if (T.sizeof == uint.sizeof) alias UnqualUnsigned = uint; + else static if (T.sizeof == ulong.sizeof) alias UnqualUnsigned = ulong; + else static if (T.sizeof == ulong.sizeof * 2) + { + static assert(T.sizeof == ucent.sizeof); + alias UnqualUnsigned = ucent; + } + else + { + static assert(0, "No known unsigned equivalent of " ~ T.stringof); + } + + static assert(UnqualUnsigned.sizeof == T.sizeof && __traits(isUnsigned, UnqualUnsigned)); +} + +// Overly restrictive for simplicity: has false negatives but no false positives. 
+private template useScopeConstPassByValue(T) +{ + static if (__traits(isScalar, T)) + enum useScopeConstPassByValue = true; + else static if (is(T == class) || is(T == interface)) + // Overly restrictive for simplicity. + enum useScopeConstPassByValue = isFinalClassWithAddressBasedHash!T; + else static if (is(T == struct) || is(T == union)) + { + // Overly restrictive for simplicity. + enum useScopeConstPassByValue = T.sizeof <= (int[]).sizeof && + __traits(isPOD, T) && // "isPOD" just to check there's no dtor or postblit. + canBitwiseHash!T; // We can't verify toHash doesn't leak. + } + else static if (is(T : E[], E)) + { + static if (!__traits(isStaticArray, T)) + // Overly restrictive for simplicity. + enum useScopeConstPassByValue = .useScopeConstPassByValue!E; + else static if (T.length == 0) + enum useScopeConstPassByValue = true; + else + enum useScopeConstPassByValue = T.sizeof <= (uint[]).sizeof + && .useScopeConstPassByValue!(typeof(T.init[0])); + } + else static if (is(T : V[K], K, V)) + { + // Overly restrictive for simplicity. + enum useScopeConstPassByValue = .useScopeConstPassByValue!K + && .useScopeConstPassByValue!V; + } + else + { + static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)), + "Internal error: unanticipated type "~T.stringof); + enum useScopeConstPassByValue = true; + } +} + +@safe unittest +{ + static assert(useScopeConstPassByValue!int); + static assert(useScopeConstPassByValue!string); + + static int ctr; + static struct S1 { ~this() { ctr++; } } + static struct S2 { this(this) { ctr++; } } + static assert(!useScopeConstPassByValue!S1, + "Don't default pass by value a struct with a non-vacuous destructor."); + static assert(!useScopeConstPassByValue!S2, + "Don't default pass by value a struct with a non-vacuous postblit."); +} //enum hash. CTFE depends on base type -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (is(T == enum)) +size_t hashOf(T)(scope const T val) +if (is(T EType == enum) && useScopeConstPassByValue!EType) +{ + static if (is(T EType == enum)) //for EType + { + return hashOf(cast(const EType) val); + } + else + { + static assert(0); + } +} + +//enum hash. CTFE depends on base type +size_t hashOf(T)(scope const T val, size_t seed) +if (is(T EType == enum) && useScopeConstPassByValue!EType) +{ + static if (is(T EType == enum)) //for EType + { + return hashOf(cast(const EType) val, seed); + } + else + { + static assert(0); + } +} + +//enum hash. CTFE depends on base type +size_t hashOf(T)(auto ref T val, size_t seed = 0) +if (is(T EType == enum) && !useScopeConstPassByValue!EType) { static if (is(T EType == enum)) //for EType { @@ -25,75 +245,241 @@ size_t hashOf(T)(auto ref T val, size_t seed = 0) if (is(T == enum)) } } -//CTFE ready (depends on base type). Can be merged with dynamic array hash -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && __traits(isStaticArray, T)) +//CTFE ready (depends on base type). +size_t hashOf(T)(scope const auto ref T val, size_t seed = 0) +if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T) { - size_t cur_hash = seed; - foreach (ref cur; val) + // FIXME: + // We would like to to do this: + // + //static if (T.length == 0) + // return seed; + //else static if (T.length == 1) + // return hashOf(val[0], seed); + //else + // return bytesHashWithExactSizeAndAlignment!T(toUbyte(val), seed); + // + // ... 
but that's inefficient when using a runtime TypeInfo (introduces a branch) + // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as + // hashOf(val). + static if (T.length == 0) + { + return bytesHashAlignedBy!size_t((ubyte[]).init, seed); + } + static if (is(typeof(toUbyte(val)) == const(ubyte)[])) { - cur_hash = hashOf(cur, cur_hash); + return bytesHashAlignedBy!T(toUbyte(val), seed); + } + else //Other types. CTFE unsupported + { + assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time"); + return bytesHashAlignedBy!T((cast(const(ubyte)*) &val)[0 .. T.sizeof], seed); } - return cur_hash; } -//dynamic array hash +//CTFE ready (depends on base type). size_t hashOf(T)(auto ref T val, size_t seed = 0) +if (!is(T == enum) && __traits(isStaticArray, T) && !canBitwiseHash!T) +{ + // FIXME: + // We would like to to do this: + // + //static if (T.length == 0) + // return seed; + //else static if (T.length == 1) + // return hashOf(val[0], seed); + //else + // /+ hash like a dynamic array +/ + // + // ... but that's inefficient when using a runtime TypeInfo (introduces a branch) + // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as + // hashOf(val). + return hashOf(val[], seed); +} + +//dynamic array hash +size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T) - && !is(T == struct) && !is(T == class) && !is(T == union)) + && !is(T == struct) && !is(T == class) && !is(T == union) + && (__traits(isScalar, S) || canBitwiseHash!S)) { alias ElementType = typeof(val[0]); - static if (is(ElementType == interface) || is(ElementType == class) || - ((is(ElementType == struct) || is(ElementType == union)) - && is(typeof(val[0].toHash()) == size_t))) - //class or interface array or struct array with toHash(); CTFE depend on toHash() method + static if (!canBitwiseHash!ElementType) { size_t hash = seed; - foreach (o; val) + foreach (ref o; val) { - hash = hashOf(o, hash); + hash = hashOf(hashOf(o), hash); // double hashing to match TypeInfo.getHash } return hash; } else static if (is(typeof(toUbyte(val)) == const(ubyte)[])) //ubyteble array (arithmetic types and structs without toHash) CTFE ready for arithmetic types and structs without reference fields { - auto bytes = toUbyte(val); - return bytesHash(bytes.ptr, bytes.length, seed); + return bytesHashAlignedBy!ElementType(toUbyte(val), seed); } else //Other types. CTFE unsupported { - assert(!__ctfe, "unable to compute hash of "~T.stringof); - return bytesHash(val.ptr, ElementType.sizeof*val.length, seed); + assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time"); + return bytesHashAlignedBy!ElementType((cast(const(ubyte)*) val.ptr)[0 .. 
ElementType.sizeof*val.length], seed); + } +} + +//dynamic array hash +size_t hashOf(T)(T val, size_t seed = 0) +if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T) + && !is(T == struct) && !is(T == class) && !is(T == union) + && !(__traits(isScalar, S) || canBitwiseHash!S)) +{ + size_t hash = seed; + foreach (ref o; val) + { + hash = hashOf(hashOf(o), hash); // double hashing because TypeInfo.getHash doesn't allow to pass seed value + } + return hash; +} + +//arithmetic type hash +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val) if (!is(T == enum) && __traits(isArithmetic, T) + && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector)) +{ + return cast(UnqualUnsigned!T) val; +} + +//arithmetic type hash +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && __traits(isArithmetic, T) + && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector)) +{ + static if (size_t.sizeof < ulong.sizeof) + { + //MurmurHash3 32-bit single round + enum uint c1 = 0xcc9e2d51; + enum uint c2 = 0x1b873593; + enum uint c3 = 0xe6546b64; + enum uint r1 = 15; + enum uint r2 = 13; + } + else + { + //Half of MurmurHash3 64-bit single round + //(omits second interleaved update) + enum ulong c1 = 0x87c37b91114253d5; + enum ulong c2 = 0x4cf5ad432745937f; + enum ulong c3 = 0x52dce729; + enum uint r1 = 31; + enum uint r2 = 27; } + auto h = c1 * cast(UnqualUnsigned!T) val; + h = (h << r1) | (h >>> (typeof(h).sizeof * 8 - r1)); + h = (h * c2) ^ seed; + h = (h << r2) | (h >>> (typeof(h).sizeof * 8 - r2)); + return h * 5 + c3; } //arithmetic type hash -@trusted nothrow pure -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && __traits(isArithmetic, T)) +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && __traits(isArithmetic, T) + && (!__traits(isIntegral, T) || T.sizeof > size_t.sizeof) && !is(T == __vector)) { static if (__traits(isFloating, val)) { - T data = (val != val) ? T.nan : val; - auto bytes = toUbyte(data); - return bytesHash(bytes.ptr, bytes.length, seed); + import core.internal.convert : floatSize; + + static if (floatCoalesceZeroes || floatCoalesceNaNs) + { + import core.internal.traits : Unqual; + Unqual!T data = val; + // +0.0 and -0.0 become the same. + static if (floatCoalesceZeroes && is(typeof(data = 0))) + if (data == 0) data = 0; + static if (floatCoalesceZeroes && is(typeof(data = 0.0i))) + if (data == 0.0i) data = 0.0i; + static if (floatCoalesceZeroes && is(typeof(data = 0.0 + 0.0i))) + { + if (data.re == 0.0) data = 0.0 + (data.im * 1.0i); + if (data.im == 0.0i) data = data.re + 0.0i; + } + static if (floatCoalesceNaNs) + if (data != data) data = T.nan; // All NaN patterns become the same. + } + else + { + alias data = val; + } + + static if (T.mant_dig == float.mant_dig && T.sizeof == uint.sizeof) + return hashOf(*cast(const uint*) &data, seed); + else static if (T.mant_dig == double.mant_dig && T.sizeof == ulong.sizeof) + return hashOf(*cast(const ulong*) &data, seed); + else + { + static if (is(T : creal) && T.sizeof != 2 * floatSize!(typeof(T.re))) + { + auto h1 = hashOf(data.re); + return hashOf(data.im, h1); + } + else static if (is(T : real) || is(T : ireal)) + { + // Ignore trailing padding + auto bytes = toUbyte(data)[0 .. 
floatSize!T]; + return bytesHashWithExactSizeAndAlignment!T(bytes, seed); + } + else + { + return bytesHashWithExactSizeAndAlignment!T(toUbyte(data), seed); + } + } + } + else + { + static assert(T.sizeof > size_t.sizeof && __traits(isIntegral, T)); + foreach (i; 0 .. T.sizeof / size_t.sizeof) + seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed); + return seed; + } +} + +size_t hashOf(T)(scope const auto ref T val, size_t seed = 0) @safe @nogc nothrow pure +if (is(T == __vector) && !is(T == enum)) +{ + static if (__traits(isFloating, T) && (floatCoalesceZeroes || floatCoalesceNaNs)) + { + if (__ctfe) + { + // Workaround for CTFE bug. + alias E = Unqual!(typeof(val[0])); + E[T.sizeof / E.sizeof] array; + foreach (i; 0 .. T.sizeof / E.sizeof) + array[i] = val[i]; + return hashOf(array, seed); + } + return hashOf(val.array, seed); } else { - auto bytes = toUbyte(val); - return bytesHash(bytes.ptr, bytes.length, seed); + return bytesHashAlignedBy!T(toUbyte(val), seed); } } //typeof(null) hash. CTFE supported -@trusted nothrow pure -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && is(T : typeof(null))) +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val) if (!is(T == enum) && is(T : typeof(null))) { - return hashOf(cast(void*)null); + return 0; +} + +//typeof(null) hash. CTFE supported +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && is(T : typeof(null))) +{ + return hashOf(size_t(0), seed); } //Pointers hash. CTFE unsupported if not null -@trusted nothrow pure -size_t hashOf(T)(auto ref T val, size_t seed = 0) +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val) if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null)) && !is(T == struct) && !is(T == class) && !is(T == union)) { @@ -101,7 +487,7 @@ if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null)) { if (val is null) { - return hashOf(cast(size_t)0); + return 0; } else { @@ -109,56 +495,231 @@ if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null)) } } - return hashOf(cast(size_t)val); + auto addr = cast(size_t) val; + return addr ^ (addr >>> 4); } -//struct or union hash -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && (is(T == struct) || is(T == union))) +//Pointers hash. 
CTFE unsupported if not null +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val, size_t seed) +if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null)) + && !is(T == struct) && !is(T == class) && !is(T == union)) { - static if (is(typeof(val.toHash()) == size_t)) //CTFE depends on toHash() + if (__ctfe) + { + if (val is null) + { + return hashOf(cast(size_t)0, seed); + } + else + { + assert(0, "Unable to calculate hash of non-null pointer at compile time"); + } + + } + return hashOf(cast(size_t)val, seed); +} + +private enum _hashOfStruct = +q{ + enum bool isChained = is(typeof(seed) : size_t); + static if (!isChained) enum size_t seed = 0; + static if (hasCallableToHash!(typeof(val))) //CTFE depends on toHash() { - return hashOf(val.toHash(), seed); + static if (isChained) + return hashOf(cast(size_t) val.toHash(), seed); + else + return val.toHash(); } else { static if (__traits(hasMember, T, "toHash") && is(typeof(T.toHash) == function)) { - pragma(msg, "Warning: struct "~__traits(identifier, T)~" has method toHash, however it cannot be called with "~T.stringof~" this."); + // TODO: in the future maybe this should be changed to a static + // assert(0), because if there's a `toHash` the programmer probably + // expected it to be called and a compilation failure here will + // expose a bug in his code. + // In the future we also might want to disallow non-const toHash + // altogether. + pragma(msg, "Warning: struct "~__traits(identifier, T) + ~" has method toHash, however it cannot be called with " + ~typeof(val).stringof~" this."); } - static if (is(typeof(toUbyte(val)) == const(ubyte)[]))//CTFE ready for structs without reference fields + static if (T.tupleof.length == 0) { - auto bytes = toUbyte(val); - return bytesHash(bytes.ptr, bytes.length, seed); + return seed; } - else // CTFE unsupproreted for structs with reference fields + else static if ((is(T == struct) && !canBitwiseHash!T) || T.tupleof.length == 1) { - assert(!__ctfe, "unable to compute hash of "~T.stringof); - const(ubyte)[] bytes = (cast(const(ubyte)*)&val)[0 .. T.sizeof]; - return bytesHash(bytes.ptr, bytes.length, seed); + static if (isChained) size_t h = seed; + static foreach (i, F; typeof(val.tupleof)) + { + static if (__traits(isStaticArray, F)) + { + static if (i == 0 && !isChained) size_t h = 0; + static if (F.sizeof > 0 && canBitwiseHash!F) + // May use smallBytesHash instead of bytesHash. + h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h); + else + // We can avoid the "double hashing" the top-level version uses + // for consistency with TypeInfo.getHash. + foreach (ref e; val.tupleof[i]) + h = hashOf(e, h); + } + else static if (is(F == struct) || is(F == union)) + { + static if (hasCallableToHash!F) + { + static if (i == 0 && !isChained) + size_t h = val.tupleof[i].toHash(); + else + h = hashOf(cast(size_t) val.tupleof[i].toHash(), h); + } + else static if (F.tupleof.length == 1) + { + // Handle the single member case separately to avoid unnecessarily using bytesHash. + static if (i == 0 && !isChained) + size_t h = hashOf(val.tupleof[i].tupleof[0]); + else + h = hashOf(val.tupleof[i].tupleof[0], h); + } + else static if (canBitwiseHash!F) + { + // May use smallBytesHash instead of bytesHash. + static if (i == 0 && !isChained) size_t h = 0; + h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h); + } + else + { + // Nothing special happening. 
+ static if (i == 0 && !isChained) + size_t h = hashOf(val.tupleof[i]); + else + h = hashOf(val.tupleof[i], h); + } + } + else + { + // Nothing special happening. + static if (i == 0 && !isChained) + size_t h = hashOf(val.tupleof[i]); + else + h = hashOf(val.tupleof[i], h); + } + } + return h; + } + else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))//CTFE ready for structs without reference fields + { + // Not using bytesHashWithExactSizeAndAlignment here because + // the result may differ from typeid(T).hashOf(&val). + return bytesHashAlignedBy!T(toUbyte(val), seed); + } + else // CTFE unsupported + { + assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time"); + const(ubyte)[] bytes = (() @trusted => (cast(const(ubyte)*)&val)[0 .. T.sizeof])(); + // Not using bytesHashWithExactSizeAndAlignment here because + // the result may differ from typeid(T).hashOf(&val). + return bytesHashAlignedBy!T(bytes, seed); } } +}; + +//struct or union hash +size_t hashOf(T)(scope const auto ref T val, size_t seed = 0) +if (!is(T == enum) && (is(T == struct) || is(T == union)) + && !is(T == const) && !is(T == immutable) + && canBitwiseHash!T) +{ + mixin(_hashOfStruct); +} + +//struct or union hash +size_t hashOf(T)(auto ref T val) +if (!is(T == enum) && (is(T == struct) || is(T == union)) + && !canBitwiseHash!T) +{ + mixin(_hashOfStruct); +} + +//struct or union hash +size_t hashOf(T)(auto ref T val, size_t seed) +if (!is(T == enum) && (is(T == struct) || is(T == union)) + && !canBitwiseHash!T) +{ + mixin(_hashOfStruct); +} + +//struct or union hash - https://issues.dlang.org/show_bug.cgi?id=19332 (support might be removed in future) +size_t hashOf(T)(scope auto ref T val, size_t seed = 0) +if (!is(T == enum) && (is(T == struct) || is(T == union)) + && (is(T == const) || is(T == immutable)) + && canBitwiseHash!T && !canBitwiseHash!(Unconst!T)) +{ + mixin(_hashOfStruct); } //delegate hash. CTFE unsupported -@trusted nothrow pure -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate)) +@trusted @nogc nothrow pure +size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate)) { - assert(!__ctfe, "unable to compute hash of "~T.stringof); + assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time"); const(ubyte)[] bytes = (cast(const(ubyte)*)&val)[0 .. T.sizeof]; - return bytesHash(bytes.ptr, bytes.length, seed); + return bytesHashWithExactSizeAndAlignment!T(bytes, seed); +} + +//address-based class hash. CTFE only if null. +@nogc nothrow pure @trusted +size_t hashOf(T)(scope const T val) +if (!is(T == enum) && (is(T == interface) || is(T == class)) + && canBitwiseHash!T) +{ + if (__ctfe) if (val is null) return 0; + return hashOf(cast(const void*) val); +} + +//address-based class hash. CTFE only if null. +@nogc nothrow pure @trusted +size_t hashOf(T)(scope const T val, size_t seed) +if (!is(T == enum) && (is(T == interface) || is(T == class)) + && canBitwiseHash!T) +{ + if (__ctfe) if (val is null) return hashOf(size_t(0), seed); + return hashOf(cast(const void*) val, seed); +} + +//class or interface hash. CTFE depends on toHash +size_t hashOf(T)(T val) +if (!is(T == enum) && (is(T == interface) || is(T == class)) + && !canBitwiseHash!T) +{ + static if (__traits(compiles, {size_t h = val.toHash();})) + return val ? val.toHash() : 0; + else + return val ? (cast(Object)val).toHash() : 0; } //class or interface hash. 
CTFE depends on toHash -size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && is(T == interface) || is(T == class)) +size_t hashOf(T)(T val, size_t seed) +if (!is(T == enum) && (is(T == interface) || is(T == class)) + && !canBitwiseHash!T) { - return hashOf(val ? (cast(Object)val).toHash() : 0, seed); + static if (__traits(compiles, {size_t h = val.toHash();})) + return hashOf(val ? cast(size_t) val.toHash() : size_t(0), seed); + else + return hashOf(val ? (cast(Object)val).toHash() : 0, seed); } //associative array hash. CTFE depends on base types -size_t hashOf(T)(auto ref T aa, size_t seed = 0) if (!is(T == enum) && __traits(isAssociativeArray, T)) +size_t hashOf(T)(T aa) if (!is(T == enum) && __traits(isAssociativeArray, T)) { - if (!aa.length) return hashOf(0, seed); + static if (is(typeof(aa) : V[K], K, V)) {} // Put K & V in scope. + static if (__traits(compiles, (ref K k, ref V v) nothrow => .hashOf(k) + .hashOf(v))) + scope (failure) assert(0); // Allow compiler to infer nothrow. + + if (!aa.length) return 0; size_t h = 0; // The computed hash is independent of the foreach traversal order. @@ -167,315 +728,81 @@ size_t hashOf(T)(auto ref T aa, size_t seed = 0) if (!is(T == enum) && __traits( size_t[2] hpair; hpair[0] = key.hashOf(); hpair[1] = val.hashOf(); - h ^= hpair.hashOf(); + h += hpair.hashOf(); } - return h.hashOf(seed); + return h; } -unittest +//associative array hash. CTFE depends on base types +size_t hashOf(T)(T aa, size_t seed) if (!is(T == enum) && __traits(isAssociativeArray, T)) { - static struct Foo - { - int a = 99; - float b = 4.0; - size_t toHash() const pure @safe nothrow - { - return a; - } - } - - static struct Bar - { - char c = 'x'; - int a = 99; - float b = 4.0; - void* d = null; - } - - static struct Boom - { - char c = 'M'; - int* a = null; - } - - interface IBoo - { - void boo(); - } - - static class Boo: IBoo - { - override void boo() - { - } - - override size_t toHash() - { - return 1; - } - } - - static struct Goo - { - size_t toHash() pure @safe nothrow - { - return 1; - } - } - - enum Gun: long - { - A = 99, - B = 17 - } - - enum double dexpr = 3.14; - enum float fexpr = 2.71; - enum wstring wsexpr = "abcdef"w; - enum string csexpr = "abcdef"; - enum int iexpr = 7; - enum long lexpr = 42; - enum int[2][3] saexpr = [[1, 2], [3, 4], [5, 6]]; - enum int[] daexpr = [7,8,9]; - enum Foo thsexpr = Foo(); - enum Bar vsexpr = Bar(); - enum int[int] aaexpr = [99:2, 12:6, 45:4]; - enum Gun eexpr = Gun.A; - enum cdouble cexpr = 7+4i; - enum Foo[] staexpr = [Foo(), Foo(), Foo()]; - enum Bar[] vsaexpr = [Bar(), Bar(), Bar()]; - enum realexpr = 7.88; - enum raexpr = [8.99L+86i, 3.12L+99i, 5.66L+12i]; - enum nullexpr = null; - - //No CTFE: - Boom rstructexpr = Boom(); - Boom[] rstrarrexpr = [Boom(), Boom(), Boom()]; - int delegate() dgexpr = (){return 78;}; - void* ptrexpr = &dgexpr; - - - //CTFE hashes - enum h1 = dexpr.hashOf(); - enum h2 = fexpr.hashOf(); - enum h3 = wsexpr.hashOf(); - enum h4 = csexpr.hashOf(); - enum h5 = iexpr.hashOf(); - enum h6 = lexpr.hashOf(); - enum h7 = saexpr.hashOf(); - enum h8 = daexpr.hashOf(); - enum h9 = thsexpr.hashOf(); - enum h10 = vsexpr.hashOf(); - enum h11 = aaexpr.hashOf(); - enum h12 = eexpr.hashOf(); - enum h13 = cexpr.hashOf(); - enum h14 = hashOf(new Boo); - enum h15 = staexpr.hashOf(); - enum h16 = hashOf([new Boo, new Boo, new Boo]); - enum h17 = hashOf([cast(IBoo)new Boo, cast(IBoo)new Boo, cast(IBoo)new Boo]); - enum h18 = hashOf(cast(IBoo)new Boo); - enum h19 = vsaexpr.hashOf(); - enum h20 = 
hashOf(cast(Foo[3])staexpr); - - //BUG: cannot cast [Boo(), Boo(), Boo()][0] to object.Object at compile time - auto h21 = hashOf(cast(Boo[3])[new Boo, new Boo, new Boo]); - auto h22 = hashOf(cast(IBoo[3])[cast(IBoo)new Boo, cast(IBoo)new Boo, cast(IBoo)new Boo]); - enum h23 = hashOf(cast(Bar[3])vsaexpr); - - //NO CTFE (Compute, but don't check correctness): - auto h24 = rstructexpr.hashOf(); - auto h25 = rstrarrexpr.hashOf(); - auto h26 = dgexpr.hashOf(); - auto h27 = ptrexpr.hashOf(); - - enum h28 = realexpr.hashOf(); - enum h29 = raexpr.hashOf(); - enum h30 = nullexpr.hashOf(); - - auto v1 = dexpr; - auto v2 = fexpr; - auto v3 = wsexpr; - auto v4 = csexpr; - auto v5 = iexpr; - auto v6 = lexpr; - auto v7 = saexpr; - auto v8 = daexpr; - auto v9 = thsexpr; - auto v10 = vsexpr; - auto v11 = aaexpr; - auto v12 = eexpr; - auto v13 = cexpr; - auto v14 = new Boo; - auto v15 = staexpr; - auto v16 = [new Boo, new Boo, new Boo]; - auto v17 = [cast(IBoo)new Boo, cast(IBoo)new Boo, cast(IBoo)new Boo]; - auto v18 = cast(IBoo)new Boo; - auto v19 = vsaexpr; - auto v20 = cast(Foo[3])staexpr; - auto v21 = cast(Boo[3])[new Boo, new Boo, new Boo]; - auto v22 = cast(IBoo[3])[cast(IBoo)new Boo, cast(IBoo)new Boo, cast(IBoo)new Boo]; - auto v23 = cast(Bar[3])vsaexpr; - auto v30 = null; - - //NO CTFE: - /*auto v24 = rstructexpr; - auto v25 = rstrarrexpr; - auto v26 = dgexpr; - auto v27 = ptrexpr; - auto v28 = realexpr; - auto v29 = raexpr;*/ - - //runtime hashes - auto rth1 = hashOf(v1); - auto rth2 = hashOf(v2); - auto rth3 = hashOf(v3); - auto rth4 = hashOf(v4); - auto rth5 = hashOf(v5); - auto rth6 = hashOf(v6); - auto rth7 = hashOf(v7); - auto rth8 = hashOf(v8); - auto rth9 = hashOf(v9); - auto rth10 = hashOf(v10); - auto rth11 = hashOf(v11); - auto rth12 = hashOf(v12); - auto rth13 = hashOf(v13); - auto rth14 = hashOf(v14); - auto rth15 = hashOf(v15); - auto rth16 = hashOf(v16); - auto rth17 = hashOf(v17); - auto rth18 = hashOf(v18); - auto rth19 = hashOf(v19); - auto rth20 = hashOf(v20); - auto rth21 = hashOf(v21); - auto rth22 = hashOf(v22); - auto rth23 = hashOf(v23); - auto rth30 = hashOf(v30); - /*//NO CTFE: - auto rth24 = hashOf(v24); - auto rth25 = hashOf(v25); - auto rth26 = hashOf(v26); - auto rth27 = hashOf(v27); - auto rth28 = hashOf(v28); - auto rth29 = hashOf(v29);*/ - - assert(h1 == rth1); - assert(h2 == rth2); - assert(h3 == rth3); - assert(h4 == rth4); - assert(h5 == rth5); - assert(h6 == rth6); - assert(h7 == rth7); - assert(h8 == rth8); - assert(h9 == rth9); - assert(h10 == rth10); - assert(h11 == rth11); - assert(h12 == rth12); - assert(h13 == rth13); - assert(h14 == rth14); - assert(h15 == rth15); - assert(h16 == rth16); - assert(h17 == rth17); - assert(h18 == rth18); - assert(h19 == rth19); - assert(h20 == rth20); - assert(h21 == rth21); - assert(h22 == rth22); - assert(h23 == rth23); - /*assert(h24 == rth24); - assert(h25 == rth25); - assert(h26 == rth26); - assert(h27 == rth27); - assert(h28 == rth28); - assert(h29 == rth29);*/ - assert(h30 == rth30); -} - - -unittest // issue 15111 -{ - void testAlias(T)() - { - static struct Foo - { - T t; - alias t this; - } - Foo foo; - static assert(is(typeof(hashOf(foo)))); - } - // was fixed - testAlias!(int[]); - testAlias!(int*); - // was not affected - testAlias!int; - testAlias!(void delegate()); - testAlias!(string[string]); - testAlias!(int[8]); + return hashOf(hashOf(aa), seed); } // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. 
-version (X86) - version = AnyX86; -version (X86_64) - version = AnyX86; +// This overload is for backwards compatibility. +@system pure nothrow @nogc +size_t bytesHash()(scope const(void)* buf, size_t len, size_t seed) +{ + return bytesHashAlignedBy!ubyte((cast(const(ubyte)*) buf)[0 .. len], seed); +} -version (AnyX86) +private template bytesHashAlignedBy(AlignType) { - version (DigitalMars) - { - } - else - { - version = HasUnalignedOps; - } + alias bytesHashAlignedBy = bytesHash!(AlignType.alignof >= uint.alignof); } +private template bytesHashWithExactSizeAndAlignment(SizeAndAlignType) +{ + static if (SizeAndAlignType.alignof < uint.alignof + ? SizeAndAlignType.sizeof <= 12 + : SizeAndAlignType.sizeof <= 10) + alias bytesHashWithExactSizeAndAlignment = smallBytesHash; + else + alias bytesHashWithExactSizeAndAlignment = bytesHashAlignedBy!SizeAndAlignType; +} -@system pure nothrow @nogc -size_t bytesHash(const(void)* buf, size_t len, size_t seed) +// Fowler/Noll/Vo hash. http://www.isthe.com/chongo/tech/comp/fnv/ +private size_t fnv()(scope const(ubyte)[] bytes, size_t seed) @nogc nothrow pure @safe { - static uint rotl32(uint n)(in uint x) pure nothrow @safe @nogc - { - return (x << n) | (x >> (32 - n)); - } + static if (size_t.max <= uint.max) + enum prime = (1U << 24) + (1U << 8) + 0x93U; + else static if (size_t.max <= ulong.max) + enum prime = (1UL << 40) + (1UL << 8) + 0xb3UL; + else + enum prime = (size_t(1) << 88) + (size_t(1) << 8) + size_t(0x3b); + foreach (b; bytes) + seed = (seed ^ b) * prime; + return seed; +} +private alias smallBytesHash = fnv; - //----------------------------------------------------------------------------- - // Block read - if your platform needs to do endian-swapping or can only - // handle aligned reads, do the conversion here - static uint get32bits(const (ubyte)* x) pure nothrow @nogc +//----------------------------------------------------------------------------- +// Block read - if your platform needs to do endian-swapping or can only +// handle aligned reads, do the conversion here +private uint get32bits()(scope const(ubyte)* x) @nogc nothrow pure @system +{ + version (BigEndian) { - //Compiler can optimize this code to simple *cast(uint*)x if it possible. - version (HasUnalignedOps) - { - if (!__ctfe) - return *cast(uint*)x; //BUG: Can't be inlined by DMD - } - version (BigEndian) - { - return ((cast(uint) x[0]) << 24) | ((cast(uint) x[1]) << 16) | ((cast(uint) x[2]) << 8) | (cast(uint) x[3]); - } - else - { - return ((cast(uint) x[3]) << 24) | ((cast(uint) x[2]) << 16) | ((cast(uint) x[1]) << 8) | (cast(uint) x[0]); - } + return ((cast(uint) x[0]) << 24) | ((cast(uint) x[1]) << 16) | ((cast(uint) x[2]) << 8) | (cast(uint) x[3]); } - - //----------------------------------------------------------------------------- - // Finalization mix - force all bits of a hash block to avalanche - static uint fmix32(uint h) pure nothrow @safe @nogc + else { - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return h; + return ((cast(uint) x[3]) << 24) | ((cast(uint) x[2]) << 16) | ((cast(uint) x[1]) << 8) | (cast(uint) x[0]); } +} - auto data = cast(const(ubyte)*)buf; +/+ +Params: + dataKnownToBeAligned = whether the data is known at compile time to be uint-aligned. 
++/ +@nogc nothrow pure @trusted +private size_t bytesHash(bool dataKnownToBeAligned)(scope const(ubyte)[] bytes, size_t seed) +{ + auto len = bytes.length; + auto data = bytes.ptr; auto nblocks = len / 4; uint h1 = cast(uint)seed; @@ -489,13 +816,16 @@ size_t bytesHash(const(void)* buf, size_t len, size_t seed) auto end_data = data+nblocks*uint.sizeof; for (; data!=end_data; data += uint.sizeof) { - uint k1 = get32bits(data); + static if (dataKnownToBeAligned) + uint k1 = __ctfe ? get32bits(data) : *(cast(const uint*) data); + else + uint k1 = get32bits(data); k1 *= c1; - k1 = rotl32!15(k1); + k1 = (k1 << 15) | (k1 >> (32 - 15)); k1 *= c2; h1 ^= k1; - h1 = rotl32!13(h1); + h1 = (h1 << 13) | (h1 >> (32 - 13)); h1 = h1*5+c3; } @@ -508,7 +838,7 @@ size_t bytesHash(const(void)* buf, size_t len, size_t seed) case 3: k1 ^= data[2] << 16; goto case; case 2: k1 ^= data[1] << 8; goto case; case 1: k1 ^= data[0]; - k1 *= c1; k1 = rotl32!15(k1); k1 *= c2; h1 ^= k1; + k1 *= c1; k1 = (k1 << 15) | (k1 >> (32 - 15)); k1 *= c2; h1 ^= k1; goto default; default: } @@ -516,7 +846,10 @@ size_t bytesHash(const(void)* buf, size_t len, size_t seed) //---------- // finalization h1 ^= len; - h1 = fmix32(h1); + // Force all bits of the hash block to avalanche. + h1 = (h1 ^ (h1 >> 16)) * 0x85ebca6b; + h1 = (h1 ^ (h1 >> 13)) * 0xc2b2ae35; + h1 ^= h1 >> 16; return h1; } @@ -531,4 +864,21 @@ pure nothrow @system @nogc unittest enum test_str = "Sample string"; enum size_t hashVal = ctfeHash(test_str); assert(hashVal == bytesHash(&test_str[0], test_str.length, 0)); + + // Detect unintended changes to bytesHash on unaligned and aligned inputs. + version (BigEndian) + { + const ubyte[7] a = [99, 4, 3, 2, 1, 5, 88]; + const uint[2] b = [0x04_03_02_01, 0x05_ff_ff_ff]; + } + else + { + const ubyte[7] a = [99, 1, 2, 3, 4, 5, 88]; + const uint[2] b = [0x04_03_02_01, 0xff_ff_ff_05]; + } + // It is okay to change the below values if you make a change + // that you expect to change the result of bytesHash. + assert(bytesHash(&a[1], a.length - 2, 0) == 2727459272); + assert(bytesHash(&b, 5, 0) == 2727459272); + assert(bytesHashAlignedBy!uint((cast(const ubyte*) &b)[0 .. 5], 0) == 2727459272); } diff --git a/libphobos/libdruntime/core/internal/traits.d b/libphobos/libdruntime/core/internal/traits.d index a8c734005..5f30b309a 100644 --- a/libphobos/libdruntime/core/internal/traits.d +++ b/libphobos/libdruntime/core/internal/traits.d @@ -128,6 +128,25 @@ template dtorIsNothrow(T) enum dtorIsNothrow = is(typeof(function{T t=void;}) : void function() nothrow); } +// taken from std.meta.allSatisfy +template allSatisfy(alias F, T...) +{ + static if (T.length == 0) + { + enum allSatisfy = true; + } + else static if (T.length == 1) + { + enum allSatisfy = F!(T[0]); + } + else + { + enum allSatisfy = + allSatisfy!(F, T[ 0 .. $/2]) && + allSatisfy!(F, T[$/2 .. $ ]); + } +} + template anySatisfy(alias F, T...) { static if (T.length == 0) diff --git a/libphobos/libdruntime/rt/util/typeinfo.d b/libphobos/libdruntime/rt/util/typeinfo.d index ded5d4da5..2cc1c236c 100644 --- a/libphobos/libdruntime/rt/util/typeinfo.d +++ b/libphobos/libdruntime/rt/util/typeinfo.d @@ -6,6 +6,7 @@ * Authors: Kenji Hara */ module rt.util.typeinfo; +static import core.internal.hash; template Floating(T) if (is(T == float) || is(T == double) || is(T == real)) @@ -32,19 +33,7 @@ if (is(T == float) || is(T == double) || is(T == real)) return (d1 == d2) ? 0 : ((d1 < d2) ? 
-1 : 1); } - size_t hashOf(T value) @trusted - { - if (value == 0) // +0.0 and -0.0 - value = 0; - - static if (is(T == float)) // special case? - return *cast(uint*)&value; - else - { - import rt.util.hash; - return rt.util.hash.hashOf((&value)[0 .. 1], 0); - } - } + public alias hashOf = core.internal.hash.hashOf; } template Floating(T) if (is(T == cfloat) || is(T == cdouble) || is(T == creal)) @@ -73,13 +62,7 @@ if (is(T == cfloat) || is(T == cdouble) || is(T == creal)) return result; } - size_t hashOf(T value) @trusted - { - if (value == 0 + 0i) - value = 0 + 0i; - import rt.util.hash; - return rt.util.hash.hashOf((&value)[0 .. 1], 0); - } + public alias hashOf = core.internal.hash.hashOf; } template Array(T) @@ -118,13 +101,7 @@ if (is(T == float) || is(T == double) || is(T == real) || return 0; } - size_t hashOf(T[] value) - { - size_t h = 0; - foreach (e; value) - h += Floating!T.hashOf(e); - return h; - } + public alias hashOf = core.internal.hash.hashOf; } version (unittest) @@ -247,7 +224,7 @@ unittest { assert(f1 == 0 + 0i); - assert(f1 == f2); + assert(f1 == f2); assert(f1 !is f2); ti = typeid(F); assert(ti.getHash(&f1) == ti.getHash(&f2));
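Note (illustration only, not part of the patch): the expr.cc hunk defines floating-point identity as bit equality of the well-defined value bits, and compares complex operands real part and imaginary part separately so that padding between the parts (e.g. for x86 80-bit real) never enters the comparison; the druntime rework additionally coalesces signed zeroes and NaNs in hashOf. A minimal D sketch of the observable behaviour, assuming IEEE doubles and the still-supported creal type:

    void identitySketch()
    {
        // `==` compares numeric values, `is` compares value bits,
        // so +0.0 and -0.0 are equal but not identical.
        double a = +0.0, b = -0.0;
        assert(a == b);
        assert(a !is b);

        // Complex operands are compared part by part, as in test309 above.
        creal c1 = +0.0 + 0.0i;
        creal c2 = +0.0 - 0.0i;
        assert(c1 == c2);
        assert(c1 !is c2);

        // The reworked core.internal.hash coalesces signed zeroes and NaNs,
        // mirroring the unittest added at the top of that module.
        assert(hashOf(+0.0) == hashOf(-0.0));
        assert(hashOf(double.nan) == hashOf(-double.nan));
    }

Comparing each part through build_float_identity, rather than memcmp over the whole creal, keeps the unspecified padding bytes between the two parts out of the result.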