module core.internal.hash;
import core.internal.traits : Unconst;

// If true, ensure that positive zero and negative zero have the same hash.
// Historically typeid(float).getHash did this but hashOf(float) did not.
private enum floatCoalesceZeroes = true;
// If true, ensure that all NaNs of the same floating point type have the same hash.
// Historically typeid(float).getHash didn't do this but hashOf(float) did.
private enum floatCoalesceNaNs = true;

private enum hasCallableToHash(T) = __traits(compiles,
    {
        size_t hash = ((T* x) => (*x).toHash())(null);
    });


private enum isFinalClassWithAddressBasedHash(T) = __traits(isFinalClass, T)
    // Use __traits(compiles, ...) in case there are multiple overloads of `toHash`.
    && __traits(compiles, {static assert(&Object.toHash is &T.toHash);});

private template isCppClassWithoutHash(T)
{
    static if (!is(T == class) && !is(T == interface))
        enum isCppClassWithoutHash = false;
    else
        enum bool isCppClassWithoutHash = __traits(getLinkage, T) == "C++"
            && !is(immutable T* : immutable Object*) && !hasCallableToHash!T;
}


/+
Is it valid to calculate a hash code for T based on the bits of its
representation? Always false for interfaces, dynamic arrays, and
associative arrays. False for all classes except final classes that do
not override `toHash`.

Note: according to the spec as of
https://github.com/dlang/dlang.org/commit/d66eff16491b0664c0fc00ba80a7aa291703f1f2
the contents of unnamed padding between fields are undefined. Currently
this hashing implementation assumes that the padding contents (if any)
for all instances of `T` are the same. The correctness of this
assumption is yet to be verified.
+/
private template canBitwiseHash(T)
{
    static if (is(T EType == enum))
        enum canBitwiseHash = .canBitwiseHash!EType;
    else static if (__traits(isFloating, T))
        enum canBitwiseHash = !(floatCoalesceZeroes || floatCoalesceNaNs);
    else static if (__traits(isScalar, T))
        enum canBitwiseHash = true;
    else static if (is(T == class))
    {
        enum canBitwiseHash = isFinalClassWithAddressBasedHash!T || isCppClassWithoutHash!T;
    }
    else static if (is(T == interface))
    {
        enum canBitwiseHash = isCppClassWithoutHash!T;
    }
    else static if (is(T == struct))
    {
        static if (hasCallableToHash!T || __traits(isNested, T))
            enum canBitwiseHash = false;
        else
        {
            import core.internal.traits : allSatisfy;
            enum canBitwiseHash = allSatisfy!(.canBitwiseHash, typeof(T.tupleof));
        }
    }
    else static if (is(T == union))
    {
        // Right now we always bytewise hash unions that lack callable `toHash`.
        enum canBitwiseHash = !hasCallableToHash!T;
    }
    else static if (is(T E : E[]))
    {
        static if (__traits(isStaticArray, T))
            enum canBitwiseHash = (T.length == 0) || .canBitwiseHash!E;
        else
            enum canBitwiseHash = false;
    }
    else static if (__traits(isAssociativeArray, T))
    {
        enum canBitwiseHash = false;
    }
    else
    {
        static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)),
            "Internal error: unanticipated type "~T.stringof);
        enum canBitwiseHash = true;
    }
}
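
// Illustrative sketch (added example, not part of the original module): a few
// expected outcomes of `canBitwiseHash`, assuming the default settings above
// (floatCoalesceZeroes and floatCoalesceNaNs both true).
unittest
{
    static assert( canBitwiseHash!int);        // plain scalar
    static assert(!canBitwiseHash!float);      // zero/NaN coalescing forbids bitwise hashing
    static assert( canBitwiseHash!(int[4]));   // static array of bitwise-hashable elements
    static assert(!canBitwiseHash!(int[]));    // dynamic arrays are never bitwise hashed
    static struct S { int a; long b; }         // no toHash, no context pointer
    static assert( canBitwiseHash!S);
}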

//enum hash. CTFE depends on base type
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (is(T == enum) && !__traits(isScalar, T))
{
    static if (is(T EType == enum)) {} //for EType
    return hashOf(cast(EType) val, seed);
}

//CTFE ready (depends on base type).
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T)
{
    import core.internal.convert : toUbyte;
    // FIXME:
    // We would like to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    return bytesHashWithExactSizeAndAlignment!T(toUbyte(val), seed);
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    static if (T.length == 0)
    {
        return bytesHashAlignedBy!size_t((ubyte[]).init, seed);
    }
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    {
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!T((cast(const(ubyte)*) &val)[0 .. T.sizeof], seed);
    }
}

//CTFE ready (depends on base type).
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && !canBitwiseHash!T)
{
    // FIXME:
    // We would like to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    /+ hash like a dynamic array +/
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    return hashOf(val[], seed);
}
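
// Illustrative sketch (added example, not part of the original module): hashing
// a static array is deterministic, so equal contents always produce equal
// hashes, with or without a seed.
unittest
{
    int[4] a = [1, 2, 3, 4];
    int[4] b = [1, 2, 3, 4];
    assert(hashOf(a) == hashOf(b));
    assert(hashOf(a, 99) == hashOf(b, 99));
}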

//dynamic array hash
size_t hashOf(T)(scope const T val, size_t seed = 0)
if (is(T == S[], S) && (__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
    import core.internal.convert : toUbyte;
    alias ElementType = typeof(val[0]);
    static if (!canBitwiseHash!ElementType)
    {
        size_t hash = seed;
        foreach (ref o; val)
        {
            hash = hashOf(hashOf(o), hash); // double hashing to match TypeInfo.getHash
        }
        return hash;
    }
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    // ubyte-convertible array (arithmetic types and structs without toHash); CTFE ready for arithmetic types and structs without reference fields
    {
        return bytesHashAlignedBy!ElementType(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!ElementType((cast(const(ubyte)*) val.ptr)[0 .. ElementType.sizeof*val.length], seed);
    }
}

//dynamic array hash
size_t hashOf(T)(T val, size_t seed = 0)
if (is(T == S[], S) && !(__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
    size_t hash = seed;
    foreach (ref o; val)
    {
        hash = hashOf(hashOf(o), hash); // double hashing because TypeInfo.getHash doesn't accept a seed value
    }
    return hash;
}
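
// Illustrative sketch (added example, not part of the original module): slices
// with equal contents hash equally; only the referenced elements matter, not
// the identity of the underlying memory.
unittest
{
    immutable(char)[] s = "hello";
    char[] t = "hello".dup;
    assert(hashOf(s) == hashOf(t));

    int[] x = [1, 2, 3];
    assert(hashOf(x) == hashOf(x.dup));
}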

// Coalesces +0.0/-0.0 and NaN values so that floating-point values which
// should hash identically map to a single canonical representation.
private F coalesceFloat(F)(const F val)
if (__traits(isFloating, val) && !is(F == __vector) && !is(F : creal))
{
    static if (floatCoalesceZeroes)
        if (val == cast(F) 0)
            return cast(F) 0;
    static if (floatCoalesceNaNs)
        if (val != val)
            return F.nan;
    return val;
}

//scalar type hash
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (__traits(isScalar, T) && !is(T == __vector))
{
    static if (is(T V : V*))
    {
        if (__ctfe)
        {
            if (val is null) return 0;
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        }
        size_t result = cast(size_t) val;
        return result ^ (result >> 4);
    }
    else static if (is(T EType == enum) && is(typeof(val[0])))
    {
        // Enum type whose base type is vector.
        return hashOf(cast(EType) val);
    }
    else static if (__traits(isIntegral, T))
    {
        static if (T.sizeof <= size_t.sizeof)
            return val;
        else
            return cast(size_t) (val ^ (val >>> (size_t.sizeof * 8)));
    }
    else static if (is(T : creal))
    {
        return hashOf(coalesceFloat(val.re), hashOf(coalesceFloat(val.im)));
    }
    else
    {
        static assert(__traits(isFloating, T));
        auto data = coalesceFloat(val);
        static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
            return *cast(const uint*) &data;
        else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
            return hashOf(*cast(const ulong*) &data);
        else
        {
            import core.internal.convert : floatSize, toUbyte;
            return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], 0);
        }
    }
}
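
// Illustrative sketch (added example, not part of the original module): with
// floatCoalesceZeroes and floatCoalesceNaNs enabled, +0.0/-0.0 and all NaNs of
// a given type hash identically.
unittest
{
    assert(hashOf(0.0) == hashOf(-0.0));
    assert(hashOf(double.nan) == hashOf(-double.nan));
    assert(hashOf(0.0f, 7) == hashOf(-0.0f, 7));
}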

//scalar type hash
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (__traits(isScalar, T) && !is(T == __vector))
{
    static if (is(T V : V*))
    {
        if (__ctfe)
        {
            if (val is null) return hashOf(size_t(0), seed);
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        }
        return hashOf(cast(size_t) val, seed);
    }
    else static if (is(T EType == enum) && is(typeof(val[0])))
    {
        // Enum type whose base type is vector.
        return hashOf(cast(EType) val, seed);
    }
    else static if (__traits(isIntegral, val) && T.sizeof <= size_t.sizeof)
    {
        static if (size_t.sizeof < ulong.sizeof)
        {
            //MurmurHash3 32-bit single round
            enum uint c1 = 0xcc9e2d51;
            enum uint c2 = 0x1b873593;
            enum uint c3 = 0xe6546b64;
            enum uint r1 = 15;
            enum uint r2 = 13;
        }
        else
        {
            //Half of MurmurHash3 64-bit single round
            //(omits second interleaved update)
            enum ulong c1 = 0x87c37b91114253d5;
            enum ulong c2 = 0x4cf5ad432745937f;
            enum ulong c3 = 0x52dce729;
            enum uint r1 = 31;
            enum uint r2 = 27;
        }
        size_t h = c1 * val;
        h = (h << r1) | (h >>> (size_t.sizeof * 8 - r1));
        h = (h * c2) ^ seed;
        h = (h << r2) | (h >>> (size_t.sizeof * 8 - r2));
        return h * 5 + c3;
    }
    else static if (__traits(isIntegral, val) && T.sizeof > size_t.sizeof)
    {
        static foreach (i; 0 .. T.sizeof / size_t.sizeof)
            seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed);
        return seed;
    }
    else static if (is(T : creal))
    {
        return hashOf(val.re, hashOf(val.im, seed));
    }
    else static if (__traits(isFloating, T))
    {
        auto data = coalesceFloat(val);
        static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
            return hashOf(*cast(const uint*) &data, seed);
        else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
            return hashOf(*cast(const ulong*) &data, seed);
        else
        {
            import core.internal.convert : floatSize, toUbyte;
            return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], seed);
        }
    }
    else
    {
        static assert(0);
    }
}

size_t hashOf(T)(scope const T val, size_t seed = 0) @safe @nogc nothrow pure
if (is(T == __vector)) // excludes enum types
{
    static if (__traits(isFloating, T) && (floatCoalesceZeroes || floatCoalesceNaNs))
    {
        if (__ctfe)
        {
            // Workaround for CTFE bug.
            static if (is(immutable typeof(val[0]) == immutable E, E)) {} // Get E.
            E[T.sizeof / E.sizeof] array;
            foreach (i; 0 .. T.sizeof / E.sizeof)
                array[i] = val[i];
            return hashOf(array, seed);
        }
        return hashOf(val.array, seed);
    }
    else
    {
        import core.internal.convert : toUbyte;
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
}

//typeof(null) hash. CTFE supported
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (!is(T == enum) && is(T : typeof(null)))
{
    return 0;
}

//typeof(null) hash. CTFE supported
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && is(T : typeof(null)))
{
    return hashOf(size_t(0), seed);
}

private enum _hashOfStruct =
q{
    enum bool isChained = is(typeof(seed) : size_t);
    static if (!isChained) enum size_t seed = 0;
    static if (hasCallableToHash!(typeof(val))) //CTFE depends on toHash()
    {
        static if (!__traits(isSame, typeof(val), __traits(parent, val.toHash))
            && is(typeof(val is null)))
        {
            static if (isChained)
                return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))), seed);
            else
                return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))));
        }
        else
        {
            static if (isChained)
                return hashOf(cast(size_t) val.toHash(), seed);
            else
                return val.toHash();
        }
    }
    else
    {
        import core.internal.convert : toUbyte;
        static if (__traits(hasMember, T, "toHash") && is(typeof(T.toHash) == function))
        {
            // TODO: in the future maybe this should be changed to a static
            // assert(0), because if there's a `toHash` the programmer probably
            // expected it to be called and a compilation failure here will
            // expose a bug in their code.
            //   In the future we also might want to disallow non-const toHash
            // altogether.
            pragma(msg, "Warning: struct "~__traits(identifier, T)
                ~" has method toHash, however it cannot be called with "
                ~typeof(val).stringof~" this.");
            static if (__traits(compiles, __traits(getLocation, T.toHash)))
            {
                enum file = __traits(getLocation, T.toHash)[0];
                enum line = __traits(getLocation, T.toHash)[1].stringof;
                pragma(msg, "  ",__traits(identifier, T),".toHash defined here: ",file,"(",line,")");
            }
        }

        static if (T.tupleof.length == 0)
        {
            return seed;
        }
        else static if ((is(T == struct) && !canBitwiseHash!T) || T.tupleof.length == 1)
        {
            static if (isChained) size_t h = seed;
            static foreach (i, F; typeof(val.tupleof))
            {
                static if (__traits(isStaticArray, F))
                {
                    static if (i == 0 && !isChained) size_t h = 0;
                    static if (F.sizeof > 0 && canBitwiseHash!F)
                        // May use smallBytesHash instead of bytesHash.
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    else
                        // We can avoid the "double hashing" the top-level version uses
                        // for consistency with TypeInfo.getHash.
                        foreach (ref e; val.tupleof[i])
                            h = hashOf(e, h);
                }
                else static if (is(F == struct) || is(F == union))
                {
                    static if (hasCallableToHash!F)
                    {
                        static if (!__traits(isSame, F, __traits(parent, val.tupleof[i].toHash))
                            && is(typeof(val.tupleof[i] is null)))
                        {
                            static if (i == 0 && !isChained)
                                size_t h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)));
                            else
                                h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)), h);
                        }
                        else
                        {
                            static if (i == 0 && !isChained)
                                size_t h = val.tupleof[i].toHash();
                            else
                                h = hashOf(cast(size_t) val.tupleof[i].toHash(), h);
                        }
                    }
                    else static if (F.tupleof.length == 1)
                    {
                        // Handle the single member case separately to avoid unnecessarily using bytesHash.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i].tupleof[0]);
                        else
                            h = hashOf(val.tupleof[i].tupleof[0], h);
                    }
                    else static if (canBitwiseHash!F)
                    {
                        // May use smallBytesHash instead of bytesHash.
                        static if (i == 0 && !isChained) size_t h = 0;
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    }
                    else
                    {
                        // Nothing special happening.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i]);
                        else
                            h = hashOf(val.tupleof[i], h);
                    }
                }
                else
                {
                    // Nothing special happening.
                    static if (i == 0 && !isChained)
                        size_t h = hashOf(val.tupleof[i]);
                    else
                        h = hashOf(val.tupleof[i], h);
                }
            }
            return h;
        }
        else static if (is(typeof(toUbyte(val)) == const(ubyte)[])) // CTFE ready for structs without reference fields
        {
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).hashOf(&val).
            return bytesHashAlignedBy!T(toUbyte(val), seed);
        }
        else // CTFE unsupported
        {
            assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
            const(ubyte)[] bytes = (() @trusted => (cast(const(ubyte)*)&val)[0 .. T.sizeof])();
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).hashOf(&val).
            return bytesHashAlignedBy!T(bytes, seed);
        }
    }
};

//struct or union hash
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !is(T == const) && !is(T == immutable)
    && canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash
size_t hashOf(T)(auto ref T val)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash
size_t hashOf(T)(auto ref T val, size_t seed)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash - https://issues.dlang.org/show_bug.cgi?id=19332 (support might be removed in future)
size_t hashOf(T)(scope auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && (is(T == const) || is(T == immutable))
    && canBitwiseHash!T && !canBitwiseHash!(Unconst!T))
{
    mixin(_hashOfStruct);
}
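
// Illustrative sketch (added example, not part of the original module): a
// struct with a callable toHash is hashed through that member; the unseeded
// overload returns its result directly, the seeded one mixes it with the seed.
unittest
{
    static struct S
    {
        size_t toHash() const nothrow @safe { return 42; }
    }
    assert(hashOf(S()) == 42);
    assert(hashOf(S(), 1) == hashOf(size_t(42), 1));
}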

//delegate hash. CTFE only if null.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate))
{
    if (__ctfe)
    {
        if (val is null) return hashOf(size_t(0), hashOf(size_t(0), seed));
        assert(0, "unable to compute hash of "~T.stringof~" at compile time");
    }
    return hashOf(val.ptr, hashOf(cast(void*) val.funcptr, seed));
}

//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    if (__ctfe) if (val is null) return 0;
    return hashOf(cast(const void*) val);
}

//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    if (__ctfe) if (val is null) return hashOf(size_t(0), seed);
    return hashOf(cast(const void*) val, seed);
}

//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    static if (__traits(compiles, {size_t h = val.toHash();}))
    {
        static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
                && is(typeof((ref P p) => p is null)))
            return val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T))) : 0;
        else
            return val ? val.toHash() : 0;
    }
    else
        return val ? (cast(Object)val).toHash() : 0;
}

//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    static if (__traits(compiles, {size_t h = val.toHash();}))
    {
        static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
                && is(typeof((ref P p) => p is null)))
            return hashOf(val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T)))
                              : size_t(0), seed);
        else
            return hashOf(val ? cast(size_t) val.toHash() : size_t(0), seed);
    }
    else
        return hashOf(val ? (cast(Object)val).toHash() : 0, seed);
}
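
// Illustrative sketch (added example, not part of the original module): a null
// reference of a class that relies on Object.toHash hashes to 0 in the
// unseeded overload.
unittest
{
    static class C { }
    C c = null;
    assert(hashOf(c) == 0);
}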

//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    static if (is(typeof(aa) : V[K], K, V)) {} // Put K & V in scope.
    static if (__traits(compiles, (ref K k, ref V v) nothrow => .hashOf(k) + .hashOf(v)))
        scope (failure) assert(0); // Allow compiler to infer nothrow.

    if (!aa.length) return 0;
    size_t h = 0;

    // The computed hash is independent of the foreach traversal order.
    foreach (ref key, ref val; aa)
        h += hashOf(hashOf(val), hashOf(key));
    return h;
}

//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa, size_t seed) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    return hashOf(hashOf(aa), seed);
}
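
// Illustrative sketch (added example, not part of the original module): because
// the per-entry hashes are summed, the result does not depend on insertion or
// traversal order.
unittest
{
    int[string] a, b;
    a["x"] = 1; a["y"] = 2;
    b["y"] = 2; b["x"] = 1;
    assert(hashOf(a) == hashOf(b));
}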

// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// This overload is for backwards compatibility.
@system pure nothrow @nogc
size_t bytesHash()(scope const(void)* buf, size_t len, size_t seed)
{
    return bytesHashAlignedBy!ubyte((cast(const(ubyte)*) buf)[0 .. len], seed);
}

private template bytesHashAlignedBy(AlignType)
{
    alias bytesHashAlignedBy = bytesHash!(AlignType.alignof >= uint.alignof);
}

private template bytesHashWithExactSizeAndAlignment(SizeAndAlignType)
{
    static if (SizeAndAlignType.alignof < uint.alignof
            ? SizeAndAlignType.sizeof <= 12
            : SizeAndAlignType.sizeof <= 10)
        alias bytesHashWithExactSizeAndAlignment = smallBytesHash;
    else
        alias bytesHashWithExactSizeAndAlignment = bytesHashAlignedBy!SizeAndAlignType;
}

// Fowler/Noll/Vo hash. http://www.isthe.com/chongo/tech/comp/fnv/
private size_t fnv()(scope const(ubyte)[] bytes, size_t seed) @nogc nothrow pure @safe
{
    static if (size_t.max <= uint.max)
        enum prime = (1U << 24) + (1U << 8) + 0x93U;
    else static if (size_t.max <= ulong.max)
        enum prime = (1UL << 40) + (1UL << 8) + 0xb3UL;
    else
        enum prime = (size_t(1) << 88) + (size_t(1) << 8) + size_t(0x3b);
    foreach (b; bytes)
        seed = (seed ^ b) * prime;
    return seed;
}
private alias smallBytesHash = fnv;
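
// Illustrative sketch (added example, not part of the original module): FNV
// folds each byte into the running state, so an empty input simply returns the
// seed, and here a single zero byte changes the result.
unittest
{
    assert(fnv(null, 123) == 123);
    immutable ubyte[1] one = [0];
    assert(fnv(one[], 123) != 123);
}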

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
private uint get32bits()(scope const(ubyte)* x) @nogc nothrow pure @system
{
    version (BigEndian)
    {
        return ((cast(uint) x[0]) << 24) | ((cast(uint) x[1]) << 16) | ((cast(uint) x[2]) << 8) | (cast(uint) x[3]);
    }
    else
    {
        return ((cast(uint) x[3]) << 24) | ((cast(uint) x[2]) << 16) | ((cast(uint) x[1]) << 8) | (cast(uint) x[0]);
    }
}

/+
Params:
    dataKnownToBeAligned = whether the data is known at compile time to be uint-aligned.
+/
@nogc nothrow pure @trusted
private size_t bytesHash(bool dataKnownToBeAligned)(scope const(ubyte)[] bytes, size_t seed)
{
    auto len = bytes.length;
    auto data = bytes.ptr;
    auto nblocks = len / 4;

    uint h1 = cast(uint)seed;

    enum uint c1 = 0xcc9e2d51;
    enum uint c2 = 0x1b873593;
    enum uint c3 = 0xe6546b64;

    //----------
    // body
    auto end_data = data+nblocks*uint.sizeof;
    for (; data!=end_data; data += uint.sizeof)
    {
        static if (dataKnownToBeAligned)
            uint k1 = __ctfe ? get32bits(data) : *(cast(const uint*) data);
        else
            uint k1 = get32bits(data);
        k1 *= c1;
        k1 = (k1 << 15) | (k1 >> (32 - 15));
        k1 *= c2;

        h1 ^= k1;
        h1 = (h1 << 13) | (h1 >> (32 - 13));
        h1 = h1*5+c3;
    }

    //----------
    // tail
    uint k1 = 0;

    switch (len & 3)
    {
        case 3: k1 ^= data[2] << 16; goto case;
        case 2: k1 ^= data[1] << 8;  goto case;
        case 1: k1 ^= data[0];
                k1 *= c1; k1 = (k1 << 15) | (k1 >> (32 - 15)); k1 *= c2; h1 ^= k1;
                goto default;
        default:
    }

    //----------
    // finalization
    h1 ^= len;
    // Force all bits of the hash block to avalanche.
    h1 = (h1 ^ (h1 >> 16)) * 0x85ebca6b;
    h1 = (h1 ^ (h1 >> 13)) * 0xc2b2ae35;
    h1 ^= h1 >> 16;
    return h1;
}
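
// Illustrative sketch (added example, not part of the original module): on
// uint-aligned input the aligned and unaligned instantiations read the same
// native-endian 32-bit blocks and therefore agree.
unittest
{
    immutable uint[2] blocks = [0x01020304, 0x05060708];
    auto bytes = (cast(const(ubyte)*) blocks.ptr)[0 .. blocks.sizeof];
    assert(bytesHash!true(bytes, 7) == bytesHash!false(bytes, 7));
}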