|
|
@@ -259,9 +259,7 @@ static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
|
|
|
u.ptr = key;
|
|
|
if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
|
|
|
const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
|
|
|
-#ifdef VALGRIND
|
|
|
const uint8_t *k8;
|
|
|
-#endif
|
|
|
|
|
|
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
|
|
|
while (length > 12)
|
|
|
@@ -283,9 +281,10 @@ static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
|
|
|
* does it on word boundaries, so is OK with this. But VALGRIND will
|
|
|
* still catch it and complain. The masking trick does make the hash
|
|
|
* noticably faster for short strings (like English words).
|
|
|
+ *
|
|
|
+ * Not in my testing with gcc 4.5 on an Intel i5 CPU, at least --RR.
|
|
|
*/
|
|
|
-#ifndef VALGRIND
|
|
|
-
|
|
|
+#if 0
|
|
|
switch(length)
|
|
|
{
|
|
|
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
|
|
|
@@ -437,9 +436,7 @@ static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
|
|
|
u.ptr = key;
|
|
|
if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
|
|
|
const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
|
|
|
-#ifdef VALGRIND
|
|
|
const uint8_t *k8;
|
|
|
-#endif
|
|
|
|
|
|
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
|
|
|
while (length > 12)
|
|
|
@@ -461,9 +458,10 @@ static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
|
|
|
* does it on word boundaries, so is OK with this. But VALGRIND will
|
|
|
* still catch it and complain. The masking trick does make the hash
|
|
|
* noticably faster for short strings (like English words).
|
|
|
+ *
|
|
|
+ * Not in my testing with gcc 4.5 on an Intel i5 CPU, at least --RR.
|
|
|
*/
|
|
|
-#ifndef VALGRIND
|
|
|
-
|
|
|
+#if 0
|
|
|
switch(length)
|
|
|
{
|
|
|
case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
|