Visualization Library

A lightweight C++ OpenGL middleware for 2D/3D graphics

src/vlCore/MurmurHash3.cpp

//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

#include <vlCore/MurmurHash3.hpp>

using namespace vl;

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE    __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else   // defined(_MSC_VER)

// #define  FORCE_INLINE __attribute__((always_inline))
#define FORCE_INLINE inline

inline u32 rotl32 ( u32 x, i8 r )
{
  return (x << r) | (x >> (32 - r));
}

inline u64 rotl64 ( u64 x, i8 r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE u32 getblock ( const u32 * p, int i )
{
  return p[i];
}

FORCE_INLINE u64 getblock ( const u64 * p, int i )
{
  return p[i];
}
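
// For illustration only (a hypothetical getblock_le, not present in this
// file): a big-endian target, or one that cannot perform unaligned word
// loads, could replace the plain read above with a byte-wise load that
// always assembles the little-endian value the algorithm expects, e.g.
//
//   FORCE_INLINE u32 getblock_le ( const u32 * p, int i )
//   {
//     const u8 * b = (const u8*)(p + i);
//     return (u32)b[0] | ((u32)b[1] << 8) | ((u32)b[2] << 16) | ((u32)b[3] << 24);
//   }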

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE u32 fmix ( u32 h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//----------

FORCE_INLINE u64 fmix ( u64 k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

//-----------------------------------------------------------------------------

void vl::MurmurHash3_x86_32 ( const void * key, int len, u32 seed, void * out )
{
  const u8 * data = (const u8*)key;
  const int nblocks = len / 4;

  u32 h1 = seed;

  u32 c1 = 0xcc9e2d51;
  u32 c2 = 0x1b873593;

  //----------
  // body

  const u32 * blocks = (const u32 *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    u32 k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const u8 * tail = (const u8*)(data + nblocks*4);

  u32 k1 = 0;

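  // The cases below fall through deliberately: each remaining tail byte is
  // OR'd into k1, and only then is k1 mixed into h1.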
  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(u32*)out = h1;
}

//-----------------------------------------------------------------------------

void vl::MurmurHash3_x86_128 ( const void * key, const int len, u32 seed, void * out )
{
  const u8 * data = (const u8*)key;
  const int nblocks = len / 16;

  u32 h1 = seed;
  u32 h2 = seed;
  u32 h3 = seed;
  u32 h4 = seed;

  u32 c1 = 0x239b961b;
  u32 c2 = 0xab0e9789;
  u32 c3 = 0x38b34ae5;
  u32 c4 = 0xa1e38b93;

  //----------
  // body

  const u32 * blocks = (const u32 *)(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    u32 k1 = getblock(blocks,i*4+0);
    u32 k2 = getblock(blocks,i*4+1);
    u32 k3 = getblock(blocks,i*4+2);
    u32 k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const u8 * tail = (const u8*)(data + nblocks*16);

  u32 k1 = 0;
  u32 k2 = 0;
  u32 k3 = 0;
  u32 k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((u32*)out)[0] = h1;
  ((u32*)out)[1] = h2;
  ((u32*)out)[2] = h3;
  ((u32*)out)[3] = h4;
}

//-----------------------------------------------------------------------------

void vl::MurmurHash3_x64_128 ( const void * key, const int len, const u32 seed, void * out )
{
  const u8 * data = (const u8*)key;
  const int nblocks = len / 16;

  u64 h1 = seed;
  u64 h2 = seed;

  u64 c1 = BIG_CONSTANT(0x87c37b91114253d5);
  u64 c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const u64 * blocks = (const u64 *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    u64 k1 = getblock(blocks,i*2+0);
    u64 k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const u8 * tail = (const u8*)(data + nblocks*16);

  u64 k1 = 0;
  u64 k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= u64(tail[14]) << 48;
  case 14: k2 ^= u64(tail[13]) << 40;
  case 13: k2 ^= u64(tail[12]) << 32;
  case 12: k2 ^= u64(tail[11]) << 24;
  case 11: k2 ^= u64(tail[10]) << 16;
  case 10: k2 ^= u64(tail[ 9]) << 8;
  case  9: k2 ^= u64(tail[ 8]) << 0;
           k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= u64(tail[ 7]) << 56;
  case  7: k1 ^= u64(tail[ 6]) << 48;
  case  6: k1 ^= u64(tail[ 5]) << 40;
  case  5: k1 ^= u64(tail[ 4]) << 32;
  case  4: k1 ^= u64(tail[ 3]) << 24;
  case  3: k1 ^= u64(tail[ 2]) << 16;
  case  2: k1 ^= u64(tail[ 1]) << 8;
  case  1: k1 ^= u64(tail[ 0]) << 0;
           k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((u64*)out)[0] = h1;
  ((u64*)out)[1] = h2;
}

//-----------------------------------------------------------------------------
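
Below is a minimal usage sketch, not part of the library's shipped documentation: it assumes the declarations in vlCore/MurmurHash3.hpp match the definitions above and that vl::u32 and vl::u64 are the library's fixed-width typedefs. As the note at the top of the file points out, the x86 and x64 128-bit variants hash the same input to different values.

#include <vlCore/MurmurHash3.hpp>
#include <cstdio>

int main()
{
  const char msg[] = "hello world";
  const int len = sizeof(msg) - 1;   // hash the bytes, not the terminating '\0'
  const vl::u32 seed = 0;

  // 32-bit hash
  vl::u32 h32 = 0;
  vl::MurmurHash3_x86_32(msg, len, seed, &h32);

  // 128-bit hashes: the x86 and x64 variants yield different results
  vl::u32 h128_x86[4] = { 0, 0, 0, 0 };
  vl::MurmurHash3_x86_128(msg, len, seed, h128_x86);

  vl::u64 h128_x64[2] = { 0, 0 };
  vl::MurmurHash3_x64_128(msg, len, seed, h128_x64);

  std::printf("x86_32  = %08x\n", (unsigned)h32);
  std::printf("x86_128 = %08x %08x %08x %08x\n",
              (unsigned)h128_x86[0], (unsigned)h128_x86[1],
              (unsigned)h128_x86[2], (unsigned)h128_x86[3]);
  std::printf("x64_128 = %016llx %016llx\n",
              (unsigned long long)h128_x64[0], (unsigned long long)h128_x64[1]);
  return 0;
}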
