From 354bb40e75d94466e91fe6960523612c9d17ccfb Mon Sep 17 00:00:00 2001
From: Karen Arutyunov
Date: Thu, 2 Nov 2017 23:11:29 +0300
Subject: Add implementation

---
 mysql/mysys_ssl/my_murmur3.cpp | 134 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)
 create mode 100644 mysql/mysys_ssl/my_murmur3.cpp

diff --git a/mysql/mysys_ssl/my_murmur3.cpp b/mysql/mysys_ssl/my_murmur3.cpp
new file mode 100644
index 0000000..82dccb6
--- /dev/null
+++ b/mysql/mysys_ssl/my_murmur3.cpp
@@ -0,0 +1,134 @@
+/* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
+
+/*
+  Implementation of the 32-bit version of MurmurHash3, a fast
+  non-cryptographic hash function with good statistical properties,
+  based on public domain code by Austin Appleby.
+*/
+
+#include <my_murmur3.h>
+
+
+/*
+  Platform-specific implementations of the ROTL32 helper.
+*/
+
+#if defined(_MSC_VER)
+
+/* Microsoft Visual Studio supports an intrinsic for rotate left. */
+
+#include <stdlib.h>
+
+#define ROTL32(x, y) _rotl(x, y)
+
+/*
+  Force inlining of the intrinsic even though the /Oi option is turned
+  off in release builds.
+*/
+#pragma intrinsic(_rotl)
+
+#else // !defined(_MSC_VER)
+
+/*
+  Many other compilers should be able to optimize the function below
+  to a single instruction.
+*/
+
+inline uint32 rotl32(uint32 x, char r)
+{
+  return (x << r) | (x >> (32 - r));
+}
+
+#define ROTL32(x, y) rotl32(x, y)
+
+#endif // !defined(_MSC_VER)
+
+
+/**
+  Compute the 32-bit version of the MurmurHash3 hash for the key.
+
+  @param key   Key for which the hash value is to be computed.
+  @param len   Key length.
+  @param seed  Seed for the hash computation.
+
+  @note WARNING! Since MurmurHash3 is known to be susceptible to "hash DoS"
+        attacks, it should not be used in any situation where the attacker
+        has control over the keys being hashed and thus can cause
+        performance problems by degrading hash lookups into linear list
+        searches.
+
+  @returns Hash value for the key.
+*/
+
+uint32 murmur3_32(const uchar *key, size_t len, uint32 seed)
+{
+  const uchar *tail= key + (len - len % 4);
+
+  uint32 h1= seed;
+
+  /* Constants for magic numbers that are used more than once. */
+  const uint32 c1= 0xcc9e2d51;
+  const uint32 c2= 0x1b873593;
+
+  /* Body: process all 32-bit blocks in the key. */
+
+  for (const uchar *data= key; data != tail; data+= 4)
+  {
+    uint32 k1= uint4korr(data);
+
+    k1*= c1;
+    k1= ROTL32(k1, 15);
+    k1*= c2;
+
+    h1^= k1;
+    h1= ROTL32(h1, 13);
+    h1= h1 * 5 + 0xe6546b64;
+  }
+
+  /* Tail: handle the remaining len % 4 bytes. */
+
+  uint32 k1= 0;
+
+  switch (len % 4)
+  {
+  case 3:
+    k1^= static_cast<uint32>(tail[2]) << 16;
+    /* Fall through. */
+  case 2:
+    k1^= static_cast<uint32>(tail[1]) << 8;
+    /* Fall through. */
+  case 1:
+    k1^= tail[0];
+    k1*= c1;
+    k1= ROTL32(k1, 15);
+    k1*= c2;
+    h1^= k1;
+  }
+
+  /*
+    Finalization mix:
+    Add length and force all bits of a hash block to avalanche.
+  */
+
+  h1^= len;
+
+  h1^= h1 >> 16;
+  h1*= 0x85ebca6b;
+  h1^= h1 >> 13;
+  h1*= 0xc2b2ae35;
+  h1^= h1 >> 16;
+
+  return h1;
+}
--
cgit v1.1
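
For illustration, here is a minimal standalone sketch of how the murmur3_32 function added by this patch might be invoked. It is not part of the patch: the typedefs and the main() driver are assumptions made so the snippet builds outside the MySQL source tree, where uint32, uchar and uint4korr normally come from MySQL's own headers.

  // Usage sketch only; the typedefs below are assumed stand-ins for the
  // definitions that MySQL's headers provide.
  #include <cstdio>
  #include <cstring>
  #include <cstddef>

  typedef unsigned int  uint32;  // assumed to be 32 bits wide
  typedef unsigned char uchar;

  // Declaration of the function implemented in my_murmur3.cpp above.
  uint32 murmur3_32(const uchar *key, size_t len, uint32 seed);

  int main()
  {
    const char *msg= "hello, world";

    // Seed 0 is an arbitrary choice for this example; callers pick
    // their own (possibly randomized) seed.
    uint32 h= murmur3_32(reinterpret_cast<const uchar *>(msg),
                         strlen(msg), 0);

    printf("murmur3_32 = %08x\n", h);
    return 0;
  }

Note that, as the warning in the function's own documentation says, seeding does not make MurmurHash3 safe against hash-flooding, so it should not be used where an attacker controls the keys being hashed.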