misc.h

00001 #ifndef CRYPTOPP_MISC_H
00002 #define CRYPTOPP_MISC_H
00003 
00004 #include <cstring>
00005 #include "cryptlib.h"
00006 #include "smartptr.h"
00007 #include <string.h>             // for memcpy and memmove
00008 
00009 #ifdef _MSC_VER
00010         #include <stdlib.h>
00011         #if _MSC_VER >= 1400
00012                 // VC2005 workaround: disable declarations that conflict with winnt.h
00013                 #define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
00014                 #define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
00015                 #include <intrin.h>
00016                 #undef _interlockedbittestandset
00017                 #undef _interlockedbittestandreset
00018                 #define CRYPTOPP_FAST_ROTATE(x) 1
00019         #elif _MSC_VER >= 1300
00020                 #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 | (x) == 64)
00021         #else
00022                 #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00023         #endif
00024 #elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
00025         (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
00026         #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00027 #elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)     // depend on GCC's peephole optimization to generate rotate instructions
00028         #define CRYPTOPP_FAST_ROTATE(x) 1
00029 #else
00030         #define CRYPTOPP_FAST_ROTATE(x) 0
00031 #endif
00032 
00033 #ifdef __BORLANDC__
00034 #include <mem.h>
00035 #endif
00036 
00037 #if defined(__GNUC__) && defined(__linux__)
00038 #define CRYPTOPP_BYTESWAP_AVAILABLE
00039 #include <byteswap.h>
00040 #endif
00041 
00042 NAMESPACE_BEGIN(CryptoPP)
00043 
00044 // ************** compile-time assertion ***************
00045 
// Helper for CRYPTOPP_COMPILE_ASSERT: instantiating CompileAssert<false>
// declares an array of size -1, which is a compile error; CompileAssert<true>
// declares a (harmless) one-element array.
template <bool b>
struct CompileAssert
{
	static char dummy[2*b-1];
};
00051 
// CRYPTOPP_COMPILE_ASSERT(expr) fails to compile when expr is false.
// The check is compiled out in DLL import/export builds so the dummy
// static objects are not emitted there.
#define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
#if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
#else
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
#endif
// Two-level join so __LINE__ is macro-expanded before token pasting.
#define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
#define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
00060 
00061 // ************** misc classes ***************
00062 
//! Empty base class with no members, used as a placeholder base.
class CRYPTOPP_DLL Empty
{
};
00066 
//! Convenience class that publicly inherits from two base classes.
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};
00072 
//! Convenience class that publicly inherits from three base classes.
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};
00078 
//! Holds an instance of T as a protected member, so a class can mix in
//! ownership of an object via inheritance.
template <class T>
class ObjectHolder
{
protected:
	T m_object;	// the held instance
};
00085 
//! Base class that disables copying: the copy constructor and assignment
//! operator are declared private and left undefined.
class NotCopyable
{
public:
	NotCopyable() {}
private:
    NotCopyable(const NotCopyable &);
    void operator=(const NotCopyable &);
};
00094 
//! Default factory functor for Singleton: allocates a T with operator new.
template <class T>
struct NewObject
{
	T* operator()() const {return new T;}
};
00100 
/*! This class safely initializes a static object in a multithreaded environment without using locks.
	It may leak memory when two threads try to initialize the static object at the same time
	but this should be acceptable since each static object is only initialized once per session.
*/
template <class T, class F = NewObject<T>, int instance=0>
class Singleton
{
public:
	Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}

	// prevent this function from being inlined
	CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;

private:
	F m_objectFactory;	// invoked once to create the shared object
};
00117 
// Returns the shared instance, lazily creating it on first call.
// s_objectState: 0 = not created, 1 = creation in progress, 2 = ready.
template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
	static simple_ptr<T> s_pObject;
	static char s_objectState = 0;

retry:
	switch (s_objectState)
	{
	case 0:
		s_objectState = 1;
		try
		{
			s_pObject.m_p = m_objectFactory();
		}
		catch(...)
		{
			// Construction failed: reset so a later call can retry.
			s_objectState = 0;
			throw;
		}
		s_objectState = 2;
		break;
	case 1:
		// Another caller is mid-construction: busy-wait until it finishes.
		// NOTE(review): no atomic or memory barrier is used here, so the
		// "without using locks" thread-safety claim is best-effort on
		// multiprocessor systems — confirm for the target platforms.
		goto retry;
	default:
		break;
	}
	return *s_pObject.m_p;
}
00147 
00148 // ************** misc functions ***************
00149 
00150 #if (!__STDC_WANT_SECURE_LIB__)
00151 inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00152 {
00153         if (count > sizeInBytes)
00154                 throw InvalidArgument("memcpy_s: buffer overflow");
00155         memcpy(dest, src, count);
00156 }
00157 
00158 inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00159 {
00160         if (count > sizeInBytes)
00161                 throw InvalidArgument("memmove_s: buffer overflow");
00162         memmove(dest, src, count);
00163 }
00164 #endif
00165 
00166 // can't use std::min or std::max in MSVC60 or Cygwin 1.1.0
// Local replacement for std::min (MSVC60 and Cygwin 1.1.0 lack a usable one).
// Returns a on ties, matching std::min.
template <class T> inline const T& STDMIN(const T& a, const T& b)
{
	if (b < a)
		return b;
	return a;
}
00171 
// Safe min across two different integer types, returned as T1.  The
// compile-time assert requires the wider of the two types to be unsigned,
// so the cross-type comparison below cannot go wrong through sign extension.
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
	CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
	assert(a==0 || a>0);	// GCC workaround: get rid of the warning "comparison is always true due to limited range of data type"
	assert(b>=0);

	// Compare in whichever of the two types is wider.
	if (sizeof(T1)<=sizeof(T2))
		return b < (T2)a ? (T1)b : a;
	else
		return (T1)b < a ? (T1)b : a;
}
00183 
// Local replacement for std::max (MSVC60 and Cygwin 1.1.0 lack a usable one).
// Returns a on ties, matching std::max.
template <class T> inline const T& STDMAX(const T& a, const T& b)
{
	if (a < b)
		return b;
	return a;
}
00188 
// Evaluate x once; if the result is non-zero, return it from the caller.
#define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue

// GETBYTE(x, y) extracts byte y (0 = least significant) of integer x.
// this version of the macro is fastest on Pentium 3 and Pentium 4 with MSVC 6 SP5 w/ Processor Pack
#define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
// these may be faster on other CPUs/compilers
// #define GETBYTE(x, y) (unsigned int)(((x)>>(8*(y)))&255)
// #define GETBYTE(x, y) (((byte *)&(x))[y])

// Same extraction, but the result is typed as byte rather than unsigned int.
#define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
00198 
// Returns the parity (1 if an odd number of bits are set) of value.
// XOR-folds the halves of the word together until the parity of every
// bit has accumulated in bit 0.
template <class T>
unsigned int Parity(T value)
{
	unsigned int shift = 8*sizeof(value)/2;
	while (shift > 0)
	{
		value ^= value >> shift;
		shift /= 2;
	}
	return (unsigned int)value & 1;
}
00206 
// Returns the smallest number of bytes needed to represent value (0 for 0).
template <class T>
unsigned int BytePrecision(const T &value)
{
	if (!value)
		return 0;

	// Binary search for the highest non-zero byte; the top set bit
	// always lies in the half-open bit range [lo, hi).
	unsigned int lo = 0, hi = 8*sizeof(value);
	while (hi - lo > 8)
	{
		const unsigned int mid = (lo + hi) / 2;
		if (value >> mid)
			lo = mid;
		else
			hi = mid;
	}
	return hi / 8;
}
00226 
// Returns the smallest number of bits needed to represent value (0 for 0).
template <class T>
unsigned int BitPrecision(const T &value)
{
	if (!value)
		return 0;

	// Binary search for the highest set bit; it always lies in [lo, hi).
	unsigned int lo = 0, hi = 8*sizeof(value);
	while (hi - lo > 1)
	{
		const unsigned int mid = (lo + hi) / 2;
		if (value >> mid)
			lo = mid;
		else
			hi = mid;
	}
	return hi;
}
00246 
// Keeps only the low `size` bits of value; a size of at least the full
// bit width returns value unchanged (and avoids an undefined full-width shift).
template <class T>
inline T Crop(T value, size_t size)
{
	if (size >= 8*sizeof(value))
		return value;
	return T(value & ((T(1) << size) - 1));
}
00255 
// Converts `from` into `to`, reporting false when the round trip changes
// the value or flips its sign (e.g. a negative converted to unsigned).
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
	to = (T2)from;
	const bool valueKept = (from == to);
	const bool signKept = ((from > 0) == (to > 0));
	return valueKept && signKept;
}
00264 
// Number of bytes needed to hold bitCount bits (rounded up).
inline size_t BitsToBytes(size_t bitCount)
{
	const size_t rounded = bitCount + 7;
	return rounded / 8;
}
00269 
00270 inline size_t BytesToWords(size_t byteCount)
00271 {
00272         return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
00273 }
00274 
00275 inline size_t BitsToWords(size_t bitCount)
00276 {
00277         return ((bitCount+WORD_BITS-1)/(WORD_BITS));
00278 }
00279 
00280 inline size_t BitsToDwords(size_t bitCount)
00281 {
00282         return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
00283 }
00284 
00285 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
00286 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
00287 
// True when n is a positive power of two, i.e. exactly one bit is set;
// clearing the lowest set bit of such a value yields zero.
template <class T>
inline bool IsPowerOf2(const T &n)
{
	if (n <= 0)
		return false;
	return (n & (n-1)) == 0;
}
00293 
// a mod b, valid only when b is a power of two (reduces with a mask).
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
	assert(IsPowerOf2(b));
	const T2 mask = b - 1;
	return T2(a) & mask;
}
00300 
// Largest multiple of m that is <= n; power-of-two m is reduced by
// masking, anything else by the % operator.
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
	if (!IsPowerOf2(m))
		return n - n%m;
	return n - ModPowerOf2(n, m);
}
00309 
// Smallest multiple of m that is >= n.  Wraparound in n+m-1 (computed in
// the promoted type, which is why the expression is not hoisted into a
// T1 local) is detected and rejected.
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
	if (n+m-1 < n)
		throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
	return RoundDownToMultipleOf(n+m-1, m);
}
00317 
// Returns the alignment requirement of T, using whatever the compiler
// provides (__alignof / __alignof__), with fallbacks for older compilers.
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)	// VC60 workaround
{
#if CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86
	if (sizeof(T) < 16)
		return 1;			// alignment not needed on x86 and x64
#endif

#if (_MSC_VER >= 1300)
	return __alignof(T);
#elif defined(__GNUC__)
	return __alignof__(T);
#elif defined(CRYPTOPP_SLOW_WORD64)
	// Cap at 4 when 64-bit words are emulated.
	return UnsignedMin(4U, sizeof(T));
#else
	// Last resort: assume size == alignment.
	return sizeof(T);
#endif
}
00336 
00337 inline bool IsAlignedOn(const void *p, unsigned int alignment)
00338 {
00339         return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
00340 }
00341 
// True when p satisfies the alignment requirement of type T.
template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL)	// VC60 workaround
{
	return IsAlignedOn(p, GetAlignmentOf<T>());
}
00347 
// The byte order of the machine this header is compiled for, selected at
// compile time from the IS_LITTLE_ENDIAN configuration macro.
#ifdef IS_LITTLE_ENDIAN
	typedef LittleEndian NativeByteOrder;
#else
	typedef BigEndian NativeByteOrder;
#endif
00353 
// Returns this machine's byte order as a ByteOrder enum value.
inline ByteOrder GetNativeByteOrder()
{
	return NativeByteOrder::ToEnum();
}
00358 
00359 inline bool NativeByteOrderIs(ByteOrder order)
00360 {
00361         return order == GetNativeByteOrder();
00362 }
00363 
// Converts an integral value to its string representation in the given
// base.  Digits above 9 are lowercase letters ('a' = 10); negative values
// get a leading '-'.
template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
	if (a == 0)
		return "0";
	const bool isNegative = a < 0;
	if (isNegative)
		a = 0-a;	// written as 0-a because VC .NET rejects unary minus here
	std::string digits;	// least significant digit first
	while (a > 0)
	{
		T d = a % base;
		digits.push_back(char(d + (d < 10 ? '0' : 'a' - 10)));
		a /= base;
	}
	if (isNegative)
		digits.push_back('-');
	return std::string(digits.rbegin(), digits.rend());
}
00386 
// a - b, clamped at zero instead of wrapping below it.
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
	if (a > b)
		return T1(a - b);
	return T1(0);
}
00392 
00393 template <class T>
00394 inline CipherDir GetCipherDir(const T &obj)
00395 {
00396         return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
00397 }
00398 
00399 CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();
00400 
00401 inline void IncrementCounterByOne(byte *inout, unsigned int s)
00402 {
00403         for (int i=s-1, carry=1; i>=0 && carry; i--)
00404                 carry = !++inout[i];
00405 }
00406 
// output = input + 1, treating the s bytes as a big-endian counter.
// Walks from the least significant byte writing input[i]+1 while the
// carry propagates; once the carry stops, the untouched i+1 leading
// bytes are copied over unchanged.
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
	int i, carry;
	for (i=s-1, carry=1; i>=0 && carry; i--)
		carry = ((output[i] = input[i]+1) == 0);
	memcpy_s(output, s, input, i+1);
}
00414 
00415 // ************** rotate functions ***************
00416 
// Rotates x left by y bits; y must be less than the bit width of T.
// The y==0 case is returned directly: the previous single-expression form
// shifted right by the full bit width, which is undefined behavior in C++
// even though the assert contract allows y==0.
template <class T> inline T rotlFixed(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00422 
// Rotates x right by y bits; y must be less than the bit width of T.
// The y==0 case is returned directly: the previous single-expression form
// shifted left by the full bit width, which is undefined behavior in C++.
template <class T> inline T rotrFixed(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00428 
// Rotates x left by a runtime-variable y; y must be less than the bit
// width of T.  Guards y==0 to avoid an undefined full-width shift.
template <class T> inline T rotlVariable(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00434 
// Rotates x right by a runtime-variable y; y must be less than the bit
// width of T.  Guards y==0 to avoid an undefined full-width shift.
template <class T> inline T rotrVariable(T x, unsigned int y)
{
	assert(y < sizeof(T)*8);
	return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00440 
// Rotates x left by y mod the bit width of T; any y is accepted.
// After reduction y can be 0 (e.g. for any multiple of the width), so the
// guard is required to avoid an undefined shift by the full bit width.
template <class T> inline T rotlMod(T x, unsigned int y)
{
	y %= sizeof(T)*8;
	return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00446 
// Rotates x right by y mod the bit width of T; any y is accepted.
// After reduction y can be 0, so the guard is required to avoid an
// undefined shift by the full bit width.
template <class T> inline T rotrMod(T x, unsigned int y)
{
	y %= sizeof(T)*8;
	return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00452 
#ifdef _MSC_VER

// MSVC: route the 32-bit rotates through the _lrotl/_lrotr intrinsics.
// The "Fixed" forms guard y==0 themselves so the intrinsic is only used
// for genuine rotations.

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _lrotr(x, y);
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
	return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
	return _lrotr(x, y);
}

#endif // #ifdef _MSC_VER
00490 
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 calls a function instead of using the rotate instruction when using these instructions

// MSVC 7.0 and later: 64-bit rotates via the _rotl64/_rotr64 intrinsics.

template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
	return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
	return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
00529 
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 gives undefined externals with these

// MSVC 8.0 and later: 16-bit and 8-bit rotates via the _rotl16/_rotr16
// and _rotl8/_rotr8 intrinsics.

template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
	return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
	return _rotr16(x, y);
}

template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
	assert(y < 8*sizeof(x));
	return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
	return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
	return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400
00602 
#if (defined(__MWERKS__) && TARGET_CPU_PPC)

// Metrowerks on PowerPC: 32-bit rotates via the rlwinm (immediate) and
// rlwnm (register) rotate-left instructions; a right rotation by y is a
// left rotation by 32-y.

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
	assert(y < 32);
	return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
	assert(y < 32);
	return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
	assert(y < 32);
	return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
	assert(y < 32);
	return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
	return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
	return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
00640 
00641 // ************** endian reversal ***************
00642 
00643 template <class T>
00644 inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
00645 {
00646         if (order == LITTLE_ENDIAN_ORDER)
00647                 return GETBYTE(value, index);
00648         else
00649                 return GETBYTE(value, sizeof(T)-index-1);
00650 }
00651 
// Reversing the bytes of a single byte is the identity.
inline byte ByteReverse(byte value)
{
	return value;
}
00656 
// Swaps the two bytes of a 16-bit word, using the fastest primitive the
// platform offers.
inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
	return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
	return _byteswap_ushort(value);
#else
	return rotlFixed(value, 8U);	// rotating 16 bits by 8 swaps the bytes
#endif
}
00667 
// Reverses the four bytes of a 32-bit word using the best available
// primitive: inline bswap on x86 GCC, byteswap.h on Linux, a PowerPC
// byte-reversed load, an MSVC intrinsic, or shift/rotate arithmetic.
inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
	__asm__ ("bswap %0" : "=r" (value) : "0" (value));
	return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
	return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
	return (word32)__lwbrx(&value,0);
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
	return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
	// 5 instructions with rotate instruction, 9 without
	return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
	// 6 instructions with rotate instruction, 8 without
	value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
	return rotlFixed(value, 16U);
#endif
}
00688 
#ifdef WORD64_AVAILABLE
// Reverses the eight bytes of a 64-bit word: native bswap/intrinsic where
// available, two 32-bit halves when 64-bit ops are slow, otherwise a
// swap-adjacent ladder finished by a 32-bit rotate.
inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
	__asm__ ("bswap %0" : "=r" (value) : "0" (value));
	return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
	return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
	return _byteswap_uint64(value);
#elif defined(CRYPTOPP_SLOW_WORD64)
	return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
	value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
	value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
	return rotlFixed(value, 32U);
#endif
}
#endif
00708 
00709 inline byte BitReverse(byte value)
00710 {
00711         value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
00712         value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
00713         return rotlFixed(value, 4U);
00714 }
00715 
00716 inline word16 BitReverse(word16 value)
00717 {
00718         value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
00719         value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
00720         value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
00721         return ByteReverse(value);
00722 }
00723 
00724 inline word32 BitReverse(word32 value)
00725 {
00726         value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
00727         value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
00728         value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
00729         return ByteReverse(value);
00730 }
00731 
#ifdef WORD64_AVAILABLE
// Reverses all 64 bits: two 32-bit halves when 64-bit ops are slow,
// otherwise the bit/pair/nibble swap ladder finished by a byte swap.
inline word64 BitReverse(word64 value)
{
#ifdef CRYPTOPP_SLOW_WORD64
	return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
	value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
	value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
	value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
	return ByteReverse(value);
#endif
}
#endif
00745 
// Dispatches to the fixed-width BitReverse overload matching sizeof(T).
// The sizeof comparisons are compile-time constants, so dead branches
// are optimized away.
template <class T>
inline T BitReverse(T value)
{
	if (sizeof(T) == 1)
		return (T)BitReverse((byte)value);
	else if (sizeof(T) == 2)
		return (T)BitReverse((word16)value);
	else if (sizeof(T) == 4)
		return (T)BitReverse((word32)value);
	else
	{
#ifdef WORD64_AVAILABLE
		assert(sizeof(T) == 8);
		return (T)BitReverse((word64)value);
#else
		// No 64-bit type available: unsupported width.
		assert(false);
		return 0;
#endif
	}
}
00766 
00767 template <class T>
00768 inline T ConditionalByteReverse(ByteOrder order, T value)
00769 {
00770         return NativeByteOrderIs(order) ? value : ByteReverse(value);
00771 }
00772 
// Byte-swaps each of the byteCount/sizeof(T) words from `in` into `out`.
// byteCount must be a whole number of words; in-place operation is fine.
template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
	assert(byteCount % sizeof(T) == 0);
	const size_t words = byteCount / sizeof(T);
	for (size_t w = 0; w != words; ++w)
		out[w] = ByteReverse(in[w]);
}
00781 
00782 template <class T>
00783 inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
00784 {
00785         if (!NativeByteOrderIs(order))
00786                 ByteReverse(out, in, byteCount);
00787         else if (in != out)
00788                 memcpy_s(out, byteCount, in, byteCount);
00789 }
00790 
// Loads a user-supplied byte string into an array of words: copy the key
// bytes, zero-pad the rest of the word array, then fix up the byte order
// of just the words the key actually touched.
template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
	const size_t U = sizeof(T);
	assert(inlen <= outlen*U);
	memcpy(out, in, inlen);
	memset((byte *)out+inlen, 0, outlen*U-inlen);
	ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}
00800 
00801 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00802 inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
00803 {
00804         return block[0];
00805 }
00806 
00807 inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
00808 {
00809         return (order == BIG_ENDIAN_ORDER)
00810                 ? block[1] | (block[0] << 8)
00811                 : block[0] | (block[1] << 8);
00812 }
00813 
00814 inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
00815 {
00816         return (order == BIG_ENDIAN_ORDER)
00817                 ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
00818                 : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
00819 }
00820 
#ifdef WORD64_AVAILABLE
// Assembles a word64 from eight unaligned bytes in the requested order,
// folding in one byte at a time starting from the most significant one.
inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
	word64 v = 0;
	if (order == BIG_ENDIAN_ORDER)
		for (int i = 0; i < 8; i++)
			v = (v << 8) | block[i];
	else
		for (int i = 7; i >= 0; i--)
			v = (v << 8) | block[i];
	return v;
}
#endif
00845 
00846 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
00847 {
00848         block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
00849 }
00850 
00851 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
00852 {
00853         if (order == BIG_ENDIAN_ORDER)
00854         {
00855                 if (xorBlock)
00856                 {
00857                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00858                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00859                 }
00860                 else
00861                 {
00862                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00863                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00864                 }
00865         }
00866         else
00867         {
00868                 if (xorBlock)
00869                 {
00870                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00871                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00872                 }
00873                 else
00874                 {
00875                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00876                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00877                 }
00878         }
00879 }
00880 
00881 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
00882 {
00883         if (order == BIG_ENDIAN_ORDER)
00884         {
00885                 if (xorBlock)
00886                 {
00887                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00888                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00889                         block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00890                         block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00891                 }
00892                 else
00893                 {
00894                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00895                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00896                         block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00897                         block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00898                 }
00899         }
00900         else
00901         {
00902                 if (xorBlock)
00903                 {
00904                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00905                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00906                         block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00907                         block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00908                 }
00909                 else
00910                 {
00911                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00912                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00913                         block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00914                         block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00915                 }
00916         }
00917 }
00918 
00919 #ifdef WORD64_AVAILABLE
00920 inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
00921 {
00922         if (order == BIG_ENDIAN_ORDER)
00923         {
00924                 if (xorBlock)
00925                 {
00926                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
00927                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
00928                         block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
00929                         block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
00930                         block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00931                         block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00932                         block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00933                         block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00934                 }
00935                 else
00936                 {
00937                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
00938                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
00939                         block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
00940                         block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
00941                         block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00942                         block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00943                         block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00944                         block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00945                 }
00946         }
00947         else
00948         {
00949                 if (xorBlock)
00950                 {
00951                         block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00952                         block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00953                         block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00954                         block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00955                         block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
00956                         block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
00957                         block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
00958                         block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
00959                 }
00960                 else
00961                 {
00962                         block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
00963                         block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
00964                         block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
00965                         block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
00966                         block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
00967                         block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
00968                         block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
00969                         block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
00970                 }
00971         }
00972 }
00973 #endif
00974 #endif  // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00975 
00976 template <class T>
00977 inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
00978 {
00979 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00980         if (!assumeAligned)
00981                 return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
00982         assert(IsAligned<T>(block));
00983 #endif
00984         return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
00985 }
00986 
// Convenience overload: reads a word of type T from block in the given
// byte order and stores it into result instead of returning it.
template <class T>
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
{
	result = GetWord<T>(assumeAligned, order, block);
}
00992 
00993 template <class T>
00994 inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
00995 {
00996 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00997         if (!assumeAligned)
00998                 return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
00999         assert(IsAligned<T>(block));
01000         assert(IsAligned<T>(xorBlock));
01001 #endif
01002         *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
01003 }
01004 
01005 template <class T, class B, bool A=true>
01006 class GetBlock
01007 {
01008 public:
01009         GetBlock(const void *block)
01010                 : m_block((const byte *)block) {}
01011 
01012         template <class U>
01013         inline GetBlock<T, B, A> & operator()(U &x)
01014         {
01015                 CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
01016                 x = GetWord<T>(A, B::ToEnum(), m_block);
01017                 m_block += sizeof(T);
01018                 return *this;
01019         }
01020 
01021 private:
01022         const byte *m_block;
01023 };
01024 
01025 template <class T, class B, bool A=false>
01026 class PutBlock
01027 {
01028 public:
01029         PutBlock(const void *xorBlock, void *block)
01030                 : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
01031 
01032         template <class U>
01033         inline PutBlock<T, B, A> & operator()(U x)
01034         {
01035                 PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
01036                 m_block += sizeof(T);
01037                 if (m_xorBlock)
01038                         m_xorBlock += sizeof(T);
01039                 return *this;
01040         }
01041 
01042 private:
01043         const byte *m_xorBlock;
01044         byte *m_block;
01045 };
01046 
// Bundles the matching GetBlock/PutBlock types for word type T and byte
// order B; GA and PA are the respective alignment assumptions.
template <class T, class B, bool GA=true, bool PA=false>
struct BlockGetAndPut
{
	// function needed because of C++ grammatical ambiguity between expression-statements and declarations
	static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
	typedef PutBlock<T, B, PA> Put;
};
01054 
01055 template <class T>
01056 std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
01057 {
01058         if (!NativeByteOrderIs(order))
01059                 value = ByteReverse(value);
01060 
01061         return std::string((char *)&value, sizeof(value));
01062 }
01063 
01064 template <class T>
01065 T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
01066 {
01067         T value = 0;
01068         memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
01069         return NativeByteOrderIs(order) ? value : ByteReverse(value);
01070 }
01071 
01072 // ************** help remove warning on g++ ***************
01073 
// Primary template: selects the shift implementation according to
// whether the shift amount would overflow (be >= the operand's bit
// width), which is undefined behavior for the built-in operators.
template <bool overflow> struct SafeShifter;

// Overflowing case: the mathematically correct result of shifting by
// the full width or more is 0, so return that.  The parameters are
// intentionally unnamed: they are unused here, and naming them draws
// -Wunused-parameter warnings on g++ — the very warnings this section
// exists to remove.
template<> struct SafeShifter<true>
{
	template <class T>
	static inline T RightShift(T, unsigned int)
	{
		return 0;
	}

	template <class T>
	static inline T LeftShift(T, unsigned int)
	{
		return 0;
	}
};
01090 
// Primary template, declared earlier in this section; repeated here so
// this specialization reads as a self-contained unit.
template <bool overflow> struct SafeShifter;

// Non-overflowing case: the shift amount is strictly less than the bit
// width of T, so the built-in operators are well defined — use them.
template<> struct SafeShifter<false>
{
	template <class T>
	static inline T RightShift(T v, unsigned int n)
	{
		return v >> n;
	}

	template <class T>
	static inline T LeftShift(T v, unsigned int n)
	{
		return v << n;
	}
};
01105 
01106 template <unsigned int bits, class T>
01107 inline T SafeRightShift(T value)
01108 {
01109         return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
01110 }
01111 
01112 template <unsigned int bits, class T>
01113 inline T SafeLeftShift(T value)
01114 {
01115         return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
01116 }
01117 
01118 NAMESPACE_END
01119 
01120 #endif

Generated on Fri Feb 6 00:56:24 2009 for Crypto++ by  doxygen 1.4.7