Counter-Strike: Global Offensive Source Code

// vmac.cpp - written and placed in the public domain by Wei Dai
// based on Ted Krovetz's public domain vmac.c and draft-krovetz-vmac-01.txt

#include "pch.h"
#include "vmac.h"
#include "argnames.h"
#include "cpu.h"

NAMESPACE_BEGIN(CryptoPP)

#if defined(_MSC_VER) && !CRYPTOPP_BOOL_SLOW_WORD64
#include <intrin.h>
#endif

#define VMAC_BOOL_WORD128 (defined(CRYPTOPP_WORD128_AVAILABLE) && !defined(CRYPTOPP_X64_ASM_AVAILABLE))

#ifdef __BORLANDC__
#define const	// Turbo C++ 2006 workaround
#endif
static const word64 p64   = W64LIT(0xfffffffffffffeff);  /* 2^64 - 257 prime  */
static const word64 m62   = W64LIT(0x3fffffffffffffff);  /* 62-bit mask       */
static const word64 m63   = W64LIT(0x7fffffffffffffff);  /* 63-bit mask       */
static const word64 m64   = W64LIT(0xffffffffffffffff);  /* 64-bit mask       */
static const word64 mpoly = W64LIT(0x1fffffff1fffffff);  /* Poly key mask     */
#ifdef __BORLANDC__
#undef const
#endif
#if VMAC_BOOL_WORD128
#ifdef __powerpc__
// workaround GCC Bug 31690: ICE with const __uint128_t and C++ front-end
#define m126 ((word128(m62)<<64)|m64)
#else
static const word128 m126 = (word128(m62)<<64)|m64;  /* 126-bit mask */
#endif
#endif
void VMAC_Base::UncheckedSetKey(const byte *userKey, unsigned int keylength, const NameValuePairs &params)
{
    int digestLength = params.GetIntValueWithDefault(Name::DigestSize(), DefaultDigestSize());
    if (digestLength != 8 && digestLength != 16)
        throw InvalidArgument("VMAC: DigestSize must be 8 or 16");
    m_is128 = digestLength == 16;

    m_L1KeyLength = params.GetIntValueWithDefault(Name::L1KeyLength(), 128);
    if (m_L1KeyLength <= 0 || m_L1KeyLength % 128 != 0)
        throw InvalidArgument("VMAC: L1KeyLength must be a positive multiple of 128");

    AllocateBlocks();

    BlockCipher &cipher = AccessCipher();
    cipher.SetKey(userKey, keylength, params);
    unsigned int blockSize = cipher.BlockSize();
    unsigned int blockSizeInWords = blockSize / sizeof(word64);
    SecBlock<word64> out(blockSizeInWords);
    SecByteBlock in;
    in.CleanNew(blockSize);
    size_t i;

    /* Fill nh key */
    in[0] = 0x80;
    cipher.AdvancedProcessBlocks(in, NULL, (byte *)m_nhKey(), m_nhKeySize()*sizeof(word64), cipher.BT_InBlockIsCounter);
    ConditionalByteReverse<word64>(BIG_ENDIAN_ORDER, m_nhKey(), m_nhKey(), m_nhKeySize()*sizeof(word64));

    /* Fill poly key */
    in[0] = 0xC0;
    in[15] = 0;
    for (i = 0; i <= (size_t)m_is128; i++)
    {
        cipher.ProcessBlock(in, out.BytePtr());
        m_polyState()[i*4+2] = GetWord<word64>(true, BIG_ENDIAN_ORDER, out.BytePtr()) & mpoly;
        m_polyState()[i*4+3] = GetWord<word64>(true, BIG_ENDIAN_ORDER, out.BytePtr()+8) & mpoly;
        in[15]++;
    }

    /* Fill ip key */
    in[0] = 0xE0;
    in[15] = 0;
    word64 *l3Key = m_l3Key();
    for (i = 0; i <= (size_t)m_is128; i++)
        do
        {
            cipher.ProcessBlock(in, out.BytePtr());
            l3Key[i*2+0] = GetWord<word64>(true, BIG_ENDIAN_ORDER, out.BytePtr());
            l3Key[i*2+1] = GetWord<word64>(true, BIG_ENDIAN_ORDER, out.BytePtr()+8);
            in[15]++;
        } while ((l3Key[i*2+0] >= p64) || (l3Key[i*2+1] >= p64));

    m_padCached = false;
    size_t nonceLength;
    const byte *nonce = GetIVAndThrowIfInvalid(params, nonceLength);
    Resynchronize(nonce, (int)nonceLength);
}
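
/* A minimal keying/usage sketch (illustrative only, not part of this file). It
   assumes the standard Crypto++ MessageAuthenticationCode interface that
   VMAC_Base implements; the key/nonce/message values are placeholders:

       VMAC<AES, 64> vmac;                        // VMAC(AES)-64, 8-byte tags
       byte key[AES::DEFAULT_KEYLENGTH] = {};     // 16-byte AES key
       byte nonce[16] = {};                       // see GetNextIV() below
       vmac.SetKeyWithIV(key, sizeof(key), nonce, sizeof(nonce));
       vmac.Update(message, messageLen);
       byte tag[8];
       vmac.Final(tag);
*/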
void VMAC_Base::GetNextIV(RandomNumberGenerator &rng, byte *IV)
{
    SimpleKeyingInterface::GetNextIV(rng, IV);
    IV[0] &= 0x7f;
}
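
// Note: clearing the high bit of the first nonce byte keeps generated nonces
// strictly below 2^127, the nonce space prescribed for a 128-bit block cipher
// by the VMAC draft cited at the top of this file.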
void VMAC_Base::Resynchronize(const byte *nonce, int len)
{
    size_t length = ThrowIfInvalidIVLength(len);
    size_t s = IVSize();
    byte *storedNonce = m_nonce();

    if (m_is128)
    {
        memset(storedNonce, 0, s-length);
        memcpy(storedNonce+s-length, nonce, length);
        AccessCipher().ProcessBlock(storedNonce, m_pad());
    }
    else
    {
        if (m_padCached && (storedNonce[s-1] | 1) == (nonce[length-1] | 1))
        {
            m_padCached = VerifyBufsEqual(storedNonce+s-length, nonce, length-1);
            for (size_t i=0; m_padCached && i<s-length; i++)
                m_padCached = (storedNonce[i] == 0);
        }
        if (!m_padCached)
        {
            memset(storedNonce, 0, s-length);
            memcpy(storedNonce+s-length, nonce, length-1);
            storedNonce[s-1] = nonce[length-1] & 0xfe;
            AccessCipher().ProcessBlock(storedNonce, m_pad());
            m_padCached = true;
        }
        storedNonce[s-1] = nonce[length-1];
    }
    m_isFirstBlock = true;
    Restart();
}
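
/* Pad caching, in brief: for 64-bit tags the nonce is encrypted with its last
   bit forced to zero, and that bit merely selects which 8-byte half of the
   16-byte pad TruncatedFinal() adds to the hash. Two nonces differing only in
   the last bit therefore share one block-cipher invocation, which is why the
   cache test above compares nonces with the low bit masked off (| 1). */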
void VMAC_Base::HashEndianCorrectedBlock(const word64 *data)
{
    assert(false);
    throw 0;
}
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && CRYPTOPP_BOOL_X86
#pragma warning(disable: 4731)	// frame pointer register 'ebp' modified by inline assembly code

void
#ifdef __GNUC__
__attribute__ ((noinline))		// Intel Compiler 9.1 workaround
#endif
VMAC_Base::VHASH_Update_SSE2(const word64 *data, size_t blocksRemainingInWord64, int tagPart)
{
    const word64 *nhK = m_nhKey();
    word64 *polyS = m_polyState();
    word32 L1KeyLength = m_L1KeyLength;

#ifdef __GNUC__
    word32 temp;
    __asm__ __volatile__
    (
    AS2(    mov     %%ebx, %0)
    AS2(    mov     %1, %%ebx)
    ".intel_syntax noprefix;"
#else
#if _MSC_VER < 1300 || defined(__INTEL_COMPILER)
    char isFirstBlock = m_isFirstBlock;
    AS2(    mov     ebx, [L1KeyLength])
    AS2(    mov     dl, [isFirstBlock])
#else
    AS2(    mov     ecx, this)
    AS2(    mov     ebx, [ecx+m_L1KeyLength])
    AS2(    mov     dl, [ecx+m_isFirstBlock])
#endif
    AS2(    mov     eax, tagPart)
    AS2(    shl     eax, 4)
    AS2(    mov     edi, nhK)
    AS2(    add     edi, eax)
    AS2(    add     eax, eax)
    AS2(    add     eax, polyS)
    AS2(    mov     esi, data)
    AS2(    mov     ecx, blocksRemainingInWord64)
#endif
    AS2(    shr     ebx, 3)
    AS1(    push    ebp)
    AS2(    sub     esp, 12)
    ASL(4)
    AS2(    mov     ebp, ebx)
    AS2(    cmp     ecx, ebx)
    AS2(    cmovl   ebp, ecx)
    AS2(    sub     ecx, ebp)
    AS2(    lea     ebp, [edi+8*ebp])	// end of nhK
    AS2(    movq    mm6, [esi])
    AS2(    paddq   mm6, [edi])
    AS2(    movq    mm5, [esi+8])
    AS2(    paddq   mm5, [edi+8])
    AS2(    add     esi, 16)
    AS2(    add     edi, 16)
    AS2(    movq    mm4, mm6)
    ASS(    pshufw  mm2, mm6, 1, 0, 3, 2)
    AS2(    pmuludq mm6, mm5)
    ASS(    pshufw  mm3, mm5, 1, 0, 3, 2)
    AS2(    pmuludq mm5, mm2)
    AS2(    pmuludq mm2, mm3)
    AS2(    pmuludq mm3, mm4)
    AS2(    pxor    mm7, mm7)
    AS2(    movd    [esp], mm6)
    AS2(    psrlq   mm6, 32)
    AS2(    movd    [esp+4], mm5)
    AS2(    psrlq   mm5, 32)
    AS2(    cmp     edi, ebp)
    ASJ(    je,     1, f)
    ASL(0)
    AS2(    movq    mm0, [esi])
    AS2(    paddq   mm0, [edi])
    AS2(    movq    mm1, [esi+8])
    AS2(    paddq   mm1, [edi+8])
    AS2(    add     esi, 16)
    AS2(    add     edi, 16)
    AS2(    movq    mm4, mm0)
    AS2(    paddq   mm5, mm2)
    ASS(    pshufw  mm2, mm0, 1, 0, 3, 2)
    AS2(    pmuludq mm0, mm1)
    AS2(    movd    [esp+8], mm3)
    AS2(    psrlq   mm3, 32)
    AS2(    paddq   mm5, mm3)
    ASS(    pshufw  mm3, mm1, 1, 0, 3, 2)
    AS2(    pmuludq mm1, mm2)
    AS2(    pmuludq mm2, mm3)
    AS2(    pmuludq mm3, mm4)
    AS2(    movd    mm4, [esp])
    AS2(    paddq   mm7, mm4)
    AS2(    movd    mm4, [esp+4])
    AS2(    paddq   mm6, mm4)
    AS2(    movd    mm4, [esp+8])
    AS2(    paddq   mm6, mm4)
    AS2(    movd    [esp], mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    paddq   mm6, mm0)
    AS2(    movd    [esp+4], mm1)
    AS2(    psrlq   mm1, 32)
    AS2(    paddq   mm5, mm1)
    AS2(    cmp     edi, ebp)
    ASJ(    jne,    0, b)
    ASL(1)
    AS2(    paddq   mm5, mm2)
    AS2(    movd    [esp+8], mm3)
    AS2(    psrlq   mm3, 32)
    AS2(    paddq   mm5, mm3)
    AS2(    movd    mm4, [esp])
    AS2(    paddq   mm7, mm4)
    AS2(    movd    mm4, [esp+4])
    AS2(    paddq   mm6, mm4)
    AS2(    movd    mm4, [esp+8])
    AS2(    paddq   mm6, mm4)
    AS2(    lea     ebp, [8*ebx])
    AS2(    sub     edi, ebp)	// reset edi to start of nhK
    AS2(    movd    [esp], mm7)
    AS2(    psrlq   mm7, 32)
    AS2(    paddq   mm6, mm7)
    AS2(    movd    [esp+4], mm6)
    AS2(    psrlq   mm6, 32)
    AS2(    paddq   mm5, mm6)
    AS2(    psllq   mm5, 2)
    AS2(    psrlq   mm5, 2)

#define a0 [eax+2*4]
#define a1 [eax+3*4]
#define a2 [eax+0*4]
#define a3 [eax+1*4]
#define k0 [eax+2*8+2*4]
#define k1 [eax+2*8+3*4]
#define k2 [eax+2*8+0*4]
#define k3 [eax+2*8+1*4]
    AS2(    test    dl, dl)
    ASJ(    jz,     2, f)
    AS2(    movd    mm1, k0)
    AS2(    movd    mm0, [esp])
    AS2(    paddq   mm0, mm1)
    AS2(    movd    a0, mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    movd    mm1, k1)
    AS2(    movd    mm2, [esp+4])
    AS2(    paddq   mm1, mm2)
    AS2(    paddq   mm0, mm1)
    AS2(    movd    a1, mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    paddq   mm5, k2)
    AS2(    paddq   mm0, mm5)
    AS2(    movq    a2, mm0)
    AS2(    xor     edx, edx)
    ASJ(    jmp,    3, f)
    ASL(2)
    AS2(    movd    mm0, a3)
    AS2(    movq    mm4, mm0)
    AS2(    pmuludq mm0, k3)	// a3*k3
    AS2(    movd    mm1, a0)
    AS2(    pmuludq mm1, k2)	// a0*k2
    AS2(    movd    mm2, a1)
    AS2(    movd    mm6, k1)
    AS2(    pmuludq mm2, mm6)	// a1*k1
    AS2(    movd    mm3, a2)
    AS2(    psllq   mm0, 1)
    AS2(    paddq   mm0, mm5)
    AS2(    movq    mm5, mm3)
    AS2(    movd    mm7, k0)
    AS2(    pmuludq mm3, mm7)	// a2*k0
    AS2(    pmuludq mm4, mm7)	// a3*k0
    AS2(    pmuludq mm5, mm6)	// a2*k1
    AS2(    paddq   mm0, mm1)
    AS2(    movd    mm1, a1)
    AS2(    paddq   mm4, mm5)
    AS2(    movq    mm5, mm1)
    AS2(    pmuludq mm1, k2)	// a1*k2
    AS2(    paddq   mm0, mm2)
    AS2(    movd    mm2, a0)
    AS2(    paddq   mm0, mm3)
    AS2(    movq    mm3, mm2)
    AS2(    pmuludq mm2, k3)	// a0*k3
    AS2(    pmuludq mm3, mm7)	// a0*k0
    AS2(    movd    [esp+8], mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    pmuludq mm7, mm5)	// a1*k0
    AS2(    pmuludq mm5, k3)	// a1*k3
    AS2(    paddq   mm0, mm1)
    AS2(    movd    mm1, a2)
    AS2(    pmuludq mm1, k2)	// a2*k2
    AS2(    paddq   mm0, mm2)
    AS2(    paddq   mm0, mm4)
    AS2(    movq    mm4, mm0)
    AS2(    movd    mm2, a3)
    AS2(    pmuludq mm2, mm6)	// a3*k1
    AS2(    pmuludq mm6, a0)	// a0*k1
    AS2(    psrlq   mm0, 31)
    AS2(    paddq   mm0, mm3)
    AS2(    movd    mm3, [esp])
    AS2(    paddq   mm0, mm3)
    AS2(    movd    mm3, a2)
    AS2(    pmuludq mm3, k3)	// a2*k3
    AS2(    paddq   mm5, mm1)
    AS2(    movd    mm1, a3)
    AS2(    pmuludq mm1, k2)	// a3*k2
    AS2(    paddq   mm5, mm2)
    AS2(    movd    mm2, [esp+4])
    AS2(    psllq   mm5, 1)
    AS2(    paddq   mm0, mm5)
    AS2(    psllq   mm4, 33)
    AS2(    movd    a0, mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    paddq   mm6, mm7)
    AS2(    movd    mm7, [esp+8])
    AS2(    paddq   mm0, mm6)
    AS2(    paddq   mm0, mm2)
    AS2(    paddq   mm3, mm1)
    AS2(    psllq   mm3, 1)
    AS2(    paddq   mm0, mm3)
    AS2(    psrlq   mm4, 1)
    AS2(    movd    a1, mm0)
    AS2(    psrlq   mm0, 32)
    AS2(    por     mm4, mm7)
    AS2(    paddq   mm0, mm4)
    AS2(    movq    a2, mm0)
#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
    ASL(3)
    AS2(    test    ecx, ecx)
    ASJ(    jnz,    4, b)
    AS2(    add     esp, 12)
    AS1(    pop     ebp)
    AS1(    emms)
#ifdef __GNUC__
    ".att_syntax prefix;"
    AS2(    mov     %0, %%ebx)
    : "=m" (temp)
    : "m" (L1KeyLength), "c" (blocksRemainingInWord64), "S" (data), "D" (nhK+tagPart*2), "d" (m_isFirstBlock), "a" (polyS+tagPart*4)
    : "memory", "cc"
    );
#endif
}
#endif
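
/* In outline, the MMX routine above mirrors the portable code that follows:
   the loop at label 0 is the NH pass (pmuludq/paddq accumulate 32x32->64-bit
   partial products of data+key words), and the code between labels 2 and 3 is
   one polynomial-evaluation step modulo 2^127-1 on the 32-bit limbs a0..a3
   against key limbs k0..k3, the same computation as poly_step further below. */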
#if VMAC_BOOL_WORD128
#define DeclareNH(a) word128 a=0
#define MUL64(rh,rl,i1,i2) {word128 p = word128(i1)*(i2); rh = word64(p>>64); rl = word64(p);}
#define AccumulateNH(a, b, c) a += word128(b)*(c)
#define Multiply128(r, i1, i2) r = word128(word64(i1)) * word64(i2)
#else
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
#define MUL32(a, b) __emulu(word32(a), word32(b))
#else
#define MUL32(a, b) ((word64)((word32)(a)) * (word32)(b))
#endif
#if defined(CRYPTOPP_X64_ASM_AVAILABLE)
#define DeclareNH(a) word64 a##0=0, a##1=0
#define MUL64(rh,rl,i1,i2) asm ("mulq %3" : "=a"(rl), "=d"(rh) : "a"(i1), "g"(i2) : "cc");
#define AccumulateNH(a, b, c) asm ("mulq %3; addq %%rax, %0; adcq %%rdx, %1" : "+r"(a##0), "+r"(a##1) : "a"(b), "g"(c) : "%rdx", "cc");
#define ADD128(rh,rl,ih,il) asm ("addq %3, %1; adcq %2, %0" : "+r"(rh),"+r"(rl) : "r"(ih),"r"(il) : "cc");
#elif defined(_MSC_VER) && !CRYPTOPP_BOOL_SLOW_WORD64
#define DeclareNH(a) word64 a##0=0, a##1=0
#define MUL64(rh,rl,i1,i2) (rl) = _umul128(i1,i2,&(rh));
#define AccumulateNH(a, b, c) {\
    word64 ph, pl;\
    pl = _umul128(b,c,&ph);\
    a##0 += pl;\
    a##1 += ph + (a##0 < pl);}
#else
#define VMAC_BOOL_32BIT 1
#define DeclareNH(a) word64 a##0=0, a##1=0, a##2=0
#define MUL64(rh,rl,i1,i2) \
    {   word64 _i1 = (i1), _i2 = (i2); \
        word64 m1= MUL32(_i1,_i2>>32); \
        word64 m2= MUL32(_i1>>32,_i2); \
        rh = MUL32(_i1>>32,_i2>>32); \
        rl = MUL32(_i1,_i2); \
        ADD128(rh,rl,(m1 >> 32),(m1 << 32)); \
        ADD128(rh,rl,(m2 >> 32),(m2 << 32)); \
    }
#define AccumulateNH(a, b, c) {\
    word64 p = MUL32(b, c);\
    a##1 += word32((p)>>32);\
    a##0 += word32(p);\
    p = MUL32((b)>>32, c);\
    a##2 += word32((p)>>32);\
    a##1 += word32(p);\
    p = MUL32((b)>>32, (c)>>32);\
    a##2 += p;\
    p = MUL32(b, (c)>>32);\
    a##1 += word32(p);\
    a##2 += word32(p>>32);}
#endif
#endif
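
/* All AccumulateNH variants above compute the same thing, a += b*c with a full
   64x64->128-bit product: via a native word128, via mulq/adcq or _umul128 on a
   two-limb accumulator, or (32-bit fallback) via three limbs that collect
   32x32->64-bit partial products and are only normalized once per L1 block. */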
#ifndef VMAC_BOOL_32BIT
#define VMAC_BOOL_32BIT 0
#endif
#ifndef ADD128
#define ADD128(rh,rl,ih,il) \
    {   word64 _il = (il); \
        (rl) += (_il); \
        (rh) += (ih) + ((rl) < (_il)); \
    }
#endif
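
// The portable ADD128 recovers the carry without intrinsics: after the
// unsigned addition (rl) += _il, wraparound means (rl) < _il exactly when the
// low word overflowed, so the comparison contributes the carry bit to rh.
// Example: rl = 0xffffffffffffffff, il = 2 leaves rl = 1 < 2, carrying 1.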
#if !(defined(_MSC_VER) && _MSC_VER < 1300)
template <bool T_128BitTag>
#endif
void VMAC_Base::VHASH_Update_Template(const word64 *data, size_t blocksRemainingInWord64)
{
#define INNER_LOOP_ITERATION(j) {\
    word64 d0 = ConditionalByteReverse(LITTLE_ENDIAN_ORDER, data[i+2*j+0]);\
    word64 d1 = ConditionalByteReverse(LITTLE_ENDIAN_ORDER, data[i+2*j+1]);\
    AccumulateNH(nhA, d0+nhK[i+2*j+0], d1+nhK[i+2*j+1]);\
    if (T_128BitTag)\
        AccumulateNH(nhB, d0+nhK[i+2*j+2], d1+nhK[i+2*j+3]);\
    }

#if (defined(_MSC_VER) && _MSC_VER < 1300)
    bool T_128BitTag = m_is128;
#endif
    size_t L1KeyLengthInWord64 = m_L1KeyLength / 8;
    size_t innerLoopEnd = L1KeyLengthInWord64;
    const word64 *nhK = m_nhKey();
    word64 *polyS = m_polyState();
    bool isFirstBlock = true;
    size_t i;

#if !VMAC_BOOL_32BIT
#if VMAC_BOOL_WORD128
    word128 a1, a2;
#else
    word64 ah1, al1, ah2, al2;
#endif
    word64 kh1, kl1, kh2, kl2;
    kh1=(polyS+0*4+2)[0]; kl1=(polyS+0*4+2)[1];
    if (T_128BitTag)
    {
        kh2=(polyS+1*4+2)[0]; kl2=(polyS+1*4+2)[1];
    }
#endif

    do
    {
        DeclareNH(nhA);
        DeclareNH(nhB);

        i = 0;
        if (blocksRemainingInWord64 < L1KeyLengthInWord64)
        {
            if (blocksRemainingInWord64 % 8)
            {
                innerLoopEnd = blocksRemainingInWord64 % 8;
                for (; i<innerLoopEnd; i+=2)
                    INNER_LOOP_ITERATION(0);
            }
            innerLoopEnd = blocksRemainingInWord64;
        }
        for (; i<innerLoopEnd; i+=8)
        {
            INNER_LOOP_ITERATION(0);
            INNER_LOOP_ITERATION(1);
            INNER_LOOP_ITERATION(2);
            INNER_LOOP_ITERATION(3);
        }
        blocksRemainingInWord64 -= innerLoopEnd;
        data += innerLoopEnd;

#if VMAC_BOOL_32BIT
        word32 nh0[2], nh1[2];
        word64 nh2[2];

        nh0[0] = word32(nhA0);
        nhA1 += (nhA0 >> 32);
        nh1[0] = word32(nhA1);
        nh2[0] = (nhA2 + (nhA1 >> 32)) & m62;

        if (T_128BitTag)
        {
            nh0[1] = word32(nhB0);
            nhB1 += (nhB0 >> 32);
            nh1[1] = word32(nhB1);
            nh2[1] = (nhB2 + (nhB1 >> 32)) & m62;
        }

#define a0 (((word32 *)(polyS+i*4))[2+NativeByteOrder::ToEnum()])
#define a1 (*(((word32 *)(polyS+i*4))+3-NativeByteOrder::ToEnum()))	// workaround for GCC 3.2
#define a2 (((word32 *)(polyS+i*4))[0+NativeByteOrder::ToEnum()])
#define a3 (*(((word32 *)(polyS+i*4))+1-NativeByteOrder::ToEnum()))
#define aHi ((polyS+i*4)[0])
#define k0 (((word32 *)(polyS+i*4+2))[2+NativeByteOrder::ToEnum()])
#define k1 (*(((word32 *)(polyS+i*4+2))+3-NativeByteOrder::ToEnum()))
#define k2 (((word32 *)(polyS+i*4+2))[0+NativeByteOrder::ToEnum()])
#define k3 (*(((word32 *)(polyS+i*4+2))+1-NativeByteOrder::ToEnum()))
#define kHi ((polyS+i*4+2)[0])

        if (isFirstBlock)
        {
            isFirstBlock = false;
            if (m_isFirstBlock)
            {
                m_isFirstBlock = false;
                for (i=0; i<=(size_t)T_128BitTag; i++)
                {
                    word64 t = (word64)nh0[i] + k0;
                    a0 = (word32)t;
                    t = (t >> 32) + nh1[i] + k1;
                    a1 = (word32)t;
                    aHi = (t >> 32) + nh2[i] + kHi;
                }
                continue;
            }
        }
        for (i=0; i<=(size_t)T_128BitTag; i++)
        {
            word64 p, t;
            word32 t2;

            p = MUL32(a3, 2*k3);
            p += nh2[i];
            p += MUL32(a0, k2);
            p += MUL32(a1, k1);
            p += MUL32(a2, k0);
            t2 = (word32)p;
            p >>= 32;
            p += MUL32(a0, k3);
            p += MUL32(a1, k2);
            p += MUL32(a2, k1);
            p += MUL32(a3, k0);
            t = (word64(word32(p) & 0x7fffffff) << 32) | t2;
            p >>= 31;
            p += nh0[i];
            p += MUL32(a0, k0);
            p += MUL32(a1, 2*k3);
            p += MUL32(a2, 2*k2);
            p += MUL32(a3, 2*k1);
            t2 = (word32)p;
            p >>= 32;
            p += nh1[i];
            p += MUL32(a0, k1);
            p += MUL32(a1, k0);
            p += MUL32(a2, 2*k3);
            p += MUL32(a3, 2*k2);
            a0 = t2;
            a1 = (word32)p;
            aHi = (p >> 32) + t;
        }

#undef a0
#undef a1
#undef a2
#undef a3
#undef aHi
#undef k0
#undef k1
#undef k2
#undef k3
#undef kHi
#else	// #if VMAC_BOOL_32BIT
        if (isFirstBlock)
        {
            isFirstBlock = false;
            if (m_isFirstBlock)
            {
                m_isFirstBlock = false;
#if VMAC_BOOL_WORD128
#define first_poly_step(a, kh, kl, m) a = (m & m126) + ((word128(kh) << 64) | kl)

                first_poly_step(a1, kh1, kl1, nhA);
                if (T_128BitTag)
                    first_poly_step(a2, kh2, kl2, nhB);
#else
#define first_poly_step(ah, al, kh, kl, mh, ml) {\
    mh &= m62;\
    ADD128(mh, ml, kh, kl); \
    ah = mh; al = ml;}

                first_poly_step(ah1, al1, kh1, kl1, nhA1, nhA0);
                if (T_128BitTag)
                    first_poly_step(ah2, al2, kh2, kl2, nhB1, nhB0);
#endif
                continue;
            }
            else
            {
#if VMAC_BOOL_WORD128
                a1 = (word128((polyS+0*4)[0]) << 64) | (polyS+0*4)[1];
#else
                ah1=(polyS+0*4)[0]; al1=(polyS+0*4)[1];
#endif
                if (T_128BitTag)
                {
#if VMAC_BOOL_WORD128
                    a2 = (word128((polyS+1*4)[0]) << 64) | (polyS+1*4)[1];
#else
                    ah2=(polyS+1*4)[0]; al2=(polyS+1*4)[1];
#endif
                }
            }
        }
#if VMAC_BOOL_WORD128
#define poly_step(a, kh, kl, m) \
    { word128 t1, t2, t3, t4;\
        Multiply128(t2, a>>64, kl);\
        Multiply128(t3, a, kh);\
        Multiply128(t1, a, kl);\
        Multiply128(t4, a>>64, 2*kh);\
        t2 += t3;\
        t4 += t1;\
        t2 += t4>>64;\
        a = (word128(word64(t2)&m63) << 64) | word64(t4);\
        t2 *= 2;\
        a += m & m126;\
        a += t2>>64;}

        poly_step(a1, kh1, kl1, nhA);
        if (T_128BitTag)
            poly_step(a2, kh2, kl2, nhB);
#else
#define poly_step(ah, al, kh, kl, mh, ml) \
    {   word64 t1h, t1l, t2h, t2l, t3h, t3l, z=0; \
        /* compute ab*cd, put bd into result registers */ \
        MUL64(t2h,t2l,ah,kl); \
        MUL64(t3h,t3l,al,kh); \
        MUL64(t1h,t1l,ah,2*kh); \
        MUL64(ah,al,al,kl); \
        /* add together ad + bc */ \
        ADD128(t2h,t2l,t3h,t3l); \
        /* add 2 * ac to result */ \
        ADD128(ah,al,t1h,t1l); \
        /* now (ah,al), (t2l,2*t2h) need summing */ \
        /* first add the high registers, carrying into t2h */ \
        ADD128(t2h,ah,z,t2l); \
        /* double t2h and add top bit of ah */ \
        t2h += t2h + (ah >> 63); \
        ah &= m63; \
        /* now add the low registers */ \
        mh &= m62; \
        ADD128(ah,al,mh,ml); \
        ADD128(ah,al,z,t2h); \
    }

        poly_step(ah1, al1, kh1, kl1, nhA1, nhA0);
        if (T_128BitTag)
            poly_step(ah2, al2, kh2, kl2, nhB1, nhB0);
#endif
#endif	// #if VMAC_BOOL_32BIT
    } while (blocksRemainingInWord64);

#if VMAC_BOOL_WORD128
    (polyS+0*4)[0]=word64(a1>>64); (polyS+0*4)[1]=word64(a1);
    if (T_128BitTag)
    {
        (polyS+1*4)[0]=word64(a2>>64); (polyS+1*4)[1]=word64(a2);
    }
#elif !VMAC_BOOL_32BIT
    (polyS+0*4)[0]=ah1; (polyS+0*4)[1]=al1;
    if (T_128BitTag)
    {
        (polyS+1*4)[0]=ah2; (polyS+1*4)[1]=al2;
    }
#endif
}
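
/* For orientation, a plain-C++ sketch of what one iteration of the loop above
   computes per L1 block, simplified to the VMAC_BOOL_WORD128 build and a
   64-bit tag (one lane, no first-block special case):

       word128 nh = 0;
       for (size_t j = 0; j < L1KeyLengthInWord64; j += 2)    // NH pass
           nh += word128(data[j] + nhK[j]) * (data[j+1] + nhK[j+1]);
       // poly_step then folds (nh & m126) into the running polynomial hash,
       // evaluating a = a*k + nh modulo 2^127 - 1.
*/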
inline void VMAC_Base::VHASH_Update(const word64 *data, size_t blocksRemainingInWord64)
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && CRYPTOPP_BOOL_X86
    if (HasSSE2())
    {
        VHASH_Update_SSE2(data, blocksRemainingInWord64, 0);
        if (m_is128)
            VHASH_Update_SSE2(data, blocksRemainingInWord64, 1);
        m_isFirstBlock = false;
    }
    else
#endif
    {
#if defined(_MSC_VER) && _MSC_VER < 1300
        VHASH_Update_Template(data, blocksRemainingInWord64);
#else
        if (m_is128)
            VHASH_Update_Template<true>(data, blocksRemainingInWord64);
        else
            VHASH_Update_Template<false>(data, blocksRemainingInWord64);
#endif
    }
}
size_t VMAC_Base::HashMultipleBlocks(const word64 *data, size_t length)
{
    size_t remaining = ModPowerOf2(length, m_L1KeyLength);
    VHASH_Update(data, (length-remaining)/8);
    return remaining;
}
static word64 L3Hash(const word64 *input, const word64 *l3Key, size_t len)
{
    word64 rh, rl, t, z=0;
    word64 p1 = input[0], p2 = input[1];
    word64 k1 = l3Key[0], k2 = l3Key[1];

    /* fully reduce (p1,p2)+(len,0) mod p127 */
    t = p1 >> 63;
    p1 &= m63;
    ADD128(p1, p2, len, t);
    /* At this point, (p1,p2) is at most 2^127+(len<<64) */
    t = (p1 > m63) + ((p1 == m63) & (p2 == m64));
    ADD128(p1, p2, z, t);
    p1 &= m63;

    /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
    t = p1 + (p2 >> 32);
    t += (t >> 32);
    t += (word32)t > 0xfffffffeU;
    p1 += (t >> 32);
    p2 += (p1 << 32);

    /* compute (p1+k1)%p64 and (p2+k2)%p64 */
    p1 += k1;
    p1 += (0 - (p1 < k1)) & 257;
    p2 += k2;
    p2 += (0 - (p2 < k2)) & 257;

    /* compute (p1+k1)*(p2+k2)%p64 */
    MUL64(rh, rl, p1, p2);
    t = rh >> 56;
    ADD128(t, rl, z, rh);
    rh <<= 8;
    ADD128(t, rl, z, rh);
    t += t << 8;
    rl += t;
    rl += (0 - (rl < t)) & 257;
    rl += (0 - (rl > p64-1)) & 257;
    return rl;
}
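
/* The tail of L3Hash reduces the 128-bit product (rh,rl) modulo
   p64 = 2^64 - 257 using the identity 2^64 == 257 (mod p64): each high word h
   contributes h*257 = (h<<8) + h to the low word (applied in two ADD128 steps,
   then t += t<<8), and the final two conditional +257 corrections bring the
   result into [0, p64). For instance, a product of exactly 2^64 folds to 257. */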
void VMAC_Base::TruncatedFinal(byte *mac, size_t size)
{
    size_t len = ModPowerOf2(GetBitCountLo()/8, m_L1KeyLength);

    if (len)
    {
        memset(m_data()+len, 0, (0-len)%16);
        VHASH_Update(DataBuf(), ((len+15)/16)*2);
        len *= 8;	// convert to bits
    }
    else if (m_isFirstBlock)
    {
        // special case for empty string
        m_polyState()[0] = m_polyState()[2];
        m_polyState()[1] = m_polyState()[3];
        if (m_is128)
        {
            m_polyState()[4] = m_polyState()[6];
            m_polyState()[5] = m_polyState()[7];
        }
    }

    if (m_is128)
    {
        word64 t[2];
        t[0] = L3Hash(m_polyState(), m_l3Key(), len) + GetWord<word64>(true, BIG_ENDIAN_ORDER, m_pad());
        t[1] = L3Hash(m_polyState()+4, m_l3Key()+2, len) + GetWord<word64>(true, BIG_ENDIAN_ORDER, m_pad()+8);
        if (size == 16)
        {
            PutWord(false, BIG_ENDIAN_ORDER, mac, t[0]);
            PutWord(false, BIG_ENDIAN_ORDER, mac+8, t[1]);
        }
        else
        {
            t[0] = ConditionalByteReverse(BIG_ENDIAN_ORDER, t[0]);
            t[1] = ConditionalByteReverse(BIG_ENDIAN_ORDER, t[1]);
            memcpy(mac, t, size);
        }
    }
    else
    {
        word64 t = L3Hash(m_polyState(), m_l3Key(), len);
        t += GetWord<word64>(true, BIG_ENDIAN_ORDER, m_pad() + (m_nonce()[IVSize()-1]&1) * 8);
        if (size == 8)
            PutWord(false, BIG_ENDIAN_ORDER, mac, t);
        else
        {
            t = ConditionalByteReverse(BIG_ENDIAN_ORDER, t);
            memcpy(mac, &t, size);
        }
    }
}
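
/* Verification sketch (illustrative; message/tag names are placeholders).
   VMAC tags depend on the nonce, so a verifier must resynchronize to the
   sender's nonce before checking, using the inherited Crypto++ interface:

       vmac.Resynchronize(nonce, (int)nonceLen);
       vmac.Update(message, messageLen);
       bool ok = vmac.TruncatedVerify(receivedTag, 8);   // 8-byte tag
*/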
NAMESPACE_END