Team Fortress 2 Source Code as of 22/4/2020

// rijndael.cpp - modified by Chris Morgan <[email protected]>
// and Wei Dai from Paulo Barreto's Rijndael implementation
// The original code and all modifications are in the public domain.

// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM rijndael.cpp" to generate MASM code

/*
July 2010: Added support for AES-NI instructions via compiler intrinsics.
*/

/*
Feb 2009: The x86/x64 assembly code was rewritten by Wei Dai to do counter mode
caching, which was invented by Hongjun Wu and popularized by Daniel J. Bernstein
and Peter Schwabe in their paper "New AES software speed records". The round
function was also modified to include a trick similar to one in Brian Gladman's
x86 assembly code, doing an 8-bit register move to minimize the number of
register spills. Also switched to compressed tables and copying round keys to
the stack.

The C++ implementation now uses compressed tables if
CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS is defined.
*/

/*
July 2006: Defense against timing attacks was added by Wei Dai.

The code now uses smaller tables in the first and last rounds,
and preloads them into L1 cache before usage (by loading at least
one element in each cache line).

We try to delay subsequent accesses to each table (used in the first
and last rounds) until all of the table has been preloaded. Hopefully
the compiler isn't smart enough to optimize that code away.

After preloading the table, we also try not to access any memory location
other than the table and the stack, in order to prevent table entries from
being unloaded from L1 cache, until that round is finished.
(Some popular CPUs have 2-way associative caches.)
*/
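/*
The preload described above amounts to a strided read over the whole table
before the first data-dependent lookup; a sketch of the loop actually used in
ProcessAndXorBlock below:

    word32 u = 0;
    for (i = 0; i < tableSizeInBytes; i += cacheLineSize)   // tableSizeInBytes: 1024 or 2048 depending on the table layout
        u &= *(const word32 *)(((const byte *)Te)+i);
    s0 |= u; s1 |= u; s2 |= u; s3 |= u;   // u is always 0; the OR only keeps the reads live

The result is never used for anything except forcing every cache line of the
table into L1 before the key/data-dependent indexing starts.
*/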
// This is the original introductory comment:

/**
 * version 3.0 (December 2000)
 *
 * Optimised ANSI C code for the Rijndael cipher (now AES)
 *
 * author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
 * author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
 * author Paulo Barreto <paulo.barreto@terra.com.br>
 *
 * This code is hereby placed in the public domain.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "pch.h"
#include "config.h"

#ifndef CRYPTOPP_IMPORTS
#ifndef CRYPTOPP_GENERATE_X64_MASM
#include "rijndael.h"
#include "stdcpp.h" // alloca
#include "misc.h"
#include "cpu.h"

NAMESPACE_BEGIN(CryptoPP)

// Hack for https://github.com/weidai11/cryptopp/issues/42
#if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS)
# define CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS 1
#endif

#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
# if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
namespace rdtable {CRYPTOPP_ALIGN_DATA(16) word64 Te[256+2];}
using namespace rdtable;
# else
static word64 Te[256];
# endif
static word64 Td[256];
#else
static word32 Te[256*4], Td[256*4];
#endif
static volatile bool s_TeFilled = false, s_TdFilled = false;

// ************************* Portable Code ************************************

#define QUARTER_ROUND(L, T, t, a, b, c, d) \
    a ^= L(T, 3, byte(t)); t >>= 8;\
    b ^= L(T, 2, byte(t)); t >>= 8;\
    c ^= L(T, 1, byte(t)); t >>= 8;\
    d ^= L(T, 0, t);

#define QUARTER_ROUND_LE(t, a, b, c, d) \
    tempBlock[a] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
    tempBlock[b] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
    tempBlock[c] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
    tempBlock[d] = ((byte *)(Te+t))[1];
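// QUARTER_ROUND xors one table lookup per byte of t into four state words; this is
// the usual T-table formulation, where each Te/Td entry combines SubBytes (or
// InvSubBytes) with one column of MixColumns (or InvMixColumns).
// QUARTER_ROUND_LE is used only for the last encryption round: the entry layout
// written by FillEncTable keeps the plain S-box byte at offset 1, so indexing [1]
// extracts just SubBytes, since the final round has no MixColumns step.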
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define QUARTER_ROUND_LD(t, a, b, c, d) \
    tempBlock[a] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
    tempBlock[b] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
    tempBlock[c] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
    tempBlock[d] = ((byte *)(Td+t))[GetNativeByteOrder()*7];
#else
#define QUARTER_ROUND_LD(t, a, b, c, d) \
    tempBlock[a] = Sd[byte(t)]; t >>= 8;\
    tempBlock[b] = Sd[byte(t)]; t >>= 8;\
    tempBlock[c] = Sd[byte(t)]; t >>= 8;\
    tempBlock[d] = Sd[t];
#endif

#define QUARTER_ROUND_E(t, a, b, c, d) QUARTER_ROUND(TL_M, Te, t, a, b, c, d)
#define QUARTER_ROUND_D(t, a, b, c, d) QUARTER_ROUND(TL_M, Td, t, a, b, c, d)

#ifdef IS_LITTLE_ENDIAN
#define QUARTER_ROUND_FE(t, a, b, c, d) QUARTER_ROUND(TL_F, Te, t, d, c, b, a)
#define QUARTER_ROUND_FD(t, a, b, c, d) QUARTER_ROUND(TL_F, Td, t, d, c, b, a)
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define TL_F(T, i, x) (*(word32 *)((byte *)T + x*8 + (6-i)%4+1))
#define TL_M(T, i, x) (*(word32 *)((byte *)T + x*8 + (i+3)%4+1))
#else
#define TL_F(T, i, x) rotrFixed(T[x], (3-i)*8)
#define TL_M(T, i, x) T[i*256 + x]
#endif
#else
#define QUARTER_ROUND_FE(t, a, b, c, d) QUARTER_ROUND(TL_F, Te, t, a, b, c, d)
#define QUARTER_ROUND_FD(t, a, b, c, d) QUARTER_ROUND(TL_F, Td, t, a, b, c, d)
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define TL_F(T, i, x) (*(word32 *)((byte *)T + x*8 + (4-i)%4))
#define TL_M TL_F
#else
#define TL_F(T, i, x) rotrFixed(T[x], i*8)
#define TL_M(T, i, x) T[i*256 + x]
#endif
#endif

#define f2(x) ((x<<1)^(((x>>7)&1)*0x11b))
#define f4(x) ((x<<2)^(((x>>6)&1)*0x11b)^(((x>>6)&2)*0x11b))
#define f8(x) ((x<<3)^(((x>>5)&1)*0x11b)^(((x>>5)&2)*0x11b)^(((x>>5)&4)*0x11b))

#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
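// f2..fe multiply a byte by 2, 4, 8, 3, 9, 0x0b, 0x0d and 0x0e in GF(2^8) modulo the
// AES polynomial x^8+x^4+x^3+x+1 (0x11b). Worked example, in FIPS-197 notation:
// f2(0x57) = 0xae, f4(0x57) = 0x47, f8(0x57) = 0x8e, f3(0x57) = 0xae ^ 0x57 = 0xf9.
// FillEncTable packs the 1,1,2,3 multiples of S(x) into each Te entry, and FillDecTable
// packs the 9,0x0b,0x0d,0x0e multiples of the inverse S-box into Td (plus the plain
// inverse S-box byte used by the last round), so a full round reduces to four table
// lookups and xors per output word.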
void Rijndael::Base::FillEncTable()
{
    for (int i=0; i<256; i++)
    {
        byte x = Se[i];
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
        word32 y = word32(x)<<8 | word32(x)<<16 | word32(f2(x))<<24;
        Te[i] = word64(y | f3(x))<<32 | y;
#else
        word32 y = f3(x) | word32(x)<<8 | word32(x)<<16 | word32(f2(x))<<24;
        for (int j=0; j<4; j++)
        {
            Te[i+j*256] = y;
            y = rotrFixed(y, 8);
        }
#endif
    }
#if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
    Te[256] = Te[257] = 0;
#endif
    s_TeFilled = true;
}

void Rijndael::Base::FillDecTable()
{
    for (int i=0; i<256; i++)
    {
        byte x = Sd[i];
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
        word32 y = word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
        Td[i] = word64(y | fb(x))<<32 | y | x;
#else
        word32 y = fb(x) | word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
        for (int j=0; j<4; j++)
        {
            Td[i+j*256] = y;
            y = rotrFixed(y, 8);
        }
#endif
    }
    s_TdFilled = true;
}

void Rijndael::Base::UncheckedSetKey(const byte *userKey, unsigned int keylen, const NameValuePairs &)
{
    AssertValidKeyLength(keylen);

    m_rounds = keylen/4 + 6;
    m_key.New(4*(m_rounds+1));

    word32 *rk = m_key;

#if (CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE && (!defined(_MSC_VER) || _MSC_VER >= 1600 || CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32))
    // MSVC 2008 SP1 generates bad code for _mm_extract_epi32() when compiling for X64
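    // AES-NI path: _mm_aeskeygenassist_si128(temp, 0) applies SubWord (and RotWord) to
    // the high words of temp; extracting dword 3 yields RotWord(SubWord(w[i-1])), and
    // dword 2 yields the unrotated SubWord() used for the extra AES-256 step below.
    // The round constant is xor'ed in separately from rcLE, so the immediate is 0.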
    if (HasAESNI())
    {
        static const word32 rcLE[] = {
            0x01, 0x02, 0x04, 0x08,
            0x10, 0x20, 0x40, 0x80,
            0x1B, 0x36, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
        };
        const word32 *rc = rcLE;

        __m128i temp = _mm_loadu_si128((__m128i *)(userKey+keylen-16));
        memcpy(rk, userKey, keylen);

        while (true)
        {
            rk[keylen/4] = rk[0] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 3) ^ *(rc++);
            rk[keylen/4+1] = rk[1] ^ rk[keylen/4];
            rk[keylen/4+2] = rk[2] ^ rk[keylen/4+1];
            rk[keylen/4+3] = rk[3] ^ rk[keylen/4+2];

            if (rk + keylen/4 + 4 == m_key.end())
                break;

            if (keylen == 24)
            {
                rk[10] = rk[ 4] ^ rk[ 9];
                rk[11] = rk[ 5] ^ rk[10];
                temp = _mm_insert_epi32(temp, rk[11], 3);
            }
            else if (keylen == 32)
            {
                temp = _mm_insert_epi32(temp, rk[11], 3);
                rk[12] = rk[ 4] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 2);
                rk[13] = rk[ 5] ^ rk[12];
                rk[14] = rk[ 6] ^ rk[13];
                rk[15] = rk[ 7] ^ rk[14];
                temp = _mm_insert_epi32(temp, rk[15], 3);
            }
            else
                temp = _mm_insert_epi32(temp, rk[7], 3);

            rk += keylen/4;
        }

        if (!IsForwardTransformation())
        {
            rk = m_key;
            unsigned int i, j;

            std::swap(*(__m128i *)(rk), *(__m128i *)(rk+4*m_rounds));

            for (i = 4, j = 4*m_rounds-4; i < j; i += 4, j -= 4)
            {
                temp = _mm_aesimc_si128(*(__m128i *)(rk+i));
                *(__m128i *)(rk+i) = _mm_aesimc_si128(*(__m128i *)(rk+j));
                *(__m128i *)(rk+j) = temp;
            }

            *(__m128i *)(rk+i) = _mm_aesimc_si128(*(__m128i *)(rk+i));
        }

        return;
    }
#endif
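    // Portable key schedule (FIPS-197): every keylen/4 words, the previous word is fed
    // through RotWord+SubWord (the Se[GETBYTE(...)] expression below) and xor'ed with a
    // round constant from rcon[]; AES-256 additionally applies SubWord without rotation
    // in the middle of each 8-word stretch.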
    GetUserKey(BIG_ENDIAN_ORDER, rk, keylen/4, userKey, keylen);
    const word32 *rc = rcon;
    word32 temp;

    while (true)
    {
        temp = rk[keylen/4-1];
        word32 x = (word32(Se[GETBYTE(temp, 2)]) << 24) ^ (word32(Se[GETBYTE(temp, 1)]) << 16) ^ (word32(Se[GETBYTE(temp, 0)]) << 8) ^ Se[GETBYTE(temp, 3)];
        rk[keylen/4] = rk[0] ^ x ^ *(rc++);
        rk[keylen/4+1] = rk[1] ^ rk[keylen/4];
        rk[keylen/4+2] = rk[2] ^ rk[keylen/4+1];
        rk[keylen/4+3] = rk[3] ^ rk[keylen/4+2];

        if (rk + keylen/4 + 4 == m_key.end())
            break;

        if (keylen == 24)
        {
            rk[10] = rk[ 4] ^ rk[ 9];
            rk[11] = rk[ 5] ^ rk[10];
        }
        else if (keylen == 32)
        {
            temp = rk[11];
            rk[12] = rk[ 4] ^ (word32(Se[GETBYTE(temp, 3)]) << 24) ^ (word32(Se[GETBYTE(temp, 2)]) << 16) ^ (word32(Se[GETBYTE(temp, 1)]) << 8) ^ Se[GETBYTE(temp, 0)];
            rk[13] = rk[ 5] ^ rk[12];
            rk[14] = rk[ 6] ^ rk[13];
            rk[15] = rk[ 7] ^ rk[14];
        }
        rk += keylen/4;
    }

    rk = m_key;

    if (IsForwardTransformation())
    {
        if (!s_TeFilled)
            FillEncTable();

        ConditionalByteReverse(BIG_ENDIAN_ORDER, rk, rk, 16);
        ConditionalByteReverse(BIG_ENDIAN_ORDER, rk + m_rounds*4, rk + m_rounds*4, 16);
    }
    else
    {
        if (!s_TdFilled)
            FillDecTable();

        unsigned int i, j;

#define InverseMixColumn(x) TL_M(Td, 0, Se[GETBYTE(x, 3)]) ^ TL_M(Td, 1, Se[GETBYTE(x, 2)]) ^ TL_M(Td, 2, Se[GETBYTE(x, 1)]) ^ TL_M(Td, 3, Se[GETBYTE(x, 0)])
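        // The decryption key schedule for the equivalent inverse cipher runs every middle
        // round key through InvMixColumns; InverseMixColumn does that with Td lookups,
        // using Se[] first to cancel the InvSubBytes already baked into the Td entries.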
        for (i = 4, j = 4*m_rounds-4; i < j; i += 4, j -= 4)
        {
            temp = InverseMixColumn(rk[i ]); rk[i ] = InverseMixColumn(rk[j ]); rk[j ] = temp;
            temp = InverseMixColumn(rk[i + 1]); rk[i + 1] = InverseMixColumn(rk[j + 1]); rk[j + 1] = temp;
            temp = InverseMixColumn(rk[i + 2]); rk[i + 2] = InverseMixColumn(rk[j + 2]); rk[j + 2] = temp;
            temp = InverseMixColumn(rk[i + 3]); rk[i + 3] = InverseMixColumn(rk[j + 3]); rk[j + 3] = temp;
        }

        rk[i+0] = InverseMixColumn(rk[i+0]);
        rk[i+1] = InverseMixColumn(rk[i+1]);
        rk[i+2] = InverseMixColumn(rk[i+2]);
        rk[i+3] = InverseMixColumn(rk[i+3]);

        temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[0]); rk[0] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+0]); rk[4*m_rounds+0] = temp;
        temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[1]); rk[1] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+1]); rk[4*m_rounds+1] = temp;
        temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[2]); rk[2] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+2]); rk[4*m_rounds+2] = temp;
        temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[3]); rk[3] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+3]); rk[4*m_rounds+3] = temp;
    }

#if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE
    if (HasAESNI())
        ConditionalByteReverse(BIG_ENDIAN_ORDER, rk+4, rk+4, (m_rounds-1)*16);
#endif
}

void Rijndael::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE) || CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE
#if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
    if (HasSSE2())
#else
    if (HasAESNI())
#endif
    {
        return (void)Rijndael::Enc::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
    }
#endif

    typedef BlockGetAndPut<word32, NativeByteOrder> Block;

    word32 s0, s1, s2, s3, t0, t1, t2, t3;
    Block::Get(inBlock)(s0)(s1)(s2)(s3);

    const word32 *rk = m_key;
    s0 ^= rk[0];
    s1 ^= rk[1];
    s2 ^= rk[2];
    s3 ^= rk[3];
    t0 = rk[4];
    t1 = rk[5];
    t2 = rk[6];
    t3 = rk[7];
    rk += 8;

    // timing attack countermeasure. see comments at top for more details
    const int cacheLineSize = GetCacheLineSize();
    unsigned int i;
    word32 u = 0;
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
    for (i=0; i<2048; i+=cacheLineSize)
#else
    for (i=0; i<1024; i+=cacheLineSize)
#endif
        u &= *(const word32 *)(((const byte *)Te)+i);
    u &= Te[255];
    s0 |= u; s1 |= u; s2 |= u; s3 |= u;

    QUARTER_ROUND_FE(s3, t0, t1, t2, t3)
    QUARTER_ROUND_FE(s2, t3, t0, t1, t2)
    QUARTER_ROUND_FE(s1, t2, t3, t0, t1)
    QUARTER_ROUND_FE(s0, t1, t2, t3, t0)

    // Nr - 2 full rounds:
    unsigned int r = m_rounds/2 - 1;
    do
    {
        s0 = rk[0]; s1 = rk[1]; s2 = rk[2]; s3 = rk[3];

        QUARTER_ROUND_E(t3, s0, s1, s2, s3)
        QUARTER_ROUND_E(t2, s3, s0, s1, s2)
        QUARTER_ROUND_E(t1, s2, s3, s0, s1)
        QUARTER_ROUND_E(t0, s1, s2, s3, s0)

        t0 = rk[4]; t1 = rk[5]; t2 = rk[6]; t3 = rk[7];

        QUARTER_ROUND_E(s3, t0, t1, t2, t3)
        QUARTER_ROUND_E(s2, t3, t0, t1, t2)
        QUARTER_ROUND_E(s1, t2, t3, t0, t1)
        QUARTER_ROUND_E(s0, t1, t2, t3, t0)

        rk += 8;
    } while (--r);

    word32 tbw[4];
    byte *const tempBlock = (byte *)tbw;
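    // Final round: the scattered tempBlock indices below are the ShiftRows permutation
    // folded together with the output byte order, while QUARTER_ROUND_LE applies only
    // SubBytes (there is no MixColumns in the last round).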
    QUARTER_ROUND_LE(t2, 15, 2, 5, 8)
    QUARTER_ROUND_LE(t1, 11, 14, 1, 4)
    QUARTER_ROUND_LE(t0, 7, 10, 13, 0)
    QUARTER_ROUND_LE(t3, 3, 6, 9, 12)

    Block::Put(xorBlock, outBlock)(tbw[0]^rk[0])(tbw[1]^rk[1])(tbw[2]^rk[2])(tbw[3]^rk[3]);
}

void Rijndael::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
{
#if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE
    if (HasAESNI())
    {
        Rijndael::Dec::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
        return;
    }
#endif

    typedef BlockGetAndPut<word32, NativeByteOrder> Block;

    word32 s0, s1, s2, s3, t0, t1, t2, t3;
    Block::Get(inBlock)(s0)(s1)(s2)(s3);

    const word32 *rk = m_key;
    s0 ^= rk[0];
    s1 ^= rk[1];
    s2 ^= rk[2];
    s3 ^= rk[3];
    t0 = rk[4];
    t1 = rk[5];
    t2 = rk[6];
    t3 = rk[7];
    rk += 8;

    // timing attack countermeasure. see comments at top for more details
    const int cacheLineSize = GetCacheLineSize();
    unsigned int i;
    word32 u = 0;
#if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
    for (i=0; i<2048; i+=cacheLineSize)
#else
    for (i=0; i<1024; i+=cacheLineSize)
#endif
        u &= *(const word32 *)(((const byte *)Td)+i);
    u &= Td[255];
    s0 |= u; s1 |= u; s2 |= u; s3 |= u;

    QUARTER_ROUND_FD(s3, t2, t1, t0, t3)
    QUARTER_ROUND_FD(s2, t1, t0, t3, t2)
    QUARTER_ROUND_FD(s1, t0, t3, t2, t1)
    QUARTER_ROUND_FD(s0, t3, t2, t1, t0)

    // Nr - 2 full rounds:
    unsigned int r = m_rounds/2 - 1;
    do
    {
        s0 = rk[0]; s1 = rk[1]; s2 = rk[2]; s3 = rk[3];

        QUARTER_ROUND_D(t3, s2, s1, s0, s3)
        QUARTER_ROUND_D(t2, s1, s0, s3, s2)
        QUARTER_ROUND_D(t1, s0, s3, s2, s1)
        QUARTER_ROUND_D(t0, s3, s2, s1, s0)

        t0 = rk[4]; t1 = rk[5]; t2 = rk[6]; t3 = rk[7];

        QUARTER_ROUND_D(s3, t2, t1, t0, t3)
        QUARTER_ROUND_D(s2, t1, t0, t3, t2)
        QUARTER_ROUND_D(s1, t0, t3, t2, t1)
        QUARTER_ROUND_D(s0, t3, t2, t1, t0)

        rk += 8;
    } while (--r);

#if !(defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS))
    // timing attack countermeasure. see comments at top for more details
    // If CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS is defined,
    // QUARTER_ROUND_LD will use Td, which is already preloaded.
    u = 0;
    for (i=0; i<256; i+=cacheLineSize)
        u &= *(const word32 *)(Sd+i);
    u &= *(const word32 *)(Sd+252);
    t0 |= u; t1 |= u; t2 |= u; t3 |= u;
#endif

    word32 tbw[4];
    byte *const tempBlock = (byte *)tbw;
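    // Final decryption round: as in Enc above, the index pattern below is InvShiftRows
    // folded with the output byte order, and QUARTER_ROUND_LD applies only InvSubBytes.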
    QUARTER_ROUND_LD(t2, 7, 2, 13, 8)
    QUARTER_ROUND_LD(t1, 3, 14, 9, 4)
    QUARTER_ROUND_LD(t0, 15, 10, 5, 0)
    QUARTER_ROUND_LD(t3, 11, 6, 1, 12)

    Block::Put(xorBlock, outBlock)(tbw[0]^rk[0])(tbw[1]^rk[1])(tbw[2]^rk[2])(tbw[3]^rk[3]);
}

// ************************* Assembly Code ************************************

#if CRYPTOPP_MSC_VERSION
# pragma warning(disable: 4731) // frame pointer register 'ebp' modified by inline assembly code
#endif

#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)

CRYPTOPP_NAKED void CRYPTOPP_FASTCALL Rijndael_Enc_AdvancedProcessBlocks(void *locals, const word32 *k)
{
    CRYPTOPP_UNUSED(locals); CRYPTOPP_UNUSED(k);

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32

#define L_REG esp
#define L_INDEX(i) (L_REG+768+i)
#define L_INXORBLOCKS L_INBLOCKS+4
#define L_OUTXORBLOCKS L_INBLOCKS+8
#define L_OUTBLOCKS L_INBLOCKS+12
#define L_INCREMENTS L_INDEX(16*15)
#define L_SP L_INDEX(16*16)
#define L_LENGTH L_INDEX(16*16+4)
#define L_KEYS_BEGIN L_INDEX(16*16+8)

#define MOVD movd
#define MM(i) mm##i

#define MXOR(a,b,c) \
    AS2( movzx esi, b)\
    AS2( movd mm7, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
    AS2( pxor MM(a), mm7)

#define MMOV(a,b,c) \
    AS2( movzx esi, b)\
    AS2( movd MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])

#else

#define L_REG r8
#define L_INDEX(i) (L_REG+i)
#define L_INXORBLOCKS L_INBLOCKS+8
#define L_OUTXORBLOCKS L_INBLOCKS+16
#define L_OUTBLOCKS L_INBLOCKS+24
#define L_INCREMENTS L_INDEX(16*16)
#define L_LENGTH L_INDEX(16*18+8)
#define L_KEYS_BEGIN L_INDEX(16*19)

#define MOVD mov
#define MM_0 r9d
#define MM_1 r12d
#ifdef __GNUC__
#define MM_2 r11d
#else
#define MM_2 r10d
#endif
#define MM(i) MM_##i

#define MXOR(a,b,c) \
    AS2( movzx esi, b)\
    AS2( xor MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])

#define MMOV(a,b,c) \
    AS2( movzx esi, b)\
    AS2( mov MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])

#endif

#define L_SUBKEYS L_INDEX(0)
#define L_SAVED_X L_SUBKEYS
#define L_KEY12 L_INDEX(16*12)
#define L_LASTROUND L_INDEX(16*13)
#define L_INBLOCKS L_INDEX(16*14)
#define MAP0TO4(i) (ASM_MOD(i+3,4)+1)

#define XOR(a,b,c) \
    AS2( movzx esi, b)\
    AS2( xor a, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])

#define MOV(a,b,c) \
    AS2( movzx esi, b)\
    AS2( mov a, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])
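// The L_* macros name slots in a scratch frame addressed through L_REG (esp on x86/x32,
// r8 on x64): the round keys copied to the stack, the last-round output buffer, the block
// pointers and their increments. XOR/MOV/MXOR/MMOV each perform one compressed-table
// lookup per byte, with MAP0TO4 selecting the byte lane inside the 8-byte Te entry.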
#ifdef CRYPTOPP_GENERATE_X64_MASM
    ALIGN 8
Rijndael_Enc_AdvancedProcessBlocks PROC FRAME
    rex_push_reg rsi
    push_reg rdi
    push_reg rbx
    push_reg r12
    .endprolog
    mov L_REG, rcx
    mov AS_REG_7, ?Te@rdtable@CryptoPP@@3PA_KA
    mov edi, DWORD PTR [?g_cacheLineSize@CryptoPP@@3IA]
#elif defined(__GNUC__)
    __asm__ __volatile__
    (
    INTEL_NOPREFIX
#if CRYPTOPP_BOOL_X64
    AS2( mov L_REG, rcx)
#endif
    AS_PUSH_IF86(bx)
    AS_PUSH_IF86(bp)
    AS2( mov AS_REG_7, WORD_REG(si))
#else
    AS_PUSH_IF86(si)
    AS_PUSH_IF86(di)
    AS_PUSH_IF86(bx)
    AS_PUSH_IF86(bp)
    AS2( lea AS_REG_7, [Te])
    AS2( mov edi, [g_cacheLineSize])
#endif

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( mov [ecx+16*12+16*4], esp) // save esp to L_SP
    AS2( lea esp, [ecx-768])
#endif

    // copy subkeys to stack
    AS2( mov WORD_REG(si), [L_KEYS_BEGIN])
    AS2( mov WORD_REG(ax), 16)
    AS2( and WORD_REG(ax), WORD_REG(si))
    AS2( movdqa xmm3, XMMWORD_PTR [WORD_REG(dx)+16+WORD_REG(ax)]) // subkey 1 (non-counter) or 2 (counter)
    AS2( movdqa [L_KEY12], xmm3)
    AS2( lea WORD_REG(ax), [WORD_REG(dx)+WORD_REG(ax)+2*16])
    AS2( sub WORD_REG(ax), WORD_REG(si))
    ASL(0)
    AS2( movdqa xmm0, [WORD_REG(ax)+WORD_REG(si)])
    AS2( movdqa XMMWORD_PTR [L_SUBKEYS+WORD_REG(si)], xmm0)
    AS2( add WORD_REG(si), 16)
    AS2( cmp WORD_REG(si), 16*12)
    ASJ( jl, 0, b)

    // read subkeys 0, 1 and last
    AS2( movdqa xmm4, [WORD_REG(ax)+WORD_REG(si)]) // last subkey
    AS2( movdqa xmm1, [WORD_REG(dx)]) // subkey 0
    AS2( MOVD MM(1), [WORD_REG(dx)+4*4]) // 0,1,2,3
    AS2( mov ebx, [WORD_REG(dx)+5*4]) // 4,5,6,7
    AS2( mov ecx, [WORD_REG(dx)+6*4]) // 8,9,10,11
    AS2( mov edx, [WORD_REG(dx)+7*4]) // 12,13,14,15

    // load table into cache
    AS2( xor WORD_REG(ax), WORD_REG(ax))
    ASL(9)
    AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
    AS2( add WORD_REG(ax), WORD_REG(di))
    AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
    AS2( add WORD_REG(ax), WORD_REG(di))
    AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
    AS2( add WORD_REG(ax), WORD_REG(di))
    AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
    AS2( add WORD_REG(ax), WORD_REG(di))
    AS2( cmp WORD_REG(ax), 2048)
    ASJ( jl, 9, b)
    AS1( lfence)

    AS2( test DWORD PTR [L_LENGTH], 1)
    ASJ( jz, 8, f)

    // counter mode one-time setup
    AS2( mov WORD_REG(si), [L_INBLOCKS])
    AS2( movdqu xmm2, [WORD_REG(si)]) // counter
    AS2( pxor xmm2, xmm1)
    AS2( psrldq xmm1, 14)
    AS2( movd eax, xmm1)
    AS2( mov al, BYTE PTR [WORD_REG(si)+15])
    AS2( MOVD MM(2), eax)
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( mov eax, 1)
    AS2( movd mm3, eax)
#endif

    // partial first round, in: xmm2(15,14,13,12;11,10,9,8;7,6,5,4;3,2,1,0), out: mm1, ebx, ecx, edx
    AS2( movd eax, xmm2)
    AS2( psrldq xmm2, 4)
    AS2( movd edi, xmm2)
    AS2( psrldq xmm2, 4)
    MXOR( 1, al, 0) // 0
    XOR( edx, ah, 1) // 1
    AS2( shr eax, 16)
    XOR( ecx, al, 2) // 2
    XOR( ebx, ah, 3) // 3
    AS2( mov eax, edi)
    AS2( movd edi, xmm2)
    AS2( psrldq xmm2, 4)
    XOR( ebx, al, 0) // 4
    MXOR( 1, ah, 1) // 5
    AS2( shr eax, 16)
    XOR( edx, al, 2) // 6
    XOR( ecx, ah, 3) // 7
    AS2( mov eax, edi)
    AS2( movd edi, xmm2)
    XOR( ecx, al, 0) // 8
    XOR( ebx, ah, 1) // 9
    AS2( shr eax, 16)
    MXOR( 1, al, 2) // 10
    XOR( edx, ah, 3) // 11
    AS2( mov eax, edi)
    XOR( edx, al, 0) // 12
    XOR( ecx, ah, 1) // 13
    AS2( shr eax, 16)
    XOR( ebx, al, 2) // 14
    AS2( psrldq xmm2, 3)

    // partial second round, in: ebx(4,5,6,7), ecx(8,9,10,11), edx(12,13,14,15), out: eax, ebx, edi, mm0
    AS2( mov eax, [L_KEY12+0*4])
    AS2( mov edi, [L_KEY12+2*4])
    AS2( MOVD MM(0), [L_KEY12+3*4])
    MXOR( 0, cl, 3) /* 11 */
    XOR( edi, bl, 3) /* 7 */
    MXOR( 0, bh, 2) /* 6 */
    AS2( shr ebx, 16) /* 4,5 */
    XOR( eax, bl, 1) /* 5 */
    MOV( ebx, bh, 0) /* 4 */
    AS2( xor ebx, [L_KEY12+1*4])
    XOR( eax, ch, 2) /* 10 */
    AS2( shr ecx, 16) /* 8,9 */
    XOR( eax, dl, 3) /* 15 */
    XOR( ebx, dh, 2) /* 14 */
    AS2( shr edx, 16) /* 12,13 */
    XOR( edi, ch, 0) /* 8 */
    XOR( ebx, cl, 1) /* 9 */
    XOR( edi, dl, 1) /* 13 */
    MXOR( 0, dh, 0) /* 12 */
    AS2( movd ecx, xmm2)
    AS2( MOVD edx, MM(1))
    AS2( MOVD [L_SAVED_X+3*4], MM(0))
    AS2( mov [L_SAVED_X+0*4], eax)
    AS2( mov [L_SAVED_X+1*4], ebx)
    AS2( mov [L_SAVED_X+2*4], edi)
    ASJ( jmp, 5, f)
    ASL(3)
    // non-counter mode per-block setup
    AS2( MOVD MM(1), [L_KEY12+0*4]) // 0,1,2,3
    AS2( mov ebx, [L_KEY12+1*4]) // 4,5,6,7
    AS2( mov ecx, [L_KEY12+2*4]) // 8,9,10,11
    AS2( mov edx, [L_KEY12+3*4]) // 12,13,14,15
    ASL(8)
    AS2( mov WORD_REG(ax), [L_INBLOCKS])
    AS2( movdqu xmm2, [WORD_REG(ax)])
    AS2( mov WORD_REG(si), [L_INXORBLOCKS])
    AS2( movdqu xmm5, [WORD_REG(si)])
    AS2( pxor xmm2, xmm1)
    AS2( pxor xmm2, xmm5)

    // first round, in: xmm2(15,14,13,12;11,10,9,8;7,6,5,4;3,2,1,0), out: eax, ebx, ecx, edx
    AS2( movd eax, xmm2)
    AS2( psrldq xmm2, 4)
    AS2( movd edi, xmm2)
    AS2( psrldq xmm2, 4)
    MXOR( 1, al, 0) // 0
    XOR( edx, ah, 1) // 1
    AS2( shr eax, 16)
    XOR( ecx, al, 2) // 2
    XOR( ebx, ah, 3) // 3
    AS2( mov eax, edi)
    AS2( movd edi, xmm2)
    AS2( psrldq xmm2, 4)
    XOR( ebx, al, 0) // 4
    MXOR( 1, ah, 1) // 5
    AS2( shr eax, 16)
    XOR( edx, al, 2) // 6
    XOR( ecx, ah, 3) // 7
    AS2( mov eax, edi)
    AS2( movd edi, xmm2)
    XOR( ecx, al, 0) // 8
    XOR( ebx, ah, 1) // 9
    AS2( shr eax, 16)
    MXOR( 1, al, 2) // 10
    XOR( edx, ah, 3) // 11
    AS2( mov eax, edi)
    XOR( edx, al, 0) // 12
    XOR( ecx, ah, 1) // 13
    AS2( shr eax, 16)
    XOR( ebx, al, 2) // 14
    MXOR( 1, ah, 3) // 15
    AS2( MOVD eax, MM(1))

    AS2( add L_REG, [L_KEYS_BEGIN])
    AS2( add L_REG, 4*16)
    ASJ( jmp, 2, f)

    ASL(1)
    // counter-mode per-block setup
    AS2( MOVD ecx, MM(2))
    AS2( MOVD edx, MM(1))
    AS2( mov eax, [L_SAVED_X+0*4])
    AS2( mov ebx, [L_SAVED_X+1*4])
    AS2( xor cl, ch)
    AS2( and WORD_REG(cx), 255)
    ASL(5)
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( paddb MM(2), mm3)
#else
    AS2( add MM(2), 1)
#endif
    // remaining part of second round, in: edx(previous round),esi(keyed counter byte) eax,ebx,[L_SAVED_X+2*4],[L_SAVED_X+3*4], out: eax,ebx,ecx,edx
    AS2( xor edx, DWORD PTR [AS_REG_7+WORD_REG(cx)*8+3])
    XOR( ebx, dl, 3)
    MOV( ecx, dh, 2)
    AS2( shr edx, 16)
    AS2( xor ecx, [L_SAVED_X+2*4])
    XOR( eax, dh, 0)
    MOV( edx, dl, 1)
    AS2( xor edx, [L_SAVED_X+3*4])

    AS2( add L_REG, [L_KEYS_BEGIN])
    AS2( add L_REG, 3*16)
    ASJ( jmp, 4, f)

// in: eax(0,1,2,3), ebx(4,5,6,7), ecx(8,9,10,11), edx(12,13,14,15)
// out: eax, ebx, edi, mm0
#define ROUND() \
    MXOR( 0, cl, 3) /* 11 */\
    AS2( mov cl, al) /* 8,9,10,3 */\
    XOR( edi, ah, 2) /* 2 */\
    AS2( shr eax, 16) /* 0,1 */\
    XOR( edi, bl, 3) /* 7 */\
    MXOR( 0, bh, 2) /* 6 */\
    AS2( shr ebx, 16) /* 4,5 */\
    MXOR( 0, al, 1) /* 1 */\
    MOV( eax, ah, 0) /* 0 */\
    XOR( eax, bl, 1) /* 5 */\
    MOV( ebx, bh, 0) /* 4 */\
    XOR( eax, ch, 2) /* 10 */\
    XOR( ebx, cl, 3) /* 3 */\
    AS2( shr ecx, 16) /* 8,9 */\
    XOR( eax, dl, 3) /* 15 */\
    XOR( ebx, dh, 2) /* 14 */\
    AS2( shr edx, 16) /* 12,13 */\
    XOR( edi, ch, 0) /* 8 */\
    XOR( ebx, cl, 1) /* 9 */\
    XOR( edi, dl, 1) /* 13 */\
    MXOR( 0, dh, 0) /* 12 */

    ASL(2) // 2-round loop
    AS2( MOVD MM(0), [L_SUBKEYS-4*16+3*4])
    AS2( mov edi, [L_SUBKEYS-4*16+2*4])
    ROUND()
    AS2( mov ecx, edi)
    AS2( xor eax, [L_SUBKEYS-4*16+0*4])
    AS2( xor ebx, [L_SUBKEYS-4*16+1*4])
    AS2( MOVD edx, MM(0))

    ASL(4)
    AS2( MOVD MM(0), [L_SUBKEYS-4*16+7*4])
    AS2( mov edi, [L_SUBKEYS-4*16+6*4])
    ROUND()
    AS2( mov ecx, edi)
    AS2( xor eax, [L_SUBKEYS-4*16+4*4])
    AS2( xor ebx, [L_SUBKEYS-4*16+5*4])
    AS2( MOVD edx, MM(0))

    AS2( add L_REG, 32)
    AS2( test L_REG, 255)
    ASJ( jnz, 2, b)
    AS2( sub L_REG, 16*16)

#define LAST(a, b, c) \
    AS2( movzx esi, a )\
    AS2( movzx edi, BYTE PTR [AS_REG_7+WORD_REG(si)*8+1] )\
    AS2( movzx esi, b )\
    AS2( xor edi, DWORD PTR [AS_REG_7+WORD_REG(si)*8+0] )\
    AS2( mov WORD PTR [L_LASTROUND+c], di )
    // last round
    LAST(ch, dl, 2)
    LAST(dh, al, 6)
    AS2( shr edx, 16)
    LAST(ah, bl, 10)
    AS2( shr eax, 16)
    LAST(bh, cl, 14)
    AS2( shr ebx, 16)
    LAST(dh, al, 12)
    AS2( shr ecx, 16)
    LAST(ah, bl, 0)
    LAST(bh, cl, 4)
    LAST(ch, dl, 8)

    AS2( mov WORD_REG(ax), [L_OUTXORBLOCKS])
    AS2( mov WORD_REG(bx), [L_OUTBLOCKS])
    AS2( mov WORD_REG(cx), [L_LENGTH])
    AS2( sub WORD_REG(cx), 16)
    AS2( movdqu xmm2, [WORD_REG(ax)])
    AS2( pxor xmm2, xmm4)

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( movdqa xmm0, [L_INCREMENTS])
    AS2( paddd xmm0, [L_INBLOCKS])
    AS2( movdqa [L_INBLOCKS], xmm0)
#else
    AS2( movdqa xmm0, [L_INCREMENTS+16])
    AS2( paddq xmm0, [L_INBLOCKS+16])
    AS2( movdqa [L_INBLOCKS+16], xmm0)
#endif

    AS2( pxor xmm2, [L_LASTROUND])
    AS2( movdqu [WORD_REG(bx)], xmm2)

    ASJ( jle, 7, f)
    AS2( mov [L_LENGTH], WORD_REG(cx))
    AS2( test WORD_REG(cx), 1)
    ASJ( jnz, 1, b)
#if CRYPTOPP_BOOL_X64
    AS2( movdqa xmm0, [L_INCREMENTS])
    AS2( paddq xmm0, [L_INBLOCKS])
    AS2( movdqa [L_INBLOCKS], xmm0)
#endif
    ASJ( jmp, 3, b)

    ASL(7)
    // erase keys on stack
    AS2( xorps xmm0, xmm0)
    AS2( lea WORD_REG(ax), [L_SUBKEYS+7*16])
    AS2( movaps [WORD_REG(ax)-7*16], xmm0)
    AS2( movaps [WORD_REG(ax)-6*16], xmm0)
    AS2( movaps [WORD_REG(ax)-5*16], xmm0)
    AS2( movaps [WORD_REG(ax)-4*16], xmm0)
    AS2( movaps [WORD_REG(ax)-3*16], xmm0)
    AS2( movaps [WORD_REG(ax)-2*16], xmm0)
    AS2( movaps [WORD_REG(ax)-1*16], xmm0)
    AS2( movaps [WORD_REG(ax)+0*16], xmm0)
    AS2( movaps [WORD_REG(ax)+1*16], xmm0)
    AS2( movaps [WORD_REG(ax)+2*16], xmm0)
    AS2( movaps [WORD_REG(ax)+3*16], xmm0)
    AS2( movaps [WORD_REG(ax)+4*16], xmm0)
    AS2( movaps [WORD_REG(ax)+5*16], xmm0)
    AS2( movaps [WORD_REG(ax)+6*16], xmm0)
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( mov esp, [L_SP])
    AS1( emms)
#endif
    AS_POP_IF86(bp)
    AS_POP_IF86(bx)
#if defined(_MSC_VER) && CRYPTOPP_BOOL_X86
    AS_POP_IF86(di)
    AS_POP_IF86(si)
    AS1(ret)
#endif
#ifdef CRYPTOPP_GENERATE_X64_MASM
    pop r12
    pop rbx
    pop rdi
    pop rsi
    ret
Rijndael_Enc_AdvancedProcessBlocks ENDP
#endif
#ifdef __GNUC__
    ATT_PREFIX
    :
    : "c" (locals), "d" (k), "S" (Te), "D" (g_cacheLineSize)
    : "memory", "cc", "%eax"
#if CRYPTOPP_BOOL_X64
    , "%rbx", "%r8", "%r9", "%r10", "%r11", "%r12"
#endif
    );
#endif
}

#endif

#ifndef CRYPTOPP_GENERATE_X64_MASM

#ifdef CRYPTOPP_X64_MASM_AVAILABLE
extern "C" {
void Rijndael_Enc_AdvancedProcessBlocks(void *locals, const word32 *k);
}
#endif

#if CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X86
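// AliasedWithTable checks whether a candidate stack buffer maps to the same 4 KiB page
// offsets as Te. The SSE2 path below keeps re-allocating its scratch area until it does
// not alias the table, so the preloaded Te cache lines are not evicted by the routine's
// own stack traffic.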
static inline bool AliasedWithTable(const byte *begin, const byte *end)
{
    size_t s0 = size_t(begin)%4096, s1 = size_t(end)%4096;
    size_t t0 = size_t(Te)%4096, t1 = (size_t(Te)+sizeof(Te))%4096;
    if (t1 > t0)
        return (s0 >= t0 && s0 < t1) || (s1 > t0 && s1 <= t1);
    else
        return (s0 < t1 || s1 <= t1) || (s0 >= t0 || s1 > t0);
}

#if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE
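// AES-NI helpers: one aesenc/aesdec per middle round and a final aesenclast/aesdeclast,
// with subkeys[] holding the m_rounds+1 round keys prepared by UncheckedSetKey (the
// decryption keys were already passed through aesimc and reordered there).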
inline void AESNI_Enc_Block(__m128i &block, const __m128i *subkeys, unsigned int rounds)
{
    block = _mm_xor_si128(block, subkeys[0]);
    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        block = _mm_aesenc_si128(block, subkeys[i]);
        block = _mm_aesenc_si128(block, subkeys[i+1]);
    }
    block = _mm_aesenc_si128(block, subkeys[rounds-1]);
    block = _mm_aesenclast_si128(block, subkeys[rounds]);
}

inline void AESNI_Enc_4_Blocks(__m128i &block0, __m128i &block1, __m128i &block2, __m128i &block3, const __m128i *subkeys, unsigned int rounds)
{
    __m128i rk = subkeys[0];
    block0 = _mm_xor_si128(block0, rk);
    block1 = _mm_xor_si128(block1, rk);
    block2 = _mm_xor_si128(block2, rk);
    block3 = _mm_xor_si128(block3, rk);
    for (unsigned int i=1; i<rounds; i++)
    {
        rk = subkeys[i];
        block0 = _mm_aesenc_si128(block0, rk);
        block1 = _mm_aesenc_si128(block1, rk);
        block2 = _mm_aesenc_si128(block2, rk);
        block3 = _mm_aesenc_si128(block3, rk);
    }
    rk = subkeys[rounds];
    block0 = _mm_aesenclast_si128(block0, rk);
    block1 = _mm_aesenclast_si128(block1, rk);
    block2 = _mm_aesenclast_si128(block2, rk);
    block3 = _mm_aesenclast_si128(block3, rk);
}

inline void AESNI_Dec_Block(__m128i &block, const __m128i *subkeys, unsigned int rounds)
{
    block = _mm_xor_si128(block, subkeys[0]);
    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        block = _mm_aesdec_si128(block, subkeys[i]);
        block = _mm_aesdec_si128(block, subkeys[i+1]);
    }
    block = _mm_aesdec_si128(block, subkeys[rounds-1]);
    block = _mm_aesdeclast_si128(block, subkeys[rounds]);
}

inline void AESNI_Dec_4_Blocks(__m128i &block0, __m128i &block1, __m128i &block2, __m128i &block3, const __m128i *subkeys, unsigned int rounds)
{
    __m128i rk = subkeys[0];
    block0 = _mm_xor_si128(block0, rk);
    block1 = _mm_xor_si128(block1, rk);
    block2 = _mm_xor_si128(block2, rk);
    block3 = _mm_xor_si128(block3, rk);
    for (unsigned int i=1; i<rounds; i++)
    {
        rk = subkeys[i];
        block0 = _mm_aesdec_si128(block0, rk);
        block1 = _mm_aesdec_si128(block1, rk);
        block2 = _mm_aesdec_si128(block2, rk);
        block3 = _mm_aesdec_si128(block3, rk);
    }
    rk = subkeys[rounds];
    block0 = _mm_aesdeclast_si128(block0, rk);
    block1 = _mm_aesdeclast_si128(block1, rk);
    block2 = _mm_aesdeclast_si128(block2, rk);
    block3 = _mm_aesdeclast_si128(block3, rk);
}
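// s_one is a 1 in the low-order (last) byte of a big-endian counter block; the 4-block
// CTR path below adds it with _mm_add_epi32 to derive the following counter blocks.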
static CRYPTOPP_ALIGN_DATA(16) const word32 s_one[] = {0, 0, 0, 1<<24};

template <typename F1, typename F4>
inline size_t AESNI_AdvancedProcessBlocks(F1 func1, F4 func4, const __m128i *subkeys, unsigned int rounds, const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    size_t blockSize = 16;
    size_t inIncrement = (flags & (BlockTransformation::BT_InBlockIsCounter|BlockTransformation::BT_DontIncrementInOutPointers)) ? 0 : blockSize;
    size_t xorIncrement = xorBlocks ? blockSize : 0;
    size_t outIncrement = (flags & BlockTransformation::BT_DontIncrementInOutPointers) ? 0 : blockSize;

    if (flags & BlockTransformation::BT_ReverseDirection)
    {
        assert(length % blockSize == 0);
        inBlocks += length - blockSize;
        xorBlocks += length - blockSize;
        outBlocks += length - blockSize;
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BlockTransformation::BT_AllowParallel)
    {
        while (length >= 4*blockSize)
        {
            __m128i block0 = _mm_loadu_si128((const __m128i *)inBlocks), block1, block2, block3;
            if (flags & BlockTransformation::BT_InBlockIsCounter)
            {
                const __m128i be1 = *(const __m128i *)s_one;
                block1 = _mm_add_epi32(block0, be1);
                block2 = _mm_add_epi32(block1, be1);
                block3 = _mm_add_epi32(block2, be1);
                _mm_storeu_si128((__m128i *)inBlocks, _mm_add_epi32(block3, be1));
            }
            else
            {
                inBlocks += inIncrement;
                block1 = _mm_loadu_si128((const __m128i *)inBlocks);
                inBlocks += inIncrement;
                block2 = _mm_loadu_si128((const __m128i *)inBlocks);
                inBlocks += inIncrement;
                block3 = _mm_loadu_si128((const __m128i *)inBlocks);
                inBlocks += inIncrement;
            }

            if (flags & BlockTransformation::BT_XorInput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block1 = _mm_xor_si128(block1, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block2 = _mm_xor_si128(block2, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block3 = _mm_xor_si128(block3, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
            }

            func4(block0, block1, block2, block3, subkeys, rounds);

            if (xorBlocks && !(flags & BlockTransformation::BT_XorInput))
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block1 = _mm_xor_si128(block1, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block2 = _mm_xor_si128(block2, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
                block3 = _mm_xor_si128(block3, _mm_loadu_si128((const __m128i *)xorBlocks));
                xorBlocks += xorIncrement;
            }

            _mm_storeu_si128((__m128i *)outBlocks, block0);
            outBlocks += outIncrement;
            _mm_storeu_si128((__m128i *)outBlocks, block1);
            outBlocks += outIncrement;
            _mm_storeu_si128((__m128i *)outBlocks, block2);
            outBlocks += outIncrement;
            _mm_storeu_si128((__m128i *)outBlocks, block3);
            outBlocks += outIncrement;

            length -= 4*blockSize;
        }
    }

    while (length >= blockSize)
    {
        __m128i block = _mm_loadu_si128((const __m128i *)inBlocks);
        if (flags & BlockTransformation::BT_XorInput)
            block = _mm_xor_si128(block, _mm_loadu_si128((const __m128i *)xorBlocks));
        if (flags & BlockTransformation::BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;
        func1(block, subkeys, rounds);
        if (xorBlocks && !(flags & BlockTransformation::BT_XorInput))
            block = _mm_xor_si128(block, _mm_loadu_si128((const __m128i *)xorBlocks));
        _mm_storeu_si128((__m128i *)outBlocks, block);
        inBlocks += inIncrement;
        outBlocks += outIncrement;
        xorBlocks += xorIncrement;
        length -= blockSize;
    }

    return length;
}
#endif

size_t Rijndael::Enc::AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags) const
{
#if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE
    if (HasAESNI())
        return AESNI_AdvancedProcessBlocks(AESNI_Enc_Block, AESNI_Enc_4_Blocks, (const __m128i *)m_key.begin(), m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif

#if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
    if (HasSSE2())
    {
        if (length < BLOCKSIZE)
            return length;

        struct Locals
        {
            word32 subkeys[4*12], workspace[8];
            const byte *inBlocks, *inXorBlocks, *outXorBlocks;
            byte *outBlocks;
            size_t inIncrement, inXorIncrement, outXorIncrement, outIncrement;
            size_t regSpill, lengthAndCounterFlag, keysBegin;
        };

        size_t increment = BLOCKSIZE;
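        // Te[256] and Te[257] are the two zero qwords appended in FillEncTable; 'zeros'
        // below hands the asm routine a harmless all-zero xor source when the caller
        // did not supply xorBlocks.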
        const byte* zeros = (byte *)(Te+256);
        byte *space;

        do {
            // https://msdn.microsoft.com/en-us/library/5471dc8s.aspx
#if (CRYPTOPP_MSC_VERSION >= 1400)
            space = (byte *)_malloca(255+sizeof(Locals));
            space += (256-(size_t)space%256)%256;
#else
            space = (byte *)alloca(255+sizeof(Locals));
            space += (256-(size_t)space%256)%256;
#endif
        }
        while (AliasedWithTable(space, space+sizeof(Locals)));

        if (flags & BT_ReverseDirection)
        {
            assert(length % BLOCKSIZE == 0);
            inBlocks += length - BLOCKSIZE;
            xorBlocks += length - BLOCKSIZE;
            outBlocks += length - BLOCKSIZE;
            increment = 0-increment;
        }

        Locals &locals = *(Locals *)space;

        locals.inBlocks = inBlocks;
        locals.inXorBlocks = (flags & BT_XorInput) && xorBlocks ? xorBlocks : zeros;
        locals.outXorBlocks = (flags & BT_XorInput) || !xorBlocks ? zeros : xorBlocks;
        locals.outBlocks = outBlocks;

        locals.inIncrement = (flags & BT_DontIncrementInOutPointers) ? 0 : increment;
        locals.inXorIncrement = (flags & BT_XorInput) && xorBlocks ? increment : 0;
        locals.outXorIncrement = (flags & BT_XorInput) || !xorBlocks ? 0 : increment;
        locals.outIncrement = (flags & BT_DontIncrementInOutPointers) ? 0 : increment;

        locals.lengthAndCounterFlag = length - (length%16) - bool(flags & BT_InBlockIsCounter);
        int keysToCopy = m_rounds - (flags & BT_InBlockIsCounter ? 3 : 2);
        locals.keysBegin = (12-keysToCopy)*16;

        Rijndael_Enc_AdvancedProcessBlocks(&locals, m_key);

        return length % BLOCKSIZE;
    }
#endif

    return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif

#if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE

size_t Rijndael::Dec::AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags) const
{
    if (HasAESNI())
        return AESNI_AdvancedProcessBlocks(AESNI_Dec_Block, AESNI_Dec_4_Blocks, (const __m128i *)m_key.begin(), m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);

    return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // #if CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE

NAMESPACE_END

#endif
#endif