Team Fortress 2 Source Code as of 22/4/2020
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

625 lines
23 KiB

  1. // salsa.cpp - written and placed in the public domain by Wei Dai
  2. // use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM salsa.cpp" to generate MASM code
  3. #include "pch.h"
  4. #include "config.h"
  5. #ifndef CRYPTOPP_GENERATE_X64_MASM
  6. #include "salsa.h"
  7. #include "argnames.h"
  8. #include "misc.h"
  9. #include "cpu.h"
  10. #if CRYPTOPP_MSC_VERSION
  11. # pragma warning(disable: 4702 4740)
  12. #endif
  13. // TODO: work around GCC 4.8+ issue with SSE2 ASM until the exact details are known
  14. // and fix is released. Duplicate with "valgrind ./cryptest.exe tv salsa"
  15. // Clang due to "Inline assembly operands don't work with .intel_syntax"
  16. // https://llvm.org/bugs/show_bug.cgi?id=24232
  17. #if defined(CRYPTOPP_DISABLE_SALSA_ASM)
  18. # undef CRYPTOPP_X86_ASM_AVAILABLE
  19. # undef CRYPTOPP_X32_ASM_AVAILABLE
  20. # undef CRYPTOPP_X64_ASM_AVAILABLE
  21. # undef CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
  22. # undef CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE
  23. # define CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE 0
  24. # define CRYPTOPP_BOOL_SSSE3_ASM_AVAILABLE 0
  25. #endif
  26. NAMESPACE_BEGIN(CryptoPP)
  27. #if !defined(NDEBUG) && !defined(CRYPTOPP_DOXYGEN_PROCESSING)
  28. void Salsa20_TestInstantiations()
  29. {
  30. Salsa20::Encryption x;
  31. }
  32. #endif
  33. void Salsa20_Policy::CipherSetKey(const NameValuePairs &params, const byte *key, size_t length)
  34. {
  35. m_rounds = params.GetIntValueWithDefault(Name::Rounds(), 20);
  36. if (!(m_rounds == 8 || m_rounds == 12 || m_rounds == 20))
  37. throw InvalidRounds(Salsa20::StaticAlgorithmName(), m_rounds);
  38. // m_state is reordered for SSE2
  39. GetBlock<word32, LittleEndian> get1(key);
  40. get1(m_state[13])(m_state[10])(m_state[7])(m_state[4]);
  41. GetBlock<word32, LittleEndian> get2(key + length - 16);
  42. get2(m_state[15])(m_state[12])(m_state[9])(m_state[6]);
  43. // "expand 16-byte k" or "expand 32-byte k"
  44. m_state[0] = 0x61707865;
  45. m_state[1] = (length == 16) ? 0x3120646e : 0x3320646e;
  46. m_state[2] = (length == 16) ? 0x79622d36 : 0x79622d32;
  47. m_state[3] = 0x6b206574;
  48. }
  49. void Salsa20_Policy::CipherResynchronize(byte *keystreamBuffer, const byte *IV, size_t length)
  50. {
  51. CRYPTOPP_UNUSED(keystreamBuffer), CRYPTOPP_UNUSED(length);
  52. assert(length==8);
  53. GetBlock<word32, LittleEndian> get(IV);
  54. get(m_state[14])(m_state[11]);
  55. m_state[8] = m_state[5] = 0;
  56. }
  57. void Salsa20_Policy::SeekToIteration(lword iterationCount)
  58. {
  59. m_state[8] = (word32)iterationCount;
  60. m_state[5] = (word32)SafeRightShift<32>(iterationCount);
  61. }
  62. #if (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X64) && !defined(CRYPTOPP_DISABLE_SALSA_ASM)
  63. unsigned int Salsa20_Policy::GetAlignment() const
  64. {
  65. #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
  66. if (HasSSE2())
  67. return 16;
  68. else
  69. #endif
  70. return GetAlignmentOf<word32>();
  71. }
  72. unsigned int Salsa20_Policy::GetOptimalBlockSize() const
  73. {
  74. #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
  75. if (HasSSE2())
  76. return 4*BYTES_PER_ITERATION;
  77. else
  78. #endif
  79. return BYTES_PER_ITERATION;
  80. }
  81. #endif
  82. #ifdef CRYPTOPP_X64_MASM_AVAILABLE
  83. extern "C" {
  84. void Salsa20_OperateKeystream(byte *output, const byte *input, size_t iterationCount, int rounds, void *state);
  85. }
  86. #endif
  87. #if CRYPTOPP_MSC_VERSION
  88. # pragma warning(disable: 4731) // frame pointer register 'ebp' modified by inline assembly code
  89. #endif
// Produces `iterationCount` 64-byte Salsa20 keystream blocks at `output`,
// XOR-combining with `input` where the KeystreamOperation requests it (the
// dispatch happens inside AS_XMM_OUTPUT4 / CRYPTOPP_KEYSTREAM_OUTPUT_SWITCH,
// which consume `operation`). Three implementations are selected by the
// preprocessor:
//   1. CRYPTOPP_X64_MASM_AVAILABLE - call out to the pre-assembled MASM code.
//   2. CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE + HasSSE2() - inline SSE2 assembly;
//      the same text also serves as the template from which the x64 MASM
//      file is generated (see the "cl /EP /P" note at the top of the file).
//   3. Portable C++ (the block at the bottom).
// NOTE(review): the lone "#endif" just after the opening brace closes the
// "#ifdef CRYPTOPP_GENERATE_X64_MASM" from near the top of the file; when
// the file is preprocessed to emit MASM, the C++ wrapper is stripped and
// only the assembly text remains. This interleaving is intentional.
void Salsa20_Policy::OperateKeystream(KeystreamOperation operation, byte *output, const byte *input, size_t iterationCount)
{
#endif // #ifdef CRYPTOPP_GENERATE_X64_MASM
#ifdef CRYPTOPP_X64_MASM_AVAILABLE
// Hand off to the external MASM routine and return immediately.
Salsa20_OperateKeystream(output, input, iterationCount, m_rounds, m_state.data());
return;
#endif
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#ifdef CRYPTOPP_GENERATE_X64_MASM
// MASM prologue (Win64 ABI): spill the non-volatile xmm6-xmm15 and carve
// out stack space that includes the 32*16-byte workspace.
ALIGN 8
Salsa20_OperateKeystream PROC FRAME
mov r10, [rsp + 5*8] ; state
alloc_stack(10*16 + 32*16 + 8)
save_xmm128 xmm6, 0200h
save_xmm128 xmm7, 0210h
save_xmm128 xmm8, 0220h
save_xmm128 xmm9, 0230h
save_xmm128 xmm10, 0240h
save_xmm128 xmm11, 0250h
save_xmm128 xmm12, 0260h
save_xmm128 xmm13, 0270h
save_xmm128 xmm14, 0280h
save_xmm128 xmm15, 0290h
.endprolog
// Register assignments for the MASM build (Win64 calling convention:
// rcx, rdx, r8 are the first three arguments; state was loaded into r10).
// NOTE(review): "e9d" below looks like it should be "r9d" (the dword of
// the fourth Win64 argument register) -- verify against the generated
// .asm before relying on the MASM path.
#define REG_output rcx
#define REG_input rdx
#define REG_iterationCount r8
#define REG_state r10
#define REG_rounds e9d
#define REG_roundsLeft eax
#define REG_temp32 r11d
#define REG_temp r11
#define SSE2_WORKSPACE rsp
#else
// Runtime dispatch: take the SSE2 inline-asm path only on SSE2 hardware.
if (HasSSE2())
{
#if CRYPTOPP_BOOL_X64
// Register assignments for GCC inline asm on x86-64; %0-%5 refer to the
// constraint operands listed at the bottom of the asm statement.
#define REG_output %1
#define REG_input %0
#define REG_iterationCount %2
#define REG_state %4 /* constant */
#define REG_rounds %3 /* constant */
#define REG_roundsLeft eax
#define REG_temp32 edx
#define REG_temp rdx
#define SSE2_WORKSPACE %5 /* constant */
CRYPTOPP_ALIGN_DATA(16) byte workspace[16*32];
#else
// Register assignments for 32-bit x86; the workspace lives on the stack.
#define REG_output edi
#define REG_input eax
#define REG_iterationCount ecx
#define REG_state esi
#define REG_rounds edx
#define REG_roundsLeft ebx
#define REG_temp32 ebp
#define REG_temp ebp
#define SSE2_WORKSPACE esp + WORD_SZ
#endif
#ifdef __GNUC__
__asm__ __volatile__
(
INTEL_NOPREFIX
AS_PUSH_IF86( bx)
#else
// MSVC inline asm: load the operands into the chosen registers manually.
void *s = m_state.data();
word32 r = m_rounds;
AS2( mov REG_iterationCount, iterationCount)
AS2( mov REG_input, input)
AS2( mov REG_output, output)
AS2( mov REG_state, s)
AS2( mov REG_rounds, r)
#endif
#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM
AS_PUSH_IF86( bp)
// Fewer than 4 blocks requested? Jump to the one-block-at-a-time path
// at label 5.
AS2( cmp REG_iterationCount, 4)
ASJ( jl, 5, f)
#if CRYPTOPP_BOOL_X86
// Align esp to 16 bytes and reserve the 32*16-byte workspace (movdqa
// requires aligned addresses); the old esp is saved on the new stack.
AS2( mov ebx, esp)
AS2( and esp, -16)
AS2( sub esp, 32*16)
AS1( push ebx)
#endif
// Broadcast state word j of row i into all four 32-bit lanes and park it
// in the upper half (+256) of the workspace: the 4-block path runs four
// independent Salsa states, one per SIMD lane.
#define SSE2_EXPAND_S(i, j) \
ASS( pshufd xmm4, xmm##i, j, j, j, j) \
AS2( movdqa [SSE2_WORKSPACE + (i*4+j)*16 + 256], xmm4)
AS2( movdqa xmm0, [REG_state + 0*16])
AS2( movdqa xmm1, [REG_state + 1*16])
AS2( movdqa xmm2, [REG_state + 2*16])
AS2( movdqa xmm3, [REG_state + 3*16])
SSE2_EXPAND_S(0, 0)
SSE2_EXPAND_S(0, 1)
SSE2_EXPAND_S(0, 2)
SSE2_EXPAND_S(0, 3)
SSE2_EXPAND_S(1, 0)
SSE2_EXPAND_S(1, 2)
SSE2_EXPAND_S(1, 3)
SSE2_EXPAND_S(2, 1)
SSE2_EXPAND_S(2, 2)
SSE2_EXPAND_S(2, 3)
SSE2_EXPAND_S(3, 0)
SSE2_EXPAND_S(3, 1)
SSE2_EXPAND_S(3, 2)
SSE2_EXPAND_S(3, 3)
// State words 8 (counter low) and 5 (counter high) were skipped above:
// each of the four lanes gets a successive counter value, with carry
// propagated from the low word via adc.
#define SSE2_EXPAND_S85(i) \
AS2( mov dword ptr [SSE2_WORKSPACE + 8*16 + i*4 + 256], REG_roundsLeft) \
AS2( mov dword ptr [SSE2_WORKSPACE + 5*16 + i*4 + 256], REG_temp32) \
AS2( add REG_roundsLeft, 1) \
AS2( adc REG_temp32, 0)
// Label 1: top of the 4-blocks-per-pass loop. Refresh the per-lane
// counters and write the advanced counter back to the state.
ASL(1)
AS2( mov REG_roundsLeft, dword ptr [REG_state + 8*4])
AS2( mov REG_temp32, dword ptr [REG_state + 5*4])
SSE2_EXPAND_S85(0)
SSE2_EXPAND_S85(1)
SSE2_EXPAND_S85(2)
SSE2_EXPAND_S85(3)
AS2( mov dword ptr [REG_state + 8*4], REG_roundsLeft)
AS2( mov dword ptr [REG_state + 5*4], REG_temp32)
// One quarter-round step for the single-block path: b ^= (a + d) <<< i,
// with the rotate synthesized from left/right shifts and two XORs.
#define SSE2_QUARTER_ROUND(a, b, d, i) \
AS2( movdqa xmm4, xmm##d) \
AS2( paddd xmm4, xmm##a) \
AS2( movdqa xmm5, xmm4) \
AS2( pslld xmm4, i) \
AS2( psrld xmm5, 32-i) \
AS2( pxor xmm##b, xmm4) \
AS2( pxor xmm##b, xmm5)
// L01-L32: the 32 instructions of one fully-unrolled 4-lane quarter-round
// column, parameterized on xmm registers (A-D) and workspace rows
// (a, b, c, d). The i parameter selects the source offset: i=1 reads the
// freshly expanded input copy at +256 (first half-round of a pass), i=0
// reads the working rows written by the previous half-round.
#define L01(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##A, [SSE2_WORKSPACE + d*16 + i*256]) /* y3 */
#define L02(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##C, [SSE2_WORKSPACE + a*16 + i*256]) /* y0 */
#define L03(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##C) /* y0+y3 */
#define L04(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L05(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 7)
#define L06(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##B, 32-7)
#define L07(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + b*16 + i*256])
#define L08(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##B) /* z1 */
#define L09(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + b*16], xmm##A)
#define L10(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L11(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##C) /* z1+y0 */
#define L12(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L13(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 9)
#define L14(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##D, 32-9)
#define L15(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + c*16 + i*256])
#define L16(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##D) /* z2 */
#define L17(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + c*16], xmm##A)
#define L18(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L19(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##B) /* z2+z1 */
#define L20(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L21(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 13)
#define L22(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##B, 32-13)
#define L23(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + d*16 + i*256])
#define L24(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##B) /* z3 */
#define L25(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + d*16], xmm##A)
#define L26(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##D) /* z3+z2 */
#define L27(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L28(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 18)
#define L29(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##D, 32-18)
#define L30(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##C) /* xor y0 */
#define L31(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##D) /* z0 */
#define L32(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + a*16], xmm##A)
// Interleave two (X8) independent quarter-round columns, step by step, to
// hide instruction latency on 8-register x86.
#define SSE2_QUARTER_ROUND_X8(i, a, b, c, d, e, f, g, h) \
L01(0,1,2,3, a,b,c,d, i) L01(4,5,6,7, e,f,g,h, i) \
L02(0,1,2,3, a,b,c,d, i) L02(4,5,6,7, e,f,g,h, i) \
L03(0,1,2,3, a,b,c,d, i) L03(4,5,6,7, e,f,g,h, i) \
L04(0,1,2,3, a,b,c,d, i) L04(4,5,6,7, e,f,g,h, i) \
L05(0,1,2,3, a,b,c,d, i) L05(4,5,6,7, e,f,g,h, i) \
L06(0,1,2,3, a,b,c,d, i) L06(4,5,6,7, e,f,g,h, i) \
L07(0,1,2,3, a,b,c,d, i) L07(4,5,6,7, e,f,g,h, i) \
L08(0,1,2,3, a,b,c,d, i) L08(4,5,6,7, e,f,g,h, i) \
L09(0,1,2,3, a,b,c,d, i) L09(4,5,6,7, e,f,g,h, i) \
L10(0,1,2,3, a,b,c,d, i) L10(4,5,6,7, e,f,g,h, i) \
L11(0,1,2,3, a,b,c,d, i) L11(4,5,6,7, e,f,g,h, i) \
L12(0,1,2,3, a,b,c,d, i) L12(4,5,6,7, e,f,g,h, i) \
L13(0,1,2,3, a,b,c,d, i) L13(4,5,6,7, e,f,g,h, i) \
L14(0,1,2,3, a,b,c,d, i) L14(4,5,6,7, e,f,g,h, i) \
L15(0,1,2,3, a,b,c,d, i) L15(4,5,6,7, e,f,g,h, i) \
L16(0,1,2,3, a,b,c,d, i) L16(4,5,6,7, e,f,g,h, i) \
L17(0,1,2,3, a,b,c,d, i) L17(4,5,6,7, e,f,g,h, i) \
L18(0,1,2,3, a,b,c,d, i) L18(4,5,6,7, e,f,g,h, i) \
L19(0,1,2,3, a,b,c,d, i) L19(4,5,6,7, e,f,g,h, i) \
L20(0,1,2,3, a,b,c,d, i) L20(4,5,6,7, e,f,g,h, i) \
L21(0,1,2,3, a,b,c,d, i) L21(4,5,6,7, e,f,g,h, i) \
L22(0,1,2,3, a,b,c,d, i) L22(4,5,6,7, e,f,g,h, i) \
L23(0,1,2,3, a,b,c,d, i) L23(4,5,6,7, e,f,g,h, i) \
L24(0,1,2,3, a,b,c,d, i) L24(4,5,6,7, e,f,g,h, i) \
L25(0,1,2,3, a,b,c,d, i) L25(4,5,6,7, e,f,g,h, i) \
L26(0,1,2,3, a,b,c,d, i) L26(4,5,6,7, e,f,g,h, i) \
L27(0,1,2,3, a,b,c,d, i) L27(4,5,6,7, e,f,g,h, i) \
L28(0,1,2,3, a,b,c,d, i) L28(4,5,6,7, e,f,g,h, i) \
L29(0,1,2,3, a,b,c,d, i) L29(4,5,6,7, e,f,g,h, i) \
L30(0,1,2,3, a,b,c,d, i) L30(4,5,6,7, e,f,g,h, i) \
L31(0,1,2,3, a,b,c,d, i) L31(4,5,6,7, e,f,g,h, i) \
L32(0,1,2,3, a,b,c,d, i) L32(4,5,6,7, e,f,g,h, i)
// Same idea with four interleaved columns -- x86-64 has xmm0-xmm15.
#define SSE2_QUARTER_ROUND_X16(i, a, b, c, d, e, f, g, h, A, B, C, D, E, F, G, H) \
L01(0,1,2,3, a,b,c,d, i) L01(4,5,6,7, e,f,g,h, i) L01(8,9,10,11, A,B,C,D, i) L01(12,13,14,15, E,F,G,H, i) \
L02(0,1,2,3, a,b,c,d, i) L02(4,5,6,7, e,f,g,h, i) L02(8,9,10,11, A,B,C,D, i) L02(12,13,14,15, E,F,G,H, i) \
L03(0,1,2,3, a,b,c,d, i) L03(4,5,6,7, e,f,g,h, i) L03(8,9,10,11, A,B,C,D, i) L03(12,13,14,15, E,F,G,H, i) \
L04(0,1,2,3, a,b,c,d, i) L04(4,5,6,7, e,f,g,h, i) L04(8,9,10,11, A,B,C,D, i) L04(12,13,14,15, E,F,G,H, i) \
L05(0,1,2,3, a,b,c,d, i) L05(4,5,6,7, e,f,g,h, i) L05(8,9,10,11, A,B,C,D, i) L05(12,13,14,15, E,F,G,H, i) \
L06(0,1,2,3, a,b,c,d, i) L06(4,5,6,7, e,f,g,h, i) L06(8,9,10,11, A,B,C,D, i) L06(12,13,14,15, E,F,G,H, i) \
L07(0,1,2,3, a,b,c,d, i) L07(4,5,6,7, e,f,g,h, i) L07(8,9,10,11, A,B,C,D, i) L07(12,13,14,15, E,F,G,H, i) \
L08(0,1,2,3, a,b,c,d, i) L08(4,5,6,7, e,f,g,h, i) L08(8,9,10,11, A,B,C,D, i) L08(12,13,14,15, E,F,G,H, i) \
L09(0,1,2,3, a,b,c,d, i) L09(4,5,6,7, e,f,g,h, i) L09(8,9,10,11, A,B,C,D, i) L09(12,13,14,15, E,F,G,H, i) \
L10(0,1,2,3, a,b,c,d, i) L10(4,5,6,7, e,f,g,h, i) L10(8,9,10,11, A,B,C,D, i) L10(12,13,14,15, E,F,G,H, i) \
L11(0,1,2,3, a,b,c,d, i) L11(4,5,6,7, e,f,g,h, i) L11(8,9,10,11, A,B,C,D, i) L11(12,13,14,15, E,F,G,H, i) \
L12(0,1,2,3, a,b,c,d, i) L12(4,5,6,7, e,f,g,h, i) L12(8,9,10,11, A,B,C,D, i) L12(12,13,14,15, E,F,G,H, i) \
L13(0,1,2,3, a,b,c,d, i) L13(4,5,6,7, e,f,g,h, i) L13(8,9,10,11, A,B,C,D, i) L13(12,13,14,15, E,F,G,H, i) \
L14(0,1,2,3, a,b,c,d, i) L14(4,5,6,7, e,f,g,h, i) L14(8,9,10,11, A,B,C,D, i) L14(12,13,14,15, E,F,G,H, i) \
L15(0,1,2,3, a,b,c,d, i) L15(4,5,6,7, e,f,g,h, i) L15(8,9,10,11, A,B,C,D, i) L15(12,13,14,15, E,F,G,H, i) \
L16(0,1,2,3, a,b,c,d, i) L16(4,5,6,7, e,f,g,h, i) L16(8,9,10,11, A,B,C,D, i) L16(12,13,14,15, E,F,G,H, i) \
L17(0,1,2,3, a,b,c,d, i) L17(4,5,6,7, e,f,g,h, i) L17(8,9,10,11, A,B,C,D, i) L17(12,13,14,15, E,F,G,H, i) \
L18(0,1,2,3, a,b,c,d, i) L18(4,5,6,7, e,f,g,h, i) L18(8,9,10,11, A,B,C,D, i) L18(12,13,14,15, E,F,G,H, i) \
L19(0,1,2,3, a,b,c,d, i) L19(4,5,6,7, e,f,g,h, i) L19(8,9,10,11, A,B,C,D, i) L19(12,13,14,15, E,F,G,H, i) \
L20(0,1,2,3, a,b,c,d, i) L20(4,5,6,7, e,f,g,h, i) L20(8,9,10,11, A,B,C,D, i) L20(12,13,14,15, E,F,G,H, i) \
L21(0,1,2,3, a,b,c,d, i) L21(4,5,6,7, e,f,g,h, i) L21(8,9,10,11, A,B,C,D, i) L21(12,13,14,15, E,F,G,H, i) \
L22(0,1,2,3, a,b,c,d, i) L22(4,5,6,7, e,f,g,h, i) L22(8,9,10,11, A,B,C,D, i) L22(12,13,14,15, E,F,G,H, i) \
L23(0,1,2,3, a,b,c,d, i) L23(4,5,6,7, e,f,g,h, i) L23(8,9,10,11, A,B,C,D, i) L23(12,13,14,15, E,F,G,H, i) \
L24(0,1,2,3, a,b,c,d, i) L24(4,5,6,7, e,f,g,h, i) L24(8,9,10,11, A,B,C,D, i) L24(12,13,14,15, E,F,G,H, i) \
L25(0,1,2,3, a,b,c,d, i) L25(4,5,6,7, e,f,g,h, i) L25(8,9,10,11, A,B,C,D, i) L25(12,13,14,15, E,F,G,H, i) \
L26(0,1,2,3, a,b,c,d, i) L26(4,5,6,7, e,f,g,h, i) L26(8,9,10,11, A,B,C,D, i) L26(12,13,14,15, E,F,G,H, i) \
L27(0,1,2,3, a,b,c,d, i) L27(4,5,6,7, e,f,g,h, i) L27(8,9,10,11, A,B,C,D, i) L27(12,13,14,15, E,F,G,H, i) \
L28(0,1,2,3, a,b,c,d, i) L28(4,5,6,7, e,f,g,h, i) L28(8,9,10,11, A,B,C,D, i) L28(12,13,14,15, E,F,G,H, i) \
L29(0,1,2,3, a,b,c,d, i) L29(4,5,6,7, e,f,g,h, i) L29(8,9,10,11, A,B,C,D, i) L29(12,13,14,15, E,F,G,H, i) \
L30(0,1,2,3, a,b,c,d, i) L30(4,5,6,7, e,f,g,h, i) L30(8,9,10,11, A,B,C,D, i) L30(12,13,14,15, E,F,G,H, i) \
L31(0,1,2,3, a,b,c,d, i) L31(4,5,6,7, e,f,g,h, i) L31(8,9,10,11, A,B,C,D, i) L31(12,13,14,15, E,F,G,H, i) \
L32(0,1,2,3, a,b,c,d, i) L32(4,5,6,7, e,f,g,h, i) L32(8,9,10,11, A,B,C,D, i) L32(12,13,14,15, E,F,G,H, i)
// First half-round of the pass: read from the expanded input copy (i=1).
#if CRYPTOPP_BOOL_X64
SSE2_QUARTER_ROUND_X16(1, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15)
#else
SSE2_QUARTER_ROUND_X8(1, 2, 6, 10, 14, 3, 7, 11, 15)
SSE2_QUARTER_ROUND_X8(1, 0, 4, 8, 12, 1, 5, 9, 13)
#endif
AS2( mov REG_roundsLeft, REG_rounds)
ASJ( jmp, 2, f)
// Subroutine: transpose four lanes held in xmm4-xmm7 back into four
// consecutive 16-byte rows and emit them via AS_XMM_OUTPUT4 (which XORs
// with input when present).
ASL(SSE2_Salsa_Output)
AS2( movdqa xmm0, xmm4)
AS2( punpckldq xmm4, xmm5)
AS2( movdqa xmm1, xmm6)
AS2( punpckldq xmm6, xmm7)
AS2( movdqa xmm2, xmm4)
AS2( punpcklqdq xmm4, xmm6) // e
AS2( punpckhqdq xmm2, xmm6) // f
AS2( punpckhdq xmm0, xmm5)
AS2( punpckhdq xmm1, xmm7)
AS2( movdqa xmm6, xmm0)
AS2( punpcklqdq xmm0, xmm1) // g
AS2( punpckhqdq xmm6, xmm1) // h
AS_XMM_OUTPUT4(SSE2_Salsa_Output_A, REG_input, REG_output, 4, 2, 0, 6, 1, 0, 4, 8, 12, 1)
AS1( ret)
// Label 6: main double-round loop of the 4-block path (two half-rounds
// per iteration, roundsLeft decremented by 2).
ASL(6)
#if CRYPTOPP_BOOL_X64
SSE2_QUARTER_ROUND_X16(0, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15)
ASL(2)
SSE2_QUARTER_ROUND_X16(0, 0, 13, 10, 7, 1, 14, 11, 4, 2, 15, 8, 5, 3, 12, 9, 6)
#else
SSE2_QUARTER_ROUND_X8(0, 2, 6, 10, 14, 3, 7, 11, 15)
SSE2_QUARTER_ROUND_X8(0, 0, 4, 8, 12, 1, 5, 9, 13)
ASL(2)
SSE2_QUARTER_ROUND_X8(0, 2, 15, 8, 5, 3, 12, 9, 6)
SSE2_QUARTER_ROUND_X8(0, 0, 13, 10, 7, 1, 14, 11, 4)
#endif
AS2( sub REG_roundsLeft, 2)
ASJ( jnz, 6, b)
// Feed-forward: add the saved input rows (+256) to four working rows and
// emit them through the transpose subroutine above.
#define SSE2_OUTPUT_4(a, b, c, d) \
AS2( movdqa xmm4, [SSE2_WORKSPACE + a*16 + 256])\
AS2( paddd xmm4, [SSE2_WORKSPACE + a*16])\
AS2( movdqa xmm5, [SSE2_WORKSPACE + b*16 + 256])\
AS2( paddd xmm5, [SSE2_WORKSPACE + b*16])\
AS2( movdqa xmm6, [SSE2_WORKSPACE + c*16 + 256])\
AS2( paddd xmm6, [SSE2_WORKSPACE + c*16])\
AS2( movdqa xmm7, [SSE2_WORKSPACE + d*16 + 256])\
AS2( paddd xmm7, [SSE2_WORKSPACE + d*16])\
ASC( call, SSE2_Salsa_Output)
SSE2_OUTPUT_4(0, 13, 10, 7)
SSE2_OUTPUT_4(4, 1, 14, 11)
SSE2_OUTPUT_4(8, 5, 2, 15)
SSE2_OUTPUT_4(12, 9, 6, 3)
// input may be a null pointer (pure keystream generation): only advance
// it past the 4 consumed blocks when it is non-null.
AS2( test REG_input, REG_input)
ASJ( jz, 9, f)
AS2( add REG_input, 12*16)
ASL(9)
AS2( add REG_output, 12*16)
AS2( sub REG_iterationCount, 4)
AS2( cmp REG_iterationCount, 4)
ASJ( jge, 1, b)
AS_POP_IF86( sp)
// Label 5: tail path, one block at a time (also the whole path when
// fewer than 4 blocks were requested).
ASL(5)
AS2( sub REG_iterationCount, 1)
ASJ( jl, 4, f)
AS2( movdqa xmm0, [REG_state + 0*16])
AS2( movdqa xmm1, [REG_state + 1*16])
AS2( movdqa xmm2, [REG_state + 2*16])
AS2( movdqa xmm3, [REG_state + 3*16])
AS2( mov REG_roundsLeft, REG_rounds)
// Label 0: double-round loop; the pshufd shuffles rotate rows between
// the two half-rounds so the same quarter-round sequence applies.
ASL(0)
SSE2_QUARTER_ROUND(0, 1, 3, 7)
SSE2_QUARTER_ROUND(1, 2, 0, 9)
SSE2_QUARTER_ROUND(2, 3, 1, 13)
SSE2_QUARTER_ROUND(3, 0, 2, 18)
ASS( pshufd xmm1, xmm1, 2, 1, 0, 3)
ASS( pshufd xmm2, xmm2, 1, 0, 3, 2)
ASS( pshufd xmm3, xmm3, 0, 3, 2, 1)
SSE2_QUARTER_ROUND(0, 3, 1, 7)
SSE2_QUARTER_ROUND(3, 2, 0, 9)
SSE2_QUARTER_ROUND(2, 1, 3, 13)
SSE2_QUARTER_ROUND(1, 0, 2, 18)
ASS( pshufd xmm1, xmm1, 0, 3, 2, 1)
ASS( pshufd xmm2, xmm2, 1, 0, 3, 2)
ASS( pshufd xmm3, xmm3, 2, 1, 0, 3)
AS2( sub REG_roundsLeft, 2)
ASJ( jnz, 0, b)
// Feed-forward add of the original state, then advance the 64-bit block
// counter (state words 8 = low, 5 = high).
AS2( paddd xmm0, [REG_state + 0*16])
AS2( paddd xmm1, [REG_state + 1*16])
AS2( paddd xmm2, [REG_state + 2*16])
AS2( paddd xmm3, [REG_state + 3*16])
AS2( add dword ptr [REG_state + 8*4], 1)
AS2( adc dword ptr [REG_state + 5*4], 0)
// Build low/high 32-bit lane masks, then permute the four rows from the
// in-memory SSE2-reordered layout back into output word order.
AS2( pcmpeqb xmm6, xmm6) // all ones
AS2( psrlq xmm6, 32) // lo32 mask
ASS( pshufd xmm7, xmm6, 0, 1, 2, 3) // hi32 mask
AS2( movdqa xmm4, xmm0)
AS2( movdqa xmm5, xmm3)
AS2( pand xmm0, xmm7)
AS2( pand xmm4, xmm6)
AS2( pand xmm3, xmm6)
AS2( pand xmm5, xmm7)
AS2( por xmm4, xmm5) // 0,13,2,15
AS2( movdqa xmm5, xmm1)
AS2( pand xmm1, xmm7)
AS2( pand xmm5, xmm6)
AS2( por xmm0, xmm5) // 4,1,6,3
AS2( pand xmm6, xmm2)
AS2( pand xmm2, xmm7)
AS2( por xmm1, xmm6) // 8,5,10,7
AS2( por xmm2, xmm3) // 12,9,14,11
AS2( movdqa xmm5, xmm4)
AS2( movdqa xmm6, xmm0)
AS3( shufpd xmm4, xmm1, 2) // 0,13,10,7
AS3( shufpd xmm0, xmm2, 2) // 4,1,14,11
AS3( shufpd xmm1, xmm5, 2) // 8,5,2,15
AS3( shufpd xmm2, xmm6, 2) // 12,9,6,3
// output keystream
AS_XMM_OUTPUT4(SSE2_Salsa_Output_B, REG_input, REG_output, 4, 0, 1, 2, 3, 0, 1, 2, 3, 4)
ASJ( jmp, 5, b)
// Label 4: done -- restore saved registers and fall out of the asm.
ASL(4)
AS_POP_IF86( bp)
#ifdef __GNUC__
AS_POP_IF86( bx)
ATT_PREFIX
// GCC constraint lists: in/out operands and clobbers for the asm above.
#if CRYPTOPP_BOOL_X64
: "+r" (input), "+r" (output), "+r" (iterationCount)
: "r" (m_rounds), "r" (m_state.m_ptr), "r" (workspace)
: "%eax", "%rdx", "memory", "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15"
#else
: "+a" (input), "+D" (output), "+c" (iterationCount)
: "d" (m_rounds), "S" (m_state.m_ptr)
: "memory", "cc"
#endif
);
#endif
#ifdef CRYPTOPP_GENERATE_X64_MASM
// MASM epilogue: restore the non-volatile xmm registers and the stack.
movdqa xmm6, [rsp + 0200h]
movdqa xmm7, [rsp + 0210h]
movdqa xmm8, [rsp + 0220h]
movdqa xmm9, [rsp + 0230h]
movdqa xmm10, [rsp + 0240h]
movdqa xmm11, [rsp + 0250h]
movdqa xmm12, [rsp + 0260h]
movdqa xmm13, [rsp + 0270h]
movdqa xmm14, [rsp + 0280h]
movdqa xmm15, [rsp + 0290h]
add rsp, 10*16 + 32*16 + 8
ret
Salsa20_OperateKeystream ENDP
#else
}
else
#endif
#endif
#ifndef CRYPTOPP_GENERATE_X64_MASM
// Portable C++ implementation -- also the readable reference for the
// asm paths above.
{
word32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
while (iterationCount--)
{
x0 = m_state[0]; x1 = m_state[1]; x2 = m_state[2]; x3 = m_state[3];
x4 = m_state[4]; x5 = m_state[5]; x6 = m_state[6]; x7 = m_state[7];
x8 = m_state[8]; x9 = m_state[9]; x10 = m_state[10]; x11 = m_state[11];
x12 = m_state[12]; x13 = m_state[13]; x14 = m_state[14]; x15 = m_state[15];
// Each loop iteration is one double round (hence i -= 2).
for (int i=m_rounds; i>0; i-=2)
{
// One Salsa20 quarter-round with rotation counts 7/9/13/18.
#define QUARTER_ROUND(a, b, c, d) \
b = b ^ rotlFixed(a + d, 7); \
c = c ^ rotlFixed(b + a, 9); \
d = d ^ rotlFixed(c + b, 13); \
a = a ^ rotlFixed(d + c, 18);
QUARTER_ROUND(x0, x4, x8, x12)
QUARTER_ROUND(x1, x5, x9, x13)
QUARTER_ROUND(x2, x6, x10, x14)
QUARTER_ROUND(x3, x7, x11, x15)
QUARTER_ROUND(x0, x13, x10, x7)
QUARTER_ROUND(x1, x14, x11, x4)
QUARTER_ROUND(x2, x15, x8, x5)
QUARTER_ROUND(x3, x12, x9, x6)
}
// Emit one 64-byte block: feed-forward add of the stored state, with
// output words reordered back from the SSE2-friendly m_state layout.
#define SALSA_OUTPUT(x) {\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 0, x0 + m_state[0]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 1, x13 + m_state[13]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 2, x10 + m_state[10]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 3, x7 + m_state[7]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 4, x4 + m_state[4]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 5, x1 + m_state[1]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 6, x14 + m_state[14]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 7, x11 + m_state[11]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 8, x8 + m_state[8]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 9, x5 + m_state[5]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 10, x2 + m_state[2]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 11, x15 + m_state[15]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 12, x12 + m_state[12]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 13, x9 + m_state[9]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 14, x6 + m_state[6]);\
CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 15, x3 + m_state[3]);}
#ifndef CRYPTOPP_DOXYGEN_PROCESSING
CRYPTOPP_KEYSTREAM_OUTPUT_SWITCH(SALSA_OUTPUT, BYTES_PER_ITERATION);
#endif
// Advance the 64-bit block counter with manual carry.
if (++m_state[8] == 0)
++m_state[5];
}
}
} // see comment above if an internal compiler error occurs here
  516. void XSalsa20_Policy::CipherSetKey(const NameValuePairs &params, const byte *key, size_t length)
  517. {
  518. m_rounds = params.GetIntValueWithDefault(Name::Rounds(), 20);
  519. if (!(m_rounds == 8 || m_rounds == 12 || m_rounds == 20))
  520. throw InvalidRounds(XSalsa20::StaticAlgorithmName(), m_rounds);
  521. GetUserKey(LITTLE_ENDIAN_ORDER, m_key.begin(), m_key.size(), key, length);
  522. if (length == 16)
  523. memcpy(m_key.begin()+4, m_key.begin(), 16);
  524. // "expand 32-byte k"
  525. m_state[0] = 0x61707865;
  526. m_state[1] = 0x3320646e;
  527. m_state[2] = 0x79622d32;
  528. m_state[3] = 0x6b206574;
  529. }
  530. void XSalsa20_Policy::CipherResynchronize(byte *keystreamBuffer, const byte *IV, size_t length)
  531. {
  532. CRYPTOPP_UNUSED(keystreamBuffer), CRYPTOPP_UNUSED(length);
  533. assert(length==24);
  534. word32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
  535. GetBlock<word32, LittleEndian> get(IV);
  536. get(x14)(x11)(x8)(x5)(m_state[14])(m_state[11]);
  537. x13 = m_key[0]; x10 = m_key[1]; x7 = m_key[2]; x4 = m_key[3];
  538. x15 = m_key[4]; x12 = m_key[5]; x9 = m_key[6]; x6 = m_key[7];
  539. x0 = m_state[0]; x1 = m_state[1]; x2 = m_state[2]; x3 = m_state[3];
  540. for (int i=m_rounds; i>0; i-=2)
  541. {
  542. QUARTER_ROUND(x0, x4, x8, x12)
  543. QUARTER_ROUND(x1, x5, x9, x13)
  544. QUARTER_ROUND(x2, x6, x10, x14)
  545. QUARTER_ROUND(x3, x7, x11, x15)
  546. QUARTER_ROUND(x0, x13, x10, x7)
  547. QUARTER_ROUND(x1, x14, x11, x4)
  548. QUARTER_ROUND(x2, x15, x8, x5)
  549. QUARTER_ROUND(x3, x12, x9, x6)
  550. }
  551. m_state[13] = x0; m_state[10] = x1; m_state[7] = x2; m_state[4] = x3;
  552. m_state[15] = x14; m_state[12] = x11; m_state[9] = x8; m_state[6] = x5;
  553. m_state[8] = m_state[5] = 0;
  554. }
  555. NAMESPACE_END
  556. #endif // #ifndef CRYPTOPP_GENERATE_X64_MASM