Counter Strike: Global Offensive Source Code
salsa.cpp
// salsa.cpp - written and placed in the public domain by Wei Dai

// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM salsa.cpp" to generate MASM code

#include "pch.h"

#ifndef CRYPTOPP_GENERATE_X64_MASM

#include "salsa.h"
#include "misc.h"
#include "argnames.h"
#include "cpu.h"

NAMESPACE_BEGIN(CryptoPP)

void Salsa20_TestInstantiations()
{
    Salsa20::Encryption x;
}

void Salsa20_Policy::CipherSetKey(const NameValuePairs &params, const byte *key, size_t length)
{
    m_rounds = params.GetIntValueWithDefault(Name::Rounds(), 20);

    if (!(m_rounds == 8 || m_rounds == 12 || m_rounds == 20))
        throw InvalidRounds(Salsa20::StaticAlgorithmName(), m_rounds);

    // m_state is reordered for SSE2
    GetBlock<word32, LittleEndian> get1(key);
    get1(m_state[13])(m_state[10])(m_state[7])(m_state[4]);
    GetBlock<word32, LittleEndian> get2(key + length - 16);
    get2(m_state[15])(m_state[12])(m_state[9])(m_state[6]);

    // "expand 16-byte k" or "expand 32-byte k"
    m_state[0] = 0x61707865;
    m_state[1] = (length == 16) ? 0x3120646e : 0x3320646e;
    m_state[2] = (length == 16) ? 0x79622d36 : 0x79622d32;
    m_state[3] = 0x6b206574;
}
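
// Editor's note (not in the original file): tracing the stores above and in
// CipherResynchronize, standard Salsa20 state word j ends up at
// m_state[(13*j) % 16]:
//
//     standard: c0 k0 k1 k2 k3 c1 n0 n1 b0 b1 c2 k4 k5 k6 k7 c3
//     m_state:   0 13 10  7  4  1 14 11  8  5  2 15 12  9  6  3
//
// SALSA_OUTPUT below reads the words back through the same permutation when
// writing keystream, so the reordering is invisible to callers.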

void Salsa20_Policy::CipherResynchronize(byte *keystreamBuffer, const byte *IV, size_t length)
{
    assert(length == 8);

    GetBlock<word32, LittleEndian> get(IV);
    get(m_state[14])(m_state[11]);
    m_state[8] = m_state[5] = 0;
}

void Salsa20_Policy::SeekToIteration(lword iterationCount)
{
    m_state[8] = (word32)iterationCount;
    m_state[5] = (word32)SafeRightShift<32>(iterationCount);
}
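
// Editor's note: in this layout m_state[8] holds the low and m_state[5] the
// high 32 bits of the 64-bit block counter. For example, SeekToIteration
// called with 0x100000002 leaves m_state[8] == 2 and m_state[5] == 1.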

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X64

unsigned int Salsa20_Policy::GetAlignment() const
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    if (HasSSE2())
        return 16;
    else
#endif
        return GetAlignmentOf<word32>();
}

unsigned int Salsa20_Policy::GetOptimalBlockSize() const
{
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    if (HasSSE2())
        return 4*BYTES_PER_ITERATION;
    else
#endif
        return BYTES_PER_ITERATION;
}

#endif
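
// Editor's note: 4*BYTES_PER_ITERATION matches the SSE2 main loop below,
// which generates four 64-byte keystream blocks per pass; requesting
// keystream in multiples of that size keeps the cipher on its fast path.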

#ifdef CRYPTOPP_X64_MASM_AVAILABLE
extern "C" {
void Salsa20_OperateKeystream(byte *output, const byte *input, size_t iterationCount, int rounds, void *state);
}
#endif

#pragma warning(disable: 4731) // frame pointer register 'ebp' modified by inline assembly code

void Salsa20_Policy::OperateKeystream(KeystreamOperation operation, byte *output, const byte *input, size_t iterationCount)
{
#endif // #ifdef CRYPTOPP_GENERATE_X64_MASM

#ifdef CRYPTOPP_X64_MASM_AVAILABLE
    Salsa20_OperateKeystream(output, input, iterationCount, m_rounds, m_state.data());
    return;
#endif

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#ifdef CRYPTOPP_GENERATE_X64_MASM
    ALIGN 8
Salsa20_OperateKeystream PROC FRAME
    mov r10, [rsp + 5*8] ; state
    alloc_stack(10*16 + 32*16 + 8)
    save_xmm128 xmm6, 0200h
    save_xmm128 xmm7, 0210h
    save_xmm128 xmm8, 0220h
    save_xmm128 xmm9, 0230h
    save_xmm128 xmm10, 0240h
    save_xmm128 xmm11, 0250h
    save_xmm128 xmm12, 0260h
    save_xmm128 xmm13, 0270h
    save_xmm128 xmm14, 0280h
    save_xmm128 xmm15, 0290h
    .endprolog

#define REG_output rcx
#define REG_input rdx
#define REG_iterationCount r8
#define REG_state r10
#define REG_rounds r9d
#define REG_roundsLeft eax
#define REG_temp32 r11d
#define REG_temp r11
#define SSE2_WORKSPACE rsp
#else
    if (HasSSE2())
    {
#if CRYPTOPP_BOOL_X64
#define REG_output %4
#define REG_input %1
#define REG_iterationCount %2
#define REG_state %3
#define REG_rounds %0
#define REG_roundsLeft eax
#define REG_temp32 edx
#define REG_temp rdx
#define SSE2_WORKSPACE %5

        FixedSizeAlignedSecBlock<byte, 32*16> workspace;
#else
#define REG_output edi
#define REG_input eax
#define REG_iterationCount ecx
#define REG_state esi
#define REG_rounds edx
#define REG_roundsLeft ebx
#define REG_temp32 ebp
#define REG_temp ebp
#define SSE2_WORKSPACE esp + WORD_SZ
#endif

#ifdef __GNUC__
        __asm__ __volatile__
        (
            ".intel_syntax noprefix;"
            AS_PUSH_IF86( bx)
#else
        void *s = m_state.data();
        word32 r = m_rounds;

        AS2( mov REG_iterationCount, iterationCount)
        AS2( mov REG_input, input)
        AS2( mov REG_output, output)
        AS2( mov REG_state, s)
        AS2( mov REG_rounds, r)
#endif
#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM

        AS_PUSH_IF86( bp)
        AS2( cmp REG_iterationCount, 4)
        ASJ( jl, 5, f)

#if CRYPTOPP_BOOL_X86
        AS2( mov ebx, esp)
        AS2( and esp, -16)
        AS2( sub esp, 32*16)
        AS1( push ebx)
#endif
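
// Editor's note: the x86-only block above saves esp in ebx, aligns esp to a
// 16-byte boundary, and reserves the 32*16-byte workspace that SSE2_WORKSPACE
// addresses; the push leaves the saved esp at [esp], which is why the x86
// SSE2_WORKSPACE definition adds WORD_SZ. AS_POP_IF86( sp) after the 4-block
// loop restores the original stack pointer.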

#define SSE2_EXPAND_S(i, j) \
    ASS( pshufd xmm4, xmm##i, j, j, j, j) \
    AS2( movdqa [SSE2_WORKSPACE + (i*4+j)*16 + 256], xmm4)

        AS2( movdqa xmm0, [REG_state + 0*16])
        AS2( movdqa xmm1, [REG_state + 1*16])
        AS2( movdqa xmm2, [REG_state + 2*16])
        AS2( movdqa xmm3, [REG_state + 3*16])
        SSE2_EXPAND_S(0, 0)
        SSE2_EXPAND_S(0, 1)
        SSE2_EXPAND_S(0, 2)
        SSE2_EXPAND_S(0, 3)
        SSE2_EXPAND_S(1, 0)
        SSE2_EXPAND_S(1, 2)
        SSE2_EXPAND_S(1, 3)
        SSE2_EXPAND_S(2, 1)
        SSE2_EXPAND_S(2, 2)
        SSE2_EXPAND_S(2, 3)
        SSE2_EXPAND_S(3, 0)
        SSE2_EXPAND_S(3, 1)
        SSE2_EXPAND_S(3, 2)
        SSE2_EXPAND_S(3, 3)

#define SSE2_EXPAND_S85(i) \
    AS2( mov dword ptr [SSE2_WORKSPACE + 8*16 + i*4 + 256], REG_roundsLeft) \
    AS2( mov dword ptr [SSE2_WORKSPACE + 5*16 + i*4 + 256], REG_temp32) \
    AS2( add REG_roundsLeft, 1) \
    AS2( adc REG_temp32, 0)
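
// Editor's note: REG_roundsLeft / REG_temp32 are reused here as scratch to
// hold the 64-bit block counter (state words 8 and 5); SSE2_EXPAND_S85 stores
// the current counter into lane i of the workspace and then increments it, so
// the four parallel blocks are generated with consecutive counter values.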

        ASL(1)
        AS2( mov REG_roundsLeft, dword ptr [REG_state + 8*4])
        AS2( mov REG_temp32, dword ptr [REG_state + 5*4])
        SSE2_EXPAND_S85(0)
        SSE2_EXPAND_S85(1)
        SSE2_EXPAND_S85(2)
        SSE2_EXPAND_S85(3)
        AS2( mov dword ptr [REG_state + 8*4], REG_roundsLeft)
        AS2( mov dword ptr [REG_state + 5*4], REG_temp32)

#define SSE2_QUARTER_ROUND(a, b, d, i) \
    AS2( movdqa xmm4, xmm##d) \
    AS2( paddd xmm4, xmm##a) \
    AS2( movdqa xmm5, xmm4) \
    AS2( pslld xmm4, i) \
    AS2( psrld xmm5, 32-i) \
    AS2( pxor xmm##b, xmm4) \
    AS2( pxor xmm##b, xmm5)
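
// Editor's note: in scalar terms SSE2_QUARTER_ROUND computes b ^= (a + d) <<< i
// on four words at once. SSE2 has no packed-rotate instruction, so the rotate
// is emulated as (x << i) and (x >> (32-i)), XORed into b separately.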

#define L01(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##A, [SSE2_WORKSPACE + d*16 + i*256]) /* y3 */
#define L02(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##C, [SSE2_WORKSPACE + a*16 + i*256]) /* y0 */
#define L03(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##C) /* y0+y3 */
#define L04(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L05(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 7)
#define L06(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##B, 32-7)
#define L07(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + b*16 + i*256])
#define L08(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##B) /* z1 */
#define L09(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + b*16], xmm##A)
#define L10(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L11(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##C) /* z1+y0 */
#define L12(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L13(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 9)
#define L14(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##D, 32-9)
#define L15(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + c*16 + i*256])
#define L16(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##D) /* z2 */
#define L17(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + c*16], xmm##A)
#define L18(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L19(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##B) /* z2+z1 */
#define L20(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##B, xmm##A)
#define L21(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 13)
#define L22(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##B, 32-13)
#define L23(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, [SSE2_WORKSPACE + d*16 + i*256])
#define L24(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##B) /* z3 */
#define L25(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + d*16], xmm##A)
#define L26(A,B,C,D,a,b,c,d,i) AS2( paddd xmm##A, xmm##D) /* z3+z2 */
#define L27(A,B,C,D,a,b,c,d,i) AS2( movdqa xmm##D, xmm##A)
#define L28(A,B,C,D,a,b,c,d,i) AS2( pslld xmm##A, 18)
#define L29(A,B,C,D,a,b,c,d,i) AS2( psrld xmm##D, 32-18)
#define L30(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##C) /* xor y0 */
#define L31(A,B,C,D,a,b,c,d,i) AS2( pxor xmm##A, xmm##D) /* z0 */
#define L32(A,B,C,D,a,b,c,d,i) AS2( movdqa [SSE2_WORKSPACE + a*16], xmm##A)

#define SSE2_QUARTER_ROUND_X8(i, a, b, c, d, e, f, g, h) \
    L01(0,1,2,3, a,b,c,d, i) L01(4,5,6,7, e,f,g,h, i) \
    L02(0,1,2,3, a,b,c,d, i) L02(4,5,6,7, e,f,g,h, i) \
    L03(0,1,2,3, a,b,c,d, i) L03(4,5,6,7, e,f,g,h, i) \
    L04(0,1,2,3, a,b,c,d, i) L04(4,5,6,7, e,f,g,h, i) \
    L05(0,1,2,3, a,b,c,d, i) L05(4,5,6,7, e,f,g,h, i) \
    L06(0,1,2,3, a,b,c,d, i) L06(4,5,6,7, e,f,g,h, i) \
    L07(0,1,2,3, a,b,c,d, i) L07(4,5,6,7, e,f,g,h, i) \
    L08(0,1,2,3, a,b,c,d, i) L08(4,5,6,7, e,f,g,h, i) \
    L09(0,1,2,3, a,b,c,d, i) L09(4,5,6,7, e,f,g,h, i) \
    L10(0,1,2,3, a,b,c,d, i) L10(4,5,6,7, e,f,g,h, i) \
    L11(0,1,2,3, a,b,c,d, i) L11(4,5,6,7, e,f,g,h, i) \
    L12(0,1,2,3, a,b,c,d, i) L12(4,5,6,7, e,f,g,h, i) \
    L13(0,1,2,3, a,b,c,d, i) L13(4,5,6,7, e,f,g,h, i) \
    L14(0,1,2,3, a,b,c,d, i) L14(4,5,6,7, e,f,g,h, i) \
    L15(0,1,2,3, a,b,c,d, i) L15(4,5,6,7, e,f,g,h, i) \
    L16(0,1,2,3, a,b,c,d, i) L16(4,5,6,7, e,f,g,h, i) \
    L17(0,1,2,3, a,b,c,d, i) L17(4,5,6,7, e,f,g,h, i) \
    L18(0,1,2,3, a,b,c,d, i) L18(4,5,6,7, e,f,g,h, i) \
    L19(0,1,2,3, a,b,c,d, i) L19(4,5,6,7, e,f,g,h, i) \
    L20(0,1,2,3, a,b,c,d, i) L20(4,5,6,7, e,f,g,h, i) \
    L21(0,1,2,3, a,b,c,d, i) L21(4,5,6,7, e,f,g,h, i) \
    L22(0,1,2,3, a,b,c,d, i) L22(4,5,6,7, e,f,g,h, i) \
    L23(0,1,2,3, a,b,c,d, i) L23(4,5,6,7, e,f,g,h, i) \
    L24(0,1,2,3, a,b,c,d, i) L24(4,5,6,7, e,f,g,h, i) \
    L25(0,1,2,3, a,b,c,d, i) L25(4,5,6,7, e,f,g,h, i) \
    L26(0,1,2,3, a,b,c,d, i) L26(4,5,6,7, e,f,g,h, i) \
    L27(0,1,2,3, a,b,c,d, i) L27(4,5,6,7, e,f,g,h, i) \
    L28(0,1,2,3, a,b,c,d, i) L28(4,5,6,7, e,f,g,h, i) \
    L29(0,1,2,3, a,b,c,d, i) L29(4,5,6,7, e,f,g,h, i) \
    L30(0,1,2,3, a,b,c,d, i) L30(4,5,6,7, e,f,g,h, i) \
    L31(0,1,2,3, a,b,c,d, i) L31(4,5,6,7, e,f,g,h, i) \
    L32(0,1,2,3, a,b,c,d, i) L32(4,5,6,7, e,f,g,h, i)

#define SSE2_QUARTER_ROUND_X16(i, a, b, c, d, e, f, g, h, A, B, C, D, E, F, G, H) \
    L01(0,1,2,3, a,b,c,d, i) L01(4,5,6,7, e,f,g,h, i) L01(8,9,10,11, A,B,C,D, i) L01(12,13,14,15, E,F,G,H, i) \
    L02(0,1,2,3, a,b,c,d, i) L02(4,5,6,7, e,f,g,h, i) L02(8,9,10,11, A,B,C,D, i) L02(12,13,14,15, E,F,G,H, i) \
    L03(0,1,2,3, a,b,c,d, i) L03(4,5,6,7, e,f,g,h, i) L03(8,9,10,11, A,B,C,D, i) L03(12,13,14,15, E,F,G,H, i) \
    L04(0,1,2,3, a,b,c,d, i) L04(4,5,6,7, e,f,g,h, i) L04(8,9,10,11, A,B,C,D, i) L04(12,13,14,15, E,F,G,H, i) \
    L05(0,1,2,3, a,b,c,d, i) L05(4,5,6,7, e,f,g,h, i) L05(8,9,10,11, A,B,C,D, i) L05(12,13,14,15, E,F,G,H, i) \
    L06(0,1,2,3, a,b,c,d, i) L06(4,5,6,7, e,f,g,h, i) L06(8,9,10,11, A,B,C,D, i) L06(12,13,14,15, E,F,G,H, i) \
    L07(0,1,2,3, a,b,c,d, i) L07(4,5,6,7, e,f,g,h, i) L07(8,9,10,11, A,B,C,D, i) L07(12,13,14,15, E,F,G,H, i) \
    L08(0,1,2,3, a,b,c,d, i) L08(4,5,6,7, e,f,g,h, i) L08(8,9,10,11, A,B,C,D, i) L08(12,13,14,15, E,F,G,H, i) \
    L09(0,1,2,3, a,b,c,d, i) L09(4,5,6,7, e,f,g,h, i) L09(8,9,10,11, A,B,C,D, i) L09(12,13,14,15, E,F,G,H, i) \
    L10(0,1,2,3, a,b,c,d, i) L10(4,5,6,7, e,f,g,h, i) L10(8,9,10,11, A,B,C,D, i) L10(12,13,14,15, E,F,G,H, i) \
    L11(0,1,2,3, a,b,c,d, i) L11(4,5,6,7, e,f,g,h, i) L11(8,9,10,11, A,B,C,D, i) L11(12,13,14,15, E,F,G,H, i) \
    L12(0,1,2,3, a,b,c,d, i) L12(4,5,6,7, e,f,g,h, i) L12(8,9,10,11, A,B,C,D, i) L12(12,13,14,15, E,F,G,H, i) \
    L13(0,1,2,3, a,b,c,d, i) L13(4,5,6,7, e,f,g,h, i) L13(8,9,10,11, A,B,C,D, i) L13(12,13,14,15, E,F,G,H, i) \
    L14(0,1,2,3, a,b,c,d, i) L14(4,5,6,7, e,f,g,h, i) L14(8,9,10,11, A,B,C,D, i) L14(12,13,14,15, E,F,G,H, i) \
    L15(0,1,2,3, a,b,c,d, i) L15(4,5,6,7, e,f,g,h, i) L15(8,9,10,11, A,B,C,D, i) L15(12,13,14,15, E,F,G,H, i) \
    L16(0,1,2,3, a,b,c,d, i) L16(4,5,6,7, e,f,g,h, i) L16(8,9,10,11, A,B,C,D, i) L16(12,13,14,15, E,F,G,H, i) \
    L17(0,1,2,3, a,b,c,d, i) L17(4,5,6,7, e,f,g,h, i) L17(8,9,10,11, A,B,C,D, i) L17(12,13,14,15, E,F,G,H, i) \
    L18(0,1,2,3, a,b,c,d, i) L18(4,5,6,7, e,f,g,h, i) L18(8,9,10,11, A,B,C,D, i) L18(12,13,14,15, E,F,G,H, i) \
    L19(0,1,2,3, a,b,c,d, i) L19(4,5,6,7, e,f,g,h, i) L19(8,9,10,11, A,B,C,D, i) L19(12,13,14,15, E,F,G,H, i) \
    L20(0,1,2,3, a,b,c,d, i) L20(4,5,6,7, e,f,g,h, i) L20(8,9,10,11, A,B,C,D, i) L20(12,13,14,15, E,F,G,H, i) \
    L21(0,1,2,3, a,b,c,d, i) L21(4,5,6,7, e,f,g,h, i) L21(8,9,10,11, A,B,C,D, i) L21(12,13,14,15, E,F,G,H, i) \
    L22(0,1,2,3, a,b,c,d, i) L22(4,5,6,7, e,f,g,h, i) L22(8,9,10,11, A,B,C,D, i) L22(12,13,14,15, E,F,G,H, i) \
    L23(0,1,2,3, a,b,c,d, i) L23(4,5,6,7, e,f,g,h, i) L23(8,9,10,11, A,B,C,D, i) L23(12,13,14,15, E,F,G,H, i) \
    L24(0,1,2,3, a,b,c,d, i) L24(4,5,6,7, e,f,g,h, i) L24(8,9,10,11, A,B,C,D, i) L24(12,13,14,15, E,F,G,H, i) \
    L25(0,1,2,3, a,b,c,d, i) L25(4,5,6,7, e,f,g,h, i) L25(8,9,10,11, A,B,C,D, i) L25(12,13,14,15, E,F,G,H, i) \
    L26(0,1,2,3, a,b,c,d, i) L26(4,5,6,7, e,f,g,h, i) L26(8,9,10,11, A,B,C,D, i) L26(12,13,14,15, E,F,G,H, i) \
    L27(0,1,2,3, a,b,c,d, i) L27(4,5,6,7, e,f,g,h, i) L27(8,9,10,11, A,B,C,D, i) L27(12,13,14,15, E,F,G,H, i) \
    L28(0,1,2,3, a,b,c,d, i) L28(4,5,6,7, e,f,g,h, i) L28(8,9,10,11, A,B,C,D, i) L28(12,13,14,15, E,F,G,H, i) \
    L29(0,1,2,3, a,b,c,d, i) L29(4,5,6,7, e,f,g,h, i) L29(8,9,10,11, A,B,C,D, i) L29(12,13,14,15, E,F,G,H, i) \
    L30(0,1,2,3, a,b,c,d, i) L30(4,5,6,7, e,f,g,h, i) L30(8,9,10,11, A,B,C,D, i) L30(12,13,14,15, E,F,G,H, i) \
    L31(0,1,2,3, a,b,c,d, i) L31(4,5,6,7, e,f,g,h, i) L31(8,9,10,11, A,B,C,D, i) L31(12,13,14,15, E,F,G,H, i) \
    L32(0,1,2,3, a,b,c,d, i) L32(4,5,6,7, e,f,g,h, i) L32(8,9,10,11, A,B,C,D, i) L32(12,13,14,15, E,F,G,H, i)

#if CRYPTOPP_BOOL_X64
        SSE2_QUARTER_ROUND_X16(1, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15)
#else
        SSE2_QUARTER_ROUND_X8(1, 2, 6, 10, 14, 3, 7, 11, 15)
        SSE2_QUARTER_ROUND_X8(1, 0, 4, 8, 12, 1, 5, 9, 13)
#endif
        AS2( mov REG_roundsLeft, REG_rounds)
        ASJ( jmp, 2, f)

        ASL(SSE2_Salsa_Output)
        AS2( movdqa xmm0, xmm4)
        AS2( punpckldq xmm4, xmm5)
        AS2( movdqa xmm1, xmm6)
        AS2( punpckldq xmm6, xmm7)
        AS2( movdqa xmm2, xmm4)
        AS2( punpcklqdq xmm4, xmm6) // e
        AS2( punpckhqdq xmm2, xmm6) // f
        AS2( punpckhdq xmm0, xmm5)
        AS2( punpckhdq xmm1, xmm7)
        AS2( movdqa xmm6, xmm0)
        AS2( punpcklqdq xmm0, xmm1) // g
        AS2( punpckhqdq xmm6, xmm1) // h
        AS_XMM_OUTPUT4(SSE2_Salsa_Output_A, REG_input, REG_output, 4, 2, 0, 6, 1, 0, 4, 8, 12, 1)
        AS1( ret)

        ASL(6)
#if CRYPTOPP_BOOL_X64
        SSE2_QUARTER_ROUND_X16(0, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15)
        ASL(2)
        SSE2_QUARTER_ROUND_X16(0, 0, 13, 10, 7, 1, 14, 11, 4, 2, 15, 8, 5, 3, 12, 9, 6)
#else
        SSE2_QUARTER_ROUND_X8(0, 2, 6, 10, 14, 3, 7, 11, 15)
        SSE2_QUARTER_ROUND_X8(0, 0, 4, 8, 12, 1, 5, 9, 13)
        ASL(2)
        SSE2_QUARTER_ROUND_X8(0, 2, 15, 8, 5, 3, 12, 9, 6)
        SSE2_QUARTER_ROUND_X8(0, 0, 13, 10, 7, 1, 14, 11, 4)
#endif
        AS2( sub REG_roundsLeft, 2)
        ASJ( jnz, 6, b)

#define SSE2_OUTPUT_4(a, b, c, d) \
    AS2( movdqa xmm4, [SSE2_WORKSPACE + a*16 + 256]) \
    AS2( paddd xmm4, [SSE2_WORKSPACE + a*16]) \
    AS2( movdqa xmm5, [SSE2_WORKSPACE + b*16 + 256]) \
    AS2( paddd xmm5, [SSE2_WORKSPACE + b*16]) \
    AS2( movdqa xmm6, [SSE2_WORKSPACE + c*16 + 256]) \
    AS2( paddd xmm6, [SSE2_WORKSPACE + c*16]) \
    AS2( movdqa xmm7, [SSE2_WORKSPACE + d*16 + 256]) \
    AS2( paddd xmm7, [SSE2_WORKSPACE + d*16]) \
    ASC( call, SSE2_Salsa_Output)

        SSE2_OUTPUT_4(0, 13, 10, 7)
        SSE2_OUTPUT_4(4, 1, 14, 11)
        SSE2_OUTPUT_4(8, 5, 2, 15)
        SSE2_OUTPUT_4(12, 9, 6, 3)
        AS2( test REG_input, REG_input)
        ASJ( jz, 9, f)
        AS2( add REG_input, 12*16)
        ASL(9)
        AS2( add REG_output, 12*16)
        AS2( sub REG_iterationCount, 4)
        AS2( cmp REG_iterationCount, 4)
        ASJ( jge, 1, b)
        AS_POP_IF86( sp)

        ASL(5)
        AS2( sub REG_iterationCount, 1)
        ASJ( jl, 4, f)
        AS2( movdqa xmm0, [REG_state + 0*16])
        AS2( movdqa xmm1, [REG_state + 1*16])
        AS2( movdqa xmm2, [REG_state + 2*16])
        AS2( movdqa xmm3, [REG_state + 3*16])
        AS2( mov REG_roundsLeft, REG_rounds)

        ASL(0)
        SSE2_QUARTER_ROUND(0, 1, 3, 7)
        SSE2_QUARTER_ROUND(1, 2, 0, 9)
        SSE2_QUARTER_ROUND(2, 3, 1, 13)
        SSE2_QUARTER_ROUND(3, 0, 2, 18)
        ASS( pshufd xmm1, xmm1, 2, 1, 0, 3)
        ASS( pshufd xmm2, xmm2, 1, 0, 3, 2)
        ASS( pshufd xmm3, xmm3, 0, 3, 2, 1)
        SSE2_QUARTER_ROUND(0, 3, 1, 7)
        SSE2_QUARTER_ROUND(3, 2, 0, 9)
        SSE2_QUARTER_ROUND(2, 1, 3, 13)
        SSE2_QUARTER_ROUND(1, 0, 2, 18)
        ASS( pshufd xmm1, xmm1, 0, 3, 2, 1)
        ASS( pshufd xmm2, xmm2, 1, 0, 3, 2)
        ASS( pshufd xmm3, xmm3, 2, 1, 0, 3)
        AS2( sub REG_roundsLeft, 2)
        ASJ( jnz, 0, b)

        AS2( paddd xmm0, [REG_state + 0*16])
        AS2( paddd xmm1, [REG_state + 1*16])
        AS2( paddd xmm2, [REG_state + 2*16])
        AS2( paddd xmm3, [REG_state + 3*16])
        AS2( add dword ptr [REG_state + 8*4], 1)
        AS2( adc dword ptr [REG_state + 5*4], 0)

        AS2( pcmpeqb xmm6, xmm6)            // all ones
        AS2( psrlq xmm6, 32)                // lo32 mask
        ASS( pshufd xmm7, xmm6, 0, 1, 2, 3) // hi32 mask
        AS2( movdqa xmm4, xmm0)
        AS2( movdqa xmm5, xmm3)
        AS2( pand xmm0, xmm7)
        AS2( pand xmm4, xmm6)
        AS2( pand xmm3, xmm6)
        AS2( pand xmm5, xmm7)
        AS2( por xmm4, xmm5)                // 0,13,2,15
        AS2( movdqa xmm5, xmm1)
        AS2( pand xmm1, xmm7)
        AS2( pand xmm5, xmm6)
        AS2( por xmm0, xmm5)                // 4,1,6,3
        AS2( pand xmm6, xmm2)
        AS2( pand xmm2, xmm7)
        AS2( por xmm1, xmm6)                // 8,5,10,7
        AS2( por xmm2, xmm3)                // 12,9,14,11
        AS2( movdqa xmm5, xmm4)
        AS2( movdqa xmm6, xmm0)
        AS3( shufpd xmm4, xmm1, 2)          // 0,13,10,7
        AS3( shufpd xmm0, xmm2, 2)          // 4,1,14,11
        AS3( shufpd xmm1, xmm5, 2)          // 8,5,2,15
        AS3( shufpd xmm2, xmm6, 2)          // 12,9,6,3

        // output keystream
        AS_XMM_OUTPUT4(SSE2_Salsa_Output_B, REG_input, REG_output, 4, 0, 1, 2, 3, 0, 1, 2, 3, 4)
        ASJ( jmp, 5, b)

        ASL(4)
        AS_POP_IF86( bp)
#ifdef __GNUC__
        AS_POP_IF86( bx)
        ".att_syntax prefix;"
            :
#if CRYPTOPP_BOOL_X64
            : "r" (m_rounds), "r" (input), "r" (iterationCount), "r" (m_state.data()), "r" (output), "r" (workspace.m_ptr)
            : "%eax", "%edx", "memory", "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15"
#else
            : "d" (m_rounds), "a" (input), "c" (iterationCount), "S" (m_state.data()), "D" (output)
            : "memory", "cc"
#endif
        );
#endif

#ifdef CRYPTOPP_GENERATE_X64_MASM
    movdqa xmm6, [rsp + 0200h]
    movdqa xmm7, [rsp + 0210h]
    movdqa xmm8, [rsp + 0220h]
    movdqa xmm9, [rsp + 0230h]
    movdqa xmm10, [rsp + 0240h]
    movdqa xmm11, [rsp + 0250h]
    movdqa xmm12, [rsp + 0260h]
    movdqa xmm13, [rsp + 0270h]
    movdqa xmm14, [rsp + 0280h]
    movdqa xmm15, [rsp + 0290h]
    add rsp, 10*16 + 32*16 + 8
    ret
Salsa20_OperateKeystream ENDP
#else
    }
    else
#endif
#endif
#ifndef CRYPTOPP_GENERATE_X64_MASM
    {
        word32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;

        while (iterationCount--)
        {
            x0 = m_state[0];   x1 = m_state[1];   x2 = m_state[2];   x3 = m_state[3];
            x4 = m_state[4];   x5 = m_state[5];   x6 = m_state[6];   x7 = m_state[7];
            x8 = m_state[8];   x9 = m_state[9];   x10 = m_state[10]; x11 = m_state[11];
            x12 = m_state[12]; x13 = m_state[13]; x14 = m_state[14]; x15 = m_state[15];

            for (int i=m_rounds; i>0; i-=2)
            {
#define QUARTER_ROUND(a, b, c, d) \
    b = b ^ rotlFixed(a + d, 7); \
    c = c ^ rotlFixed(b + a, 9); \
    d = d ^ rotlFixed(c + b, 13); \
    a = a ^ rotlFixed(d + c, 18);
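
// Editor's note: this is quarterround() from the Salsa20 specification, with
// (a,b,c,d) = (y0,y1,y2,y3) updated in place:
//   z1 = y1 ^ ((y0 + y3) <<< 7)
//   z2 = y2 ^ ((z1 + y0) <<< 9)
//   z3 = y3 ^ ((z2 + z1) <<< 13)
//   z0 = y0 ^ ((z3 + z2) <<< 18)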

                QUARTER_ROUND(x0, x4, x8, x12)
                QUARTER_ROUND(x1, x5, x9, x13)
                QUARTER_ROUND(x2, x6, x10, x14)
                QUARTER_ROUND(x3, x7, x11, x15)
                QUARTER_ROUND(x0, x13, x10, x7)
                QUARTER_ROUND(x1, x14, x11, x4)
                QUARTER_ROUND(x2, x15, x8, x5)
                QUARTER_ROUND(x3, x12, x9, x6)
            }

#define SALSA_OUTPUT(x) {\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 0, x0 + m_state[0]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 1, x13 + m_state[13]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 2, x10 + m_state[10]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 3, x7 + m_state[7]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 4, x4 + m_state[4]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 5, x1 + m_state[1]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 6, x14 + m_state[14]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 7, x11 + m_state[11]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 8, x8 + m_state[8]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 9, x5 + m_state[5]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 10, x2 + m_state[2]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 11, x15 + m_state[15]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 12, x12 + m_state[12]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 13, x9 + m_state[9]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 14, x6 + m_state[6]);\
    CRYPTOPP_KEYSTREAM_OUTPUT_WORD(x, LITTLE_ENDIAN_ORDER, 15, x3 + m_state[3]);}

#ifndef CRYPTOPP_DOXYGEN_PROCESSING
            CRYPTOPP_KEYSTREAM_OUTPUT_SWITCH(SALSA_OUTPUT, BYTES_PER_ITERATION);
#endif

            if (++m_state[8] == 0)
                ++m_state[5];
        }
    }
} // see comment above if an internal compiler error occurs here
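
/*
    Editor's usage sketch (not part of the original file): this policy class
    is driven through Crypto++'s normal SymmetricCipher interface; nothing
    calls OperateKeystream directly. The sizes below assume a 256-bit key and
    the 64-bit Salsa20 IV, and the function name is illustrative only.

        #include "salsa.h"
        #include "osrng.h"

        void Salsa20Example()
        {
            using namespace CryptoPP;
            AutoSeededRandomPool prng;

            SecByteBlock key(32), iv(8);
            prng.GenerateBlock(key, key.size());
            prng.GenerateBlock(iv, iv.size());

            byte plain[64] = {0}, cipher[64];

            Salsa20::Encryption enc;
            enc.SetKeyWithIV(key, key.size(), iv, iv.size());
            enc.ProcessData(cipher, plain, sizeof(plain));  // XOR with keystream

            Salsa20::Decryption dec;                        // same keystream
            dec.SetKeyWithIV(key, key.size(), iv, iv.size());
            dec.ProcessData(plain, cipher, sizeof(cipher));
        }
*/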

void XSalsa20_Policy::CipherSetKey(const NameValuePairs &params, const byte *key, size_t length)
{
    m_rounds = params.GetIntValueWithDefault(Name::Rounds(), 20);

    if (!(m_rounds == 8 || m_rounds == 12 || m_rounds == 20))
        throw InvalidRounds(XSalsa20::StaticAlgorithmName(), m_rounds);

    GetUserKey(LITTLE_ENDIAN_ORDER, m_key.begin(), m_key.size(), key, length);
    if (length == 16)
        memcpy(m_key.begin()+4, m_key.begin(), 16);

    // "expand 32-byte k"
    m_state[0] = 0x61707865;
    m_state[1] = 0x3320646e;
    m_state[2] = 0x79622d32;
    m_state[3] = 0x6b206574;
}

void XSalsa20_Policy::CipherResynchronize(byte *keystreamBuffer, const byte *IV, size_t length)
{
    assert(length == 24);

    word32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;

    GetBlock<word32, LittleEndian> get(IV);
    get(x14)(x11)(x8)(x5)(m_state[14])(m_state[11]);
    x13 = m_key[0]; x10 = m_key[1]; x7 = m_key[2]; x4 = m_key[3];
    x15 = m_key[4]; x12 = m_key[5]; x9 = m_key[6]; x6 = m_key[7];
    x0 = m_state[0]; x1 = m_state[1]; x2 = m_state[2]; x3 = m_state[3];

    for (int i=m_rounds; i>0; i-=2)
    {
        QUARTER_ROUND(x0, x4, x8, x12)
        QUARTER_ROUND(x1, x5, x9, x13)
        QUARTER_ROUND(x2, x6, x10, x14)
        QUARTER_ROUND(x3, x7, x11, x15)
        QUARTER_ROUND(x0, x13, x10, x7)
        QUARTER_ROUND(x1, x14, x11, x4)
        QUARTER_ROUND(x2, x15, x8, x5)
        QUARTER_ROUND(x3, x12, x9, x6)
    }

    m_state[13] = x0; m_state[10] = x1; m_state[7] = x2; m_state[4] = x3;
    m_state[15] = x14; m_state[12] = x11; m_state[9] = x8; m_state[6] = x5;
    m_state[8] = m_state[5] = 0;
}
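
// Editor's note: the loop above is the HSalsa20 construction. The key and the
// first 16 bytes of the 24-byte IV are run through m_rounds of the Salsa20
// core without the final feedforward, and the words at the constant and IV
// positions of the result become the derived 256-bit key, written back into
// m_state in the reordered layout. The remaining 8 IV bytes went directly
// into m_state[14] and m_state[11] via the get() call, and the block counter
// is zeroed as for plain Salsa20.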

NAMESPACE_END

#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM