sha256_sse4_amd64.asm

;/*
; * Copyright 2011 Neil Kettle
; * Copyright 2011 Ufasoft
; * Copyright 2013 James Z.M. Gao
; * Copyright 2012-2016 Luke Dashjr
; *
; * This program is free software; you can redistribute it and/or modify it
; * under the terms of the GNU General Public License as published by the Free
; * Software Foundation; either version 3 of the License, or (at your option)
; * any later version. See COPYING for more details.
; */
ALIGN 32
BITS 64
%ifidn __OUTPUT_FORMAT__,win64
%define hash rcx
%define data rdx
%define init r8
%define temp r9
%else
%define hash rdi
%define data rsi
%define init rdx
%define temp rcx
%endif
%define rel_g_4sha256_k r10
; (1024 - 256) mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16) == 0, so the expansion loop needs no tail handling
%define LAB_CALC_PARA 2
%define LAB_CALC_UNROLL 8
%define LAB_LOOP_UNROLL 8
extern g_4sha256_k
global CalcSha256_x64_sse4
; CalcSha256 hash(rdi), data(rsi), init(rdx)
; CalcSha256 hash(rcx), data(rdx), init(r8)
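; A rough C-level view of this entry point (a sketch; the exact caller-side
; declaration is an assumption, not part of this file):
;   void CalcSha256_x64_sse4(void *hash, void *data, const void *init);
; As used by the code below:
;   hash - receives 8 x 16-byte vectors (a..h), one SHA-256 state word per vector,
;          with 4 independent lanes per word
;   data - 64 x 16-byte message schedule W[0..63]; W[0..15] must be pre-filled
;          (4-way interleaved), W[16..63] is expanded in place by LAB_CALC
;   init - 8 x 32-bit initial state words, broadcast across the 4 lanes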
CalcSha256_x64_sse4:
push rbx
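; Win64 ABI: xmm6-xmm15 are callee-saved, so spill the xmm6-xmm11 registers we
; clobber. After the push of rbx, rsp is 16-byte aligned, so movdqa is safe here.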
%ifidn __OUTPUT_FORMAT__,win64
sub rsp, 16 * 6
movdqa [rsp + 16*0], xmm6
movdqa [rsp + 16*1], xmm7
movdqa [rsp + 16*2], xmm8
movdqa [rsp + 16*3], xmm9
movdqa [rsp + 16*4], xmm10
movdqa [rsp + 16*5], xmm11
%endif
lea rel_g_4sha256_k, [g_4sha256_k wrt rip]
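; g_4sha256_k holds the 64 SHA-256 round constants with each constant replicated
; across a 16-byte vector, so it is addressed with the same dword index rax as data.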
LAB_NEXT_NONCE:
mov temp, 64*4 ; 256 = 64 SHA-256 rounds * 4 dwords
mov rax, 16*4 ; 64 = dword index of W[16], where expansion starts
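; Both values are dword indices: each W[i] is a 16-byte vector (4 dwords, one per
; lane), so W[i] lives at data + i*16 and the indices advance by 4 per entry.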
LAB_SHA:
push temp
lea temp, qword [data+temp*4] ; + 1024
lea r11, qword [data+rax*4] ; + 256
LAB_CALC:
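; Reference (per 32-bit lane): s0(x) = ROTR(x,7) ^ ROTR(x,18) ^ (x >> 3)
;                              s1(x) = ROTR(x,17) ^ ROTR(x,19) ^ (x >> 10)
;                              W[I]  = W[I-16] + s0(W[I-15]) + W[I-7] + s1(W[I-2])
; SSE2 has no packed rotate, so each ROTR(x,n) is built from a right shift and a
; left shift; because the two shifted halves have no overlapping bits, the OR is
; folded into the XOR chain (e.g. ROTR(x,18) = (x >> 18) ^ (x << 14)).
; Each invocation of the macro below computes the pair of entries stored at
; r11 + %1*16 and r11 + (%1+1)*16.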
%macro lab_calc_blk 1
movntdqa xmm0, [r11-(15-%1)*16] ; xmm0 = W[I-15]
movdqa xmm2, xmm0 ; xmm2 = W[I-15]
movntdqa xmm4, [r11-(15-(%1+1))*16] ; xmm4 = W[I-15+1]
movdqa xmm6, xmm4 ; xmm6 = W[I-15+1]
psrld xmm0, 3 ; xmm0 = W[I-15] >> 3
movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3
pslld xmm2, 14 ; xmm2 = W[I-15] << 14
psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3
movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3
psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7
pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14
psrld xmm1, 4 ; xmm1 = W[I-15] >> 7
pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
psrld xmm1, 11 ; xmm1 = W[I-15] >> 18
psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18
pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
pslld xmm2, 11 ; xmm2 = W[I-15] << 25
pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25
pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
paddd xmm0, [r11-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16]
paddd xmm4, [r11-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1]
movntdqa xmm3, [r11-(2-%1)*16] ; xmm3 = W[I-2]
movntdqa xmm7, [r11-(2-(%1+1))*16] ; xmm7 = W[I-2+1]
;;;;;;;;;;;;;;;;;;
movdqa xmm2, xmm3 ; xmm2 = W[I-2]
psrld xmm3, 10 ; xmm3 = W[I-2] >> 10
movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10
movdqa xmm6, xmm7 ; xmm6 = W[I-2+1]
psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10
movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10
paddd xmm0, [r11-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
paddd xmm4, [r11-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
pslld xmm2, 13 ; xmm2 = W[I-2] << 13
pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13
psrld xmm1, 7 ; xmm1 = W[I-2] >> 17
psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17
pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
psrld xmm1, 2 ; xmm1 = W[I-2] >> 19
pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
pslld xmm2, 2 ; xmm2 = W[I-2] << 15
pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19
pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15
pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
movdqa [r11+(%1*16)], xmm0
movdqa [r11+((%1+1)*16)], xmm4
%endmacro
%assign i 0
%rep LAB_CALC_UNROLL
lab_calc_blk i
%assign i i+LAB_CALC_PARA
%endrep
add r11, LAB_CALC_UNROLL*LAB_CALC_PARA*16
cmp r11, temp
jb LAB_CALC
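; One pass of the %rep block above expands LAB_CALC_UNROLL*LAB_CALC_PARA = 16 W
; entries (256 bytes). r11 walks from data+256 to data+1024, so LAB_CALC runs three
; times to fill W[16..63]; the modulo comment near the top guarantees no tail is left.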
pop temp
mov rax, 0
; Load the initial state values and broadcast each dword across the 4 lanes.
movntdqa xmm7, [init]
pshufd xmm5, xmm7, 0x55 ; xmm5 == b
pshufd xmm4, xmm7, 0xAA ; xmm4 == c
pshufd xmm3, xmm7, 0xFF ; xmm3 == d
pshufd xmm7, xmm7, 0 ; xmm7 == a
movntdqa xmm0, [init+4*4]
pshufd xmm8, xmm0, 0x55 ; xmm8 == f
pshufd xmm9, xmm0, 0xAA ; xmm9 == g
pshufd xmm10, xmm0, 0xFF ; xmm10 == h
pshufd xmm0, xmm0, 0 ; xmm0 == e
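; Working-state register map from here on (each register holds 4 independent lanes):
;   a=xmm7  b=xmm5  c=xmm4  d=xmm3  e=xmm0  f=xmm8  g=xmm9  h=xmm10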
LAB_LOOP:
;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
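; Reference round (per lane), expanding on the comment above:
;   S1  = Rotr32(e,6) ^ Rotr32(e,11) ^ Rotr32(e,25)
;   Ch  = (e & f) ^ (~e & g)
;   t1  = h + S1 + Ch + K[i] + W[i]
;   S0  = Rotr32(a,2) ^ Rotr32(a,13) ^ Rotr32(a,22)
;   Maj = (a & b) ^ (a & c) ^ (b & c)
;   h=g  g=f  f=e  e=d+t1  d=c  c=b  b=a  a=t1+S0+Maj
; The state rotation is interleaved with the arithmetic, so several of the movdqa
; copies below double as the h=g, g=f, f=e, d=c, c=b, b=a assignments.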
%macro lab_loop_blk 0
movntdqa xmm6, [data+rax*4]
paddd xmm6, [rel_g_4sha256_k+rax*4]
add rax, 4
paddd xmm6, xmm10 ; +h
movdqa xmm1, xmm0
movdqa xmm2, xmm9
pandn xmm1, xmm2 ; ~e & g
movdqa xmm10, xmm2 ; h = g
movdqa xmm2, xmm8 ; f
movdqa xmm9, xmm2 ; g = f
pand xmm2, xmm0 ; e & f
pxor xmm1, xmm2 ; (e & f) ^ (~e & g)
movdqa xmm8, xmm0 ; f = e
paddd xmm6, xmm1 ; Ch + h + w[i] + k[i]
movdqa xmm1, xmm0
psrld xmm0, 6
movdqa xmm2, xmm0
pslld xmm1, 7
psrld xmm2, 5
pxor xmm0, xmm1
pxor xmm0, xmm2
pslld xmm1, 14
psrld xmm2, 14
pxor xmm0, xmm1
pxor xmm0, xmm2
pslld xmm1, 5
pxor xmm0, xmm1 ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)
paddd xmm6, xmm0 ; xmm6 = t1
movdqa xmm0, xmm3 ; d
paddd xmm0, xmm6 ; e = d+t1
movdqa xmm1, xmm5 ; =b
movdqa xmm3, xmm4 ; d = c
movdqa xmm2, xmm4 ; c
pand xmm2, xmm5 ; b & c
pand xmm4, xmm7 ; a & c
pand xmm1, xmm7 ; a & b
pxor xmm1, xmm4
movdqa xmm4, xmm5 ; c = b
movdqa xmm5, xmm7 ; b = a
pxor xmm1, xmm2 ; (a & b) ^ (a & c) ^ (b & c) = Maj(a, b, c)
paddd xmm6, xmm1 ; t1 + ((a & b) ^ (a & c) ^ (b & c))
movdqa xmm2, xmm7
psrld xmm7, 2
movdqa xmm1, xmm7
pslld xmm2, 10
psrld xmm1, 11
pxor xmm7, xmm2
pxor xmm7, xmm1
pslld xmm2, 9
psrld xmm1, 9
pxor xmm7, xmm2
pxor xmm7, xmm1
pslld xmm2, 11
pxor xmm7, xmm2
paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))
%endmacro
%assign i 0
%rep LAB_LOOP_UNROLL
lab_loop_blk
%assign i i+1
%endrep
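; Each lab_loop_blk is one round and advances rax by 4 dwords (one W entry), so the
; %rep above covers LAB_LOOP_UNROLL = 8 rounds and LAB_LOOP iterates 8 times for the
; full 64 rounds (until rax reaches temp = 256).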
cmp rax, temp
jb LAB_LOOP
; Finished the 64 rounds, calculate hash and save
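; Davies-Meyer feed-forward: broadcast each initial state word again, add it back
; into the corresponding working register, then store the 4-way results a..h as
; hash[0]..hash[7], one 16-byte vector per state word.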
movntdqa xmm1, [init]
pshufd xmm2, xmm1, 0x55
paddd xmm5, xmm2
pshufd xmm6, xmm1, 0xAA
paddd xmm4, xmm6
pshufd xmm11, xmm1, 0xFF
paddd xmm3, xmm11
pshufd xmm1, xmm1, 0
paddd xmm7, xmm1
movntdqa xmm1, [init+4*4]
pshufd xmm2, xmm1, 0x55
paddd xmm8, xmm2
pshufd xmm6, xmm1, 0xAA
paddd xmm9, xmm6
pshufd xmm11, xmm1, 0xFF
paddd xmm10, xmm11
pshufd xmm1, xmm1, 0
paddd xmm0, xmm1
movdqa [hash+0*16], xmm7
movdqa [hash+1*16], xmm5
movdqa [hash+2*16], xmm4
movdqa [hash+3*16], xmm3
movdqa [hash+4*16], xmm0
movdqa [hash+5*16], xmm8
movdqa [hash+6*16], xmm9
movdqa [hash+7*16], xmm10
LAB_RET:
%ifidn __OUTPUT_FORMAT__,win64
movdqa xmm6, [rsp + 16*0]
movdqa xmm7, [rsp + 16*1]
movdqa xmm8, [rsp + 16*2]
movdqa xmm9, [rsp + 16*3]
movdqa xmm10, [rsp + 16*4]
movdqa xmm11, [rsp + 16*5]
add rsp, 16 * 6
%endif
pop rbx
ret
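; A hedged caller-side sketch (names and declarations are assumptions for
; illustration only, not part of this file). All three buffers must be 16-byte
; aligned, since the code uses movdqa/movntdqa on them:
;   __m128i w[64];                      /* W[0..15] pre-filled, 4-way interleaved */
;   __m128i out[8];
;   uint32_t iv[8];                     /* SHA-256 H0..H7 or a midstate; 16-byte aligned */
;   CalcSha256_x64_sse4(out, w, iv);    /* out[i] = 4 lanes of state word i */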
%ifidn __OUTPUT_FORMAT__,elf
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
%ifidn __OUTPUT_FORMAT__,elf64
section .note.GNU-stack noalloc noexec nowrite progbits
%endif