sha256_sse4_amd64.asm

;; SHA-256 for X86-64 for Linux, based off of:

; (c) Ufasoft 2011 http://ufasoft.com mailto:support@ufasoft.com
; Version 2011
; This software is Public Domain

; Significant re-write/optimisation and reordering by,
; Neil Kettle <mu-b@digit-labs.org>
; ~18% performance improvement

; SHA-256 CPU SSE cruncher for Bitcoin Miner
ALIGN 32
BITS 64

%define hash rdi
%define data rsi
%define init rdx
; The schedule loop below strides LAB_CALC_UNROLL*LAB_CALC_PARA*16 = 256 bytes
; per pass, which must evenly divide the 1024 - 256 = 768 bytes being expanded:
; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
%define LAB_CALC_PARA   2
%define LAB_CALC_UNROLL 8

%define LAB_LOOP_UNROLL 8
extern g_4sha256_k

global CalcSha256_x64_sse4
; CalcSha256 hash(rdi), data(rsi), init(rdx)
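;; Four SHA-256 states are crunched in parallel, one per 32-bit lane of each
;; xmm register: data holds the 4-way interleaved message schedule (the 16-byte
;; vector at [data + i*16] is W[i] for all four lanes), init holds the 8
;; chaining words (broadcast across the lanes below), and the result is stored
;; with word i of all four hashes at [hash + i*16].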
CalcSha256_x64_sse4:
        push    rbx

LAB_NEXT_NONCE:
        mov     rcx, 64*4                   ; rcx = 256 dwords: 64 rounds x 4 lanes
        mov     rax, 16*4                   ; rax = 64 dwords: expansion starts at W[16]
LAB_SHA:
        push    rcx
        lea     rcx, qword [data+rcx*4]     ; + 1024
        lea     r11, qword [data+rax*4]     ; + 256
LAB_CALC:
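;; Message schedule (FIPS 180-4), two 4-lane W-vectors per macro expansion:
;;   W[I]  = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]
;;   s0(x) = Rotr32(x, 7) ^ Rotr32(x, 18) ^ (x >> 3)
;;   s1(x) = Rotr32(x, 17) ^ Rotr32(x, 19) ^ (x >> 10)
;; Each rotate is built from a pair of shifts, e.g.
;; Rotr32(x, 7) = (x >> 7) | (x << 25); the '|' becomes '^' below because the
;; two shifted halves have no overlapping bits.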
%macro lab_calc_blk 1
        movntdqa xmm0, [r11-(15-%1)*16]     ; xmm0 = W[I-15]
        movdqa  xmm2, xmm0                  ; xmm2 = W[I-15]
        movntdqa xmm4, [r11-(15-(%1+1))*16] ; xmm4 = W[I-15+1]
        movdqa  xmm6, xmm4                  ; xmm6 = W[I-15+1]
        psrld   xmm0, 3                     ; xmm0 = W[I-15] >> 3
        movdqa  xmm1, xmm0                  ; xmm1 = W[I-15] >> 3
        pslld   xmm2, 14                    ; xmm2 = W[I-15] << 14
        psrld   xmm4, 3                     ; xmm4 = W[I-15+1] >> 3
        movdqa  xmm5, xmm4                  ; xmm5 = W[I-15+1] >> 3
        psrld   xmm5, 4                     ; xmm5 = W[I-15+1] >> 7
        pxor    xmm4, xmm5                  ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
        pslld   xmm6, 14                    ; xmm6 = W[I-15+1] << 14
        psrld   xmm1, 4                     ; xmm1 = W[I-15] >> 7
        pxor    xmm0, xmm1                  ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
        pxor    xmm0, xmm2                  ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
        psrld   xmm1, 11                    ; xmm1 = W[I-15] >> 18
        psrld   xmm5, 11                    ; xmm5 = W[I-15+1] >> 18
        pxor    xmm4, xmm6                  ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
        pxor    xmm4, xmm5                  ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
        pslld   xmm2, 11                    ; xmm2 = W[I-15] << 25
        pslld   xmm6, 11                    ; xmm6 = W[I-15+1] << 25
        pxor    xmm4, xmm6                  ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
        pxor    xmm0, xmm1                  ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
        pxor    xmm0, xmm2                  ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
        paddd   xmm0, [r11-(16-%1)*16]      ; xmm0 = s0(W[I-15]) + W[I-16]
        paddd   xmm4, [r11-(16-(%1+1))*16]  ; xmm4 = s0(W[I-15+1]) + W[I-16+1]
        movntdqa xmm3, [r11-(2-%1)*16]      ; xmm3 = W[I-2]
        movntdqa xmm7, [r11-(2-(%1+1))*16]  ; xmm7 = W[I-2+1]
;;;;;;;;;;;;;;;;;;
        movdqa  xmm2, xmm3                  ; xmm2 = W[I-2]
        psrld   xmm3, 10                    ; xmm3 = W[I-2] >> 10
        movdqa  xmm1, xmm3                  ; xmm1 = W[I-2] >> 10
        movdqa  xmm6, xmm7                  ; xmm6 = W[I-2+1]
        psrld   xmm7, 10                    ; xmm7 = W[I-2+1] >> 10
        movdqa  xmm5, xmm7                  ; xmm5 = W[I-2+1] >> 10
        paddd   xmm0, [r11-(7-%1)*16]       ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
        paddd   xmm4, [r11-(7-(%1+1))*16]   ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
        pslld   xmm2, 13                    ; xmm2 = W[I-2] << 13
        pslld   xmm6, 13                    ; xmm6 = W[I-2+1] << 13
        psrld   xmm1, 7                     ; xmm1 = W[I-2] >> 17
        psrld   xmm5, 7                     ; xmm5 = W[I-2+1] >> 17
        pxor    xmm3, xmm1                  ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
        psrld   xmm1, 2                     ; xmm1 = W[I-2] >> 19
        pxor    xmm3, xmm2                  ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
        pslld   xmm2, 2                     ; xmm2 = W[I-2] << 15
        pxor    xmm7, xmm5                  ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
        psrld   xmm5, 2                     ; xmm5 = W[I-2+1] >> 19
        pxor    xmm7, xmm6                  ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
        pslld   xmm6, 2                     ; xmm6 = W[I-2+1] << 15
        pxor    xmm3, xmm1                  ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
        pxor    xmm3, xmm2                  ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
        paddd   xmm0, xmm3                  ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
        pxor    xmm7, xmm5                  ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
        pxor    xmm7, xmm6                  ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
        paddd   xmm4, xmm7                  ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
        movdqa  [r11+(%1*16)], xmm0
        movdqa  [r11+((%1+1)*16)], xmm4
%endmacro
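;; Each LAB_CALC pass expands LAB_CALC_UNROLL*LAB_CALC_PARA = 16 W-vectors
;; (256 bytes); r11 walks from data + 256 up to data + 1024.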
%assign i 0
%rep    LAB_CALC_UNROLL
        lab_calc_blk i
%assign i i+LAB_CALC_PARA
%endrep

        add     r11, LAB_CALC_UNROLL*LAB_CALC_PARA*16
        cmp     r11, rcx
        jb      LAB_CALC

        pop     rcx
        mov     rax, 0                      ; rax = 0: dword index of W[0] for the round loop
; Load the initial hash state, broadcasting each init word across the 4 lanes.
        movntdqa xmm7, [init]
        pshufd  xmm5, xmm7, 0x55            ; xmm5 == b
        pshufd  xmm4, xmm7, 0xAA            ; xmm4 == c
        pshufd  xmm3, xmm7, 0xFF            ; xmm3 == d
        pshufd  xmm7, xmm7, 0               ; xmm7 == a
        movntdqa xmm0, [init+4*4]
        pshufd  xmm8, xmm0, 0x55            ; xmm8 == f
        pshufd  xmm9, xmm0, 0xAA            ; xmm9 == g
        pshufd  xmm10, xmm0, 0xFF           ; xmm10 == h
        pshufd  xmm0, xmm0, 0               ; xmm0 == e
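;; Round-loop state register map:
;;   a=xmm7  b=xmm5  c=xmm4  d=xmm3  e=xmm0  f=xmm8  g=xmm9  h=xmm10
;; xmm1/xmm2 are scratch; xmm6 accumulates t1.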
LAB_LOOP:

;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
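;; T t2 = (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))
;; h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2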
%macro lab_loop_blk 0
        movntdqa xmm6, [data+rax*4]         ; xmm6 = w[j] (4 lanes)
        paddd   xmm6, g_4sha256_k[rax*4]    ; + k[j] (4-way replicated round constants)
        add     rax, 4
        paddd   xmm6, xmm10                 ; + h
        movdqa  xmm1, xmm0
        movdqa  xmm2, xmm9
        pandn   xmm1, xmm2                  ; ~e & g
        movdqa  xmm10, xmm2                 ; h = g
        movdqa  xmm2, xmm8                  ; f
        movdqa  xmm9, xmm2                  ; g = f
        pand    xmm2, xmm0                  ; e & f
        pxor    xmm1, xmm2                  ; Ch = (e & f) ^ (~e & g)
        movdqa  xmm8, xmm0                  ; f = e
        paddd   xmm6, xmm1                  ; Ch + h + w[j] + k[j]
        movdqa  xmm1, xmm0                  ; xmm1 = e
        psrld   xmm0, 6                     ; xmm0 = e >> 6
        movdqa  xmm2, xmm0
        pslld   xmm1, 7                     ; xmm1 = e << 7
        psrld   xmm2, 5                     ; xmm2 = e >> 11
        pxor    xmm0, xmm1
        pxor    xmm0, xmm2
        pslld   xmm1, 14                    ; xmm1 = e << 21
        psrld   xmm2, 14                    ; xmm2 = e >> 25
        pxor    xmm0, xmm1
        pxor    xmm0, xmm2
        pslld   xmm1, 5                     ; xmm1 = e << 26
        pxor    xmm0, xmm1                  ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)
        paddd   xmm6, xmm0                  ; xmm6 = t1
        movdqa  xmm0, xmm3                  ; d
        paddd   xmm0, xmm6                  ; e = d + t1
        movdqa  xmm1, xmm5                  ; b
        movdqa  xmm3, xmm4                  ; d = c
        movdqa  xmm2, xmm4                  ; c
        pand    xmm2, xmm5                  ; b & c
        pand    xmm4, xmm7                  ; a & c
        pand    xmm1, xmm7                  ; a & b
        pxor    xmm1, xmm4                  ; (a & b) ^ (a & c)
        movdqa  xmm4, xmm5                  ; c = b
        movdqa  xmm5, xmm7                  ; b = a
        pxor    xmm1, xmm2                  ; Maj = (a & b) ^ (a & c) ^ (b & c)
        paddd   xmm6, xmm1                  ; t1 + Maj
        movdqa  xmm2, xmm7                  ; xmm2 = a
        psrld   xmm7, 2                     ; xmm7 = a >> 2
        movdqa  xmm1, xmm7
        pslld   xmm2, 10                    ; xmm2 = a << 10
        psrld   xmm1, 11                    ; xmm1 = a >> 13
        pxor    xmm7, xmm2
        pxor    xmm7, xmm1
        pslld   xmm2, 9                     ; xmm2 = a << 19
        psrld   xmm1, 9                     ; xmm1 = a >> 22
        pxor    xmm7, xmm2
        pxor    xmm7, xmm1
        pslld   xmm2, 11                    ; xmm2 = a << 30
        pxor    xmm7, xmm2                  ; Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)
        paddd   xmm7, xmm6                  ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c));
%endmacro

%assign i 0
%rep    LAB_LOOP_UNROLL
        lab_loop_blk
%assign i i+1
%endrep

        cmp     rax, rcx
        jb      LAB_LOOP
; Finished the 64 rounds, calculate hash and save
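;; Davies-Meyer feed-forward: add the initial state (init == rdx) back into
;; the working state, then store word i of all four hashes at [hash + i*16].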
        movntdqa xmm1, [rdx]
        pshufd  xmm2, xmm1, 0x55
        paddd   xmm5, xmm2                  ; b += init[1]
        pshufd  xmm6, xmm1, 0xAA
        paddd   xmm4, xmm6                  ; c += init[2]
        pshufd  xmm11, xmm1, 0xFF
        paddd   xmm3, xmm11                 ; d += init[3]
        pshufd  xmm1, xmm1, 0
        paddd   xmm7, xmm1                  ; a += init[0]
        movntdqa xmm1, [rdx+4*4]
        pshufd  xmm2, xmm1, 0x55
        paddd   xmm8, xmm2                  ; f += init[5]
        pshufd  xmm6, xmm1, 0xAA
        paddd   xmm9, xmm6                  ; g += init[6]
        pshufd  xmm11, xmm1, 0xFF
        paddd   xmm10, xmm11                ; h += init[7]
        pshufd  xmm1, xmm1, 0
        paddd   xmm0, xmm1                  ; e += init[4]
        movdqa  [hash+0*16], xmm7           ; a
        movdqa  [hash+1*16], xmm5           ; b
        movdqa  [hash+2*16], xmm4           ; c
        movdqa  [hash+3*16], xmm3           ; d
        movdqa  [hash+4*16], xmm0           ; e
        movdqa  [hash+5*16], xmm8           ; f
        movdqa  [hash+6*16], xmm9           ; g
        movdqa  [hash+7*16], xmm10          ; h

LAB_RET:
        pop     rbx
        ret
%ifidn __OUTPUT_FORMAT__,elf
section .note.GNU-stack noalloc noexec nowrite progbits
%endif

%ifidn __OUTPUT_FORMAT__,elf64
section .note.GNU-stack noalloc noexec nowrite progbits
%endif