; sha256_xmm_amd64.asm (4.3 KB)

; (removed: line-number gutter left over from a web copy/paste — not part of the source)
  1. ;; SHA-256 for X86-64 for Linux, based off of:
  2. ; (c) Ufasoft 2011 http://ufasoft.com mailto:support@ufasoft.com
  3. ; Version 2011
  4. ; This software is Public Domain
  5. ; SHA-256 CPU SSE cruncher for Bitcoin Miner
  6. ALIGN 32
  7. BITS 64
  8. %define hash rdi
  9. %define data rsi
  10. %define init rdx
  11. extern g_4sha256_k
  12. global CalcSha256_x64
  13. ; CalcSha256 hash(rdi), data(rsi), init(rdx)
  14. CalcSha256_x64:
  15. push rbx
  16. LAB_NEXT_NONCE:
  17. mov r11, data
  18. ; mov rax, pnonce
  19. ; mov eax, [rax]
  20. ; mov [rbx+3*16], eax
  21. ; inc eax
  22. ; mov [rbx+3*16+4], eax
  23. ; inc eax
  24. ; mov [rbx+3*16+8], eax
  25. ; inc eax
  26. ; mov [rbx+3*16+12], eax
  27. mov rcx, 64*4 ;rcx is # of SHA-2 rounds
  28. mov rax, 16*4 ;rax is where we expand to
  29. LAB_SHA:
  30. push rcx
  31. lea rcx, qword [r11+rcx*4]
  32. lea r11, qword [r11+rax*4]
  33. LAB_CALC:
  34. movdqa xmm0, [r11-15*16]
  35. movdqa xmm2, xmm0 ; (Rotr32(w_15, 7) ^ Rotr32(w_15, 18) ^ (w_15 >> 3))
  36. psrld xmm0, 3
  37. movdqa xmm1, xmm0
  38. pslld xmm2, 14
  39. psrld xmm1, 4
  40. pxor xmm0, xmm1
  41. pxor xmm0, xmm2
  42. pslld xmm2, 11
  43. psrld xmm1, 11
  44. pxor xmm0, xmm1
  45. pxor xmm0, xmm2
  46. paddd xmm0, [r11-16*16]
  47. movdqa xmm3, [r11-2*16]
  48. movdqa xmm2, xmm3 ; (Rotr32(w_2, 17) ^ Rotr32(w_2, 19) ^ (w_2 >> 10))
  49. psrld xmm3, 10
  50. movdqa xmm1, xmm3
  51. pslld xmm2, 13
  52. psrld xmm1, 7
  53. pxor xmm3, xmm1
  54. pxor xmm3, xmm2
  55. pslld xmm2, 2
  56. psrld xmm1, 2
  57. pxor xmm3, xmm1
  58. pxor xmm3, xmm2
  59. paddd xmm0, xmm3
  60. paddd xmm0, [r11-7*16]
  61. movdqa [r11], xmm0
  62. add r11, 16
  63. cmp r11, rcx
  64. jb LAB_CALC
  65. pop rcx
  66. mov rax, 0
  67. ; Load the init values of the message into the hash.
  68. movd xmm0, dword [rdx+4*4] ; xmm0 == e
  69. pshufd xmm0, xmm0, 0
  70. movd xmm3, dword [rdx+3*4] ; xmm3 == d
  71. pshufd xmm3, xmm3, 0
  72. movd xmm4, dword [rdx+2*4] ; xmm4 == c
  73. pshufd xmm4, xmm4, 0
  74. movd xmm5, dword [rdx+1*4] ; xmm5 == b
  75. pshufd xmm5, xmm5, 0
  76. movd xmm7, dword [rdx+0*4] ; xmm7 == a
  77. pshufd xmm7, xmm7, 0
  78. movd xmm8, dword [rdx+5*4] ; xmm8 == f
  79. pshufd xmm8, xmm8, 0
  80. movd xmm9, dword [rdx+6*4] ; xmm9 == g
  81. pshufd xmm9, xmm9, 0
  82. movd xmm10, dword [rdx+7*4] ; xmm10 == h
  83. pshufd xmm10, xmm10, 0
  84. LAB_LOOP:
  85. ;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
  86. movdqa xmm6, [rsi+rax*4]
  87. paddd xmm6, g_4sha256_k[rax*4]
  88. add rax, 4
  89. paddd xmm6, xmm10 ; +h
  90. movdqa xmm1, xmm0
  91. movdqa xmm2, xmm9
  92. pandn xmm1, xmm2 ; ~e & g
  93. movdqa xmm10, xmm2 ; h = g
  94. movdqa xmm2, xmm8 ; f
  95. movdqa xmm9, xmm2 ; g = f
  96. pand xmm2, xmm0 ; e & f
  97. pxor xmm1, xmm2 ; (e & f) ^ (~e & g)
  98. movdqa xmm8, xmm0 ; f = e
  99. paddd xmm6, xmm1 ; Ch + h + w[i] + k[i]
  100. movdqa xmm1, xmm0
  101. psrld xmm0, 6
  102. movdqa xmm2, xmm0
  103. pslld xmm1, 7
  104. psrld xmm2, 5
  105. pxor xmm0, xmm1
  106. pxor xmm0, xmm2
  107. pslld xmm1, 14
  108. psrld xmm2, 14
  109. pxor xmm0, xmm1
  110. pxor xmm0, xmm2
  111. pslld xmm1, 5
  112. pxor xmm0, xmm1 ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)
  113. paddd xmm6, xmm0 ; xmm6 = t1
  114. movdqa xmm0, xmm3 ; d
  115. paddd xmm0, xmm6 ; e = d+t1
  116. movdqa xmm1, xmm5 ; =b
  117. movdqa xmm3, xmm4 ; d = c
  118. movdqa xmm2, xmm4 ; c
  119. pand xmm2, xmm5 ; b & c
  120. pand xmm4, xmm7 ; a & c
  121. pand xmm1, xmm7 ; a & b
  122. pxor xmm1, xmm4
  123. movdqa xmm4, xmm5 ; c = b
  124. movdqa xmm5, xmm7 ; b = a
  125. pxor xmm1, xmm2 ; (a & c) ^ (a & d) ^ (c & d)
  126. paddd xmm6, xmm1 ; t1 + ((a & c) ^ (a & d) ^ (c & d))
  127. movdqa xmm2, xmm7
  128. psrld xmm7, 2
  129. movdqa xmm1, xmm7
  130. pslld xmm2, 10
  131. psrld xmm1, 11
  132. pxor xmm7, xmm2
  133. pxor xmm7, xmm1
  134. pslld xmm2, 9
  135. psrld xmm1, 9
  136. pxor xmm7, xmm2
  137. pxor xmm7, xmm1
  138. pslld xmm2, 11
  139. pxor xmm7, xmm2
  140. paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d));
  141. cmp rax, rcx
  142. jb LAB_LOOP
  143. ; Finished the 64 rounds, calculate hash and save
  144. movd xmm1, dword [rdx+0*4]
  145. pshufd xmm1, xmm1, 0
  146. paddd xmm7, xmm1
  147. movd xmm1, dword [rdx+1*4]
  148. pshufd xmm1, xmm1, 0
  149. paddd xmm5, xmm1
  150. movd xmm1, dword [rdx+2*4]
  151. pshufd xmm1, xmm1, 0
  152. paddd xmm4, xmm1
  153. movd xmm1, dword [rdx+3*4]
  154. pshufd xmm1, xmm1, 0
  155. paddd xmm3, xmm1
  156. movd xmm1, dword [rdx+4*4]
  157. pshufd xmm1, xmm1, 0
  158. paddd xmm0, xmm1
  159. movd xmm1, dword [rdx+5*4]
  160. pshufd xmm1, xmm1, 0
  161. paddd xmm8, xmm1
  162. movd xmm1, dword [rdx+6*4]
  163. pshufd xmm1, xmm1, 0
  164. paddd xmm9, xmm1
  165. movd xmm1, dword [rdx+7*4]
  166. pshufd xmm1, xmm1, 0
  167. paddd xmm10, xmm1
  168. debug_me:
  169. movdqa [rdi+0*16], xmm7
  170. movdqa [rdi+1*16], xmm5
  171. movdqa [rdi+2*16], xmm4
  172. movdqa [rdi+3*16], xmm3
  173. movdqa [rdi+4*16], xmm0
  174. movdqa [rdi+5*16], xmm8
  175. movdqa [rdi+6*16], xmm9
  176. movdqa [rdi+7*16], xmm10
  177. LAB_RET:
  178. pop rbx
  179. ret