sha256_xmm_amd64.asm

;/*
; * Copyright (C) 2011 - Neil Kettle <neil@digit-labs.org>
; *
; * This file is part of cpuminer-ng.
; *
; * cpuminer-ng is free software: you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation, either version 3 of the License, or
; * (at your option) any later version.
; *
; * cpuminer-ng is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with cpuminer-ng. If not, see <http://www.gnu.org/licenses/>.
; */
; %rbp, %rbx, and %r12-%r15 - callee save
ALIGN 32
BITS 64
%ifidn __OUTPUT_FORMAT__,win64
%define hash rcx
%define hash1 rdx
%define data r8
%define init r9
%else
%define hash rdi
%define hash1 rsi
%define data rdx
%define init rcx
%endif
; 0 = (1024 - 256) (mod (SHA_CALC_W_UNROLL*SHA_CALC_W_PARA*16))
%define SHA_CALC_W_PARA 2
%define SHA_CALC_W_UNROLL 8
%define SHA_ROUND_LOOP_UNROLL 16
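; Each pass of the W-expansion loop below produces SHA_CALC_W_UNROLL*SHA_CALC_W_PARA = 16
; schedule entries, i.e. 256 bytes of the 1024-byte 4-way W buffer, so the 48 computed
; entries W[16..63] (768 bytes) are covered in exactly three passes - hence the
; divisibility check noted above.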
%ifidn __YASM_OBJFMT__, macho64
extern _sha256_consts_m128i
extern _sha256_init
%else
extern sha256_consts_m128i
extern sha256_init
%endif
%ifidn __YASM_OBJFMT__, macho64
global _sha256_sse2_64_new
%else
global sha256_sse2_64_new
%endif
%define sr1 xmm6
%define sr2 xmm1
%define sr3 xmm2
%define sr4 xmm13
%define rA xmm7
%define rB xmm5
%define rC xmm4
%define rD xmm3
%define rE xmm0
%define rF xmm8
%define rG xmm9
%define rH xmm10
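; One SHA-256 round, computed on four independent message lanes at once (one 32-bit
; lane per SSE2 dword). A scalar sketch of what each lane computes per round:
;
;   T1 = h + SIGMA1(e) + Ch(e,f,g) + K[i] + W[i]
;   T2 = SIGMA0(a) + Maj(a,b,c)
;   h = g; g = f; f = e; e = d + T1;
;   d = c; c = b; b = a; a = T1 + T2;
;
; SSE2 has no 32-bit vector rotate, so each ROTR is built from a shift pair,
; ROTRn(x) = (x >> n) ^ (x << (32-n)), and the pieces of
; SIGMA1 = ROTR6 ^ ROTR11 ^ ROTR25 (likewise SIGMA0 = ROTR2 ^ ROTR13 ^ ROTR22)
; are XORed together incrementally - that is what the shift/pxor chains below do.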
%macro sha_round_blk 0
movdqa sr1, [data+rax] ; T1 = w;
;movdqa sr1, xmm11
movdqa sr2, rE ; sr2 = rE
pandn sr2, rG ; sr2 = ~rE & rG
movdqa sr3, rF ; sr3 = rF
paddd sr1, rH ; T1 = h + w
movdqa rH, rG ; rH = rG
pand sr3, rE ; sr3 = rE & rF
movdqa rG, rF ; rG = rF
%ifidn __YASM_OBJFMT__, macho64
paddd sr1, [rcx+rax]
%else
paddd sr1, sha256_consts_m128i[rax] ; T1 = h + w + sha256_consts_m128i[i]
%endif
pxor sr2, sr3 ; sr2 = (rE & rF) ^ (~rE & rG) = Ch (e, f, g)
movdqa rF, rE ; rF = rE
paddd sr1, sr2 ; T1 = h + Ch (e, f, g) + sha256_consts_m128i[i] + w;
movdqa sr2, rE ; sr2 = rE
psrld rE, 6 ; e >> 6
movdqa sr3, rE ; e >> 6
pslld sr2, 7 ; e << 7
psrld sr3, 5 ; e >> 11
pxor rE, sr2 ; e >> 6 ^ e << 7
pslld sr2, 14 ; e << 21
pxor rE, sr3 ; e >> 6 ^ e << 7 ^ e >> 11
psrld sr3, 14 ; e >> 25
pxor rE, sr2 ; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21
pslld sr2, 5 ; e << 26
pxor rE, sr3 ; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21 ^ e >> 25
pxor rE, sr2 ; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21 ^ e >> 25 ^ e << 26
movdqa sr2, rB ; sr2 = rB
paddd sr1, rE ; sr1 = h + BIGSIGMA1_256(e) + Ch (e, f, g) + sha256_consts_m128i[i] + w;
movdqa rE, rD ; rE = rD
movdqa rD, rC ; rD = rC
paddd rE, sr1 ; rE = rD + T1
movdqa sr3, rC ; sr3 = rC
pand rC, rA ; rC = rC & rA
pand sr3, rB ; sr3 = rB & rC
pand sr2, rA ; sr2 = rB & rA
pxor sr2, rC ; sr2 = (rB & rA) ^ (rC & rA)
movdqa rC, rB ; rC = rB
pxor sr2, sr3 ; sr2 = (rB & rA) ^ (rC & rA) ^ (rB & rC)
movdqa rB, rA ; rB = rA
paddd sr1, sr2 ; sr1 = T1 + (rB & rA) ^ (rC & rA) ^ (rB & rC)
lea rax, [rax+16]
movdqa sr3, rA ; sr3 = rA
psrld rA, 2 ; a >> 2
pslld sr3, 10 ; a << 10
movdqa sr2, rA ; a >> 2
pxor rA, sr3 ; a >> 2 ^ a << 10
psrld sr2, 11 ; a >> 13
pxor rA, sr2 ; a >> 2 ^ a << 10 ^ a >> 13
pslld sr3, 9 ; a << 19
pxor rA, sr3 ; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19
psrld sr2, 9 ; a >> 22
pxor rA, sr2 ; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19 ^ a >> 22
pslld sr3, 11 ; a << 30
pxor rA, sr3 ; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19 ^ a >> 22 ^ a << 30
paddd rA, sr1 ; T1 + BIGSIGMA0_256(a) + Maj(a, b, c);
%endmacro
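; Message-schedule expansion, two 4-lane W entries per invocation (%1 and %1+1).
; A scalar sketch of what each lane computes:
;
;   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
;   sigma0(x) = ROTR7(x) ^ ROTR18(x) ^ (x >> 3)
;   sigma1(x) = ROTR17(x) ^ ROTR19(x) ^ (x >> 10)
;
; As in sha_round_blk, the rotates are assembled from shift pairs; r11 points at the
; next W entry to be written, so all operands are addressed relative to it.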
%macro sha_calc_w_blk 1
movdqa xmm0, [r11-(15-%1)*16] ; xmm0 = W[I-15]
movdqa xmm4, [r11-(15-(%1+1))*16] ; xmm4 = W[I-15+1]
movdqa xmm2, xmm0 ; xmm2 = W[I-15]
movdqa xmm6, xmm4 ; xmm6 = W[I-15+1]
psrld xmm0, 3 ; xmm0 = W[I-15] >> 3
psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3
movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3
movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3
pslld xmm2, 14 ; xmm2 = W[I-15] << 14
pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14
psrld xmm1, 4 ; xmm1 = W[I-15] >> 7
psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7
pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
psrld xmm1, 11 ; xmm1 = W[I-15] >> 18
psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18
pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
pslld xmm2, 11 ; xmm2 = W[I-15] << 25
pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25
pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
movdqa xmm3, [r11-(2-%1)*16] ; xmm3 = W[I-2]
movdqa xmm7, [r11-(2-(%1+1))*16] ; xmm7 = W[I-2+1]
paddd xmm0, [r11-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16]
paddd xmm4, [r11-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1]
;;;;;;;;;;;;;;;;;;
movdqa xmm2, xmm3 ; xmm2 = W[I-2]
movdqa xmm6, xmm7 ; xmm6 = W[I-2+1]
psrld xmm3, 10 ; xmm3 = W[I-2] >> 10
psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10
movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10
movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10
paddd xmm0, [r11-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
pslld xmm2, 13 ; xmm2 = W[I-2] << 13
pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13
psrld xmm1, 7 ; xmm1 = W[I-2] >> 17
psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17
paddd xmm4, [r11-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
psrld xmm1, 2 ; xmm1 = W[I-2] >> 19
psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19
pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
pslld xmm2, 2 ; xmm2 = W[I-2] << 15
pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15
pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
movdqa [r11+(%1*16)], xmm0
movdqa [r11+((%1+1)*16)], xmm4
%endmacro
; _sha256_sse2_64_new hash(rdi), hash1(rsi), data(rdx), init(rcx)
%ifidn __YASM_OBJFMT__, macho64
_sha256_sse2_64_new:
%else
sha256_sse2_64_new:
%endif
push rbx
%ifidn __OUTPUT_FORMAT__,win64
sub rsp, 16 * 6
movdqa [rsp + 16*0], xmm6
movdqa [rsp + 16*1], xmm7
movdqa [rsp + 16*2], xmm8
movdqa [rsp + 16*3], xmm9
movdqa [rsp + 16*4], xmm10
movdqa [rsp + 16*5], xmm13
%endif
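; The SHA_256 macro below runs one full 4-way compression: it first expands the
; message schedule in place (W[16..63], 16 bytes per entry, starting 256 bytes into
; the data buffer), then executes the 64 rounds with rax walking both the W buffer
; and the round-constant table in 16-byte steps, and finally adds the initial state
; from [init] back into the working registers.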
%macro SHA_256 0
mov rbx, 64*4 ; rbx is # of SHA-2 rounds
mov rax, 16*4 ; rax is where we expand to
push rbx
lea rbx, qword [data+rbx*4]
lea r11, qword [data+rax*4]
%%SHA_CALC_W:
%assign i 0
%rep SHA_CALC_W_UNROLL
sha_calc_w_blk i
%assign i i+SHA_CALC_W_PARA
%endrep
add r11, SHA_CALC_W_UNROLL*SHA_CALC_W_PARA*16
cmp r11, rbx
jb %%SHA_CALC_W
pop rbx
mov rax, 0
lea rbx, [rbx*4]
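; rbx is now 64*4*4 = 1024, the byte offset just past W[63]; rax is the running byte
; offset into both the W buffer and sha256_consts_m128i, advanced by 16 per round in
; sha_round_blk, so the round loop below exits once rax reaches rbx after 64 rounds.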
movdqa rA, [init]
pshufd rB, rA, 0x55 ; rB == B
pshufd rC, rA, 0xAA ; rC == C
pshufd rD, rA, 0xFF ; rD == D
pshufd rA, rA, 0 ; rA == A
movdqa rE, [init+4*4]
pshufd rF, rE, 0x55 ; rF == F
pshufd rG, rE, 0xAA ; rG == G
pshufd rH, rE, 0xFF ; rH == H
pshufd rE, rE, 0 ; rE == E
%ifidn __YASM_OBJFMT__, macho64
lea rcx, [_sha256_consts_m128i wrt rip]
%endif
%%SHAROUND_LOOP:
%assign i 0
%rep SHA_ROUND_LOOP_UNROLL
sha_round_blk
%assign i i+1
%endrep
cmp rax, rbx
jb %%SHAROUND_LOOP
; Finished the 64 rounds, calculate hash and save
movdqa sr1, [init]
pshufd sr2, sr1, 0x55
pshufd sr3, sr1, 0xAA
pshufd sr4, sr1, 0xFF
pshufd sr1, sr1, 0
paddd rB, sr2
paddd rC, sr3
paddd rD, sr4
paddd rA, sr1
movdqa sr1, [init+4*4]
pshufd sr2, sr1, 0x55
pshufd sr3, sr1, 0xAA
pshufd sr4, sr1, 0xFF
pshufd sr1, sr1, 0
paddd rF, sr2
paddd rG, sr3
paddd rH, sr4
paddd rE, sr1
%endmacro
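; Two back-to-back compressions: the first hashes the four message lanes in [data]
; from the caller-supplied state in [init] and stores the eight resulting state words
; (still lane-interleaved, one 16-byte vector per word) into hash1; the second hashes
; hash1 again from the standard initial values in sha256_init - the usual double
; SHA-256. The second pass reads a full 16-word block from hash1, so the caller is
; presumably expected to have pre-filled the padding words behind the 8 stored words.
; Only the final state word (H) is written back to [hash].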
SHA_256
movdqa [hash1+0*16], rA
movdqa [hash1+1*16], rB
movdqa [hash1+2*16], rC
movdqa [hash1+3*16], rD
movdqa [hash1+4*16], rE
movdqa [hash1+5*16], rF
movdqa [hash1+6*16], rG
movdqa [hash1+7*16], rH
mov data, hash1
%ifidn __YASM_OBJFMT__, macho64
lea init, [_sha256_init wrt rip]
%else
mov init, sha256_init
%endif
SHA_256
movdqa [hash+7*16], rH
LAB_RET:
%ifidn __OUTPUT_FORMAT__,win64
movdqa xmm6, [rsp + 16*0]
movdqa xmm7, [rsp + 16*1]
movdqa xmm8, [rsp + 16*2]
movdqa xmm9, [rsp + 16*3]
movdqa xmm10, [rsp + 16*4]
movdqa xmm13, [rsp + 16*5]
add rsp, 16 * 6
%endif
pop rbx
ret
%ifidn __OUTPUT_FORMAT__,elf
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
%ifidn __OUTPUT_FORMAT__,elf64
section .note.GNU-stack noalloc noexec nowrite progbits
%endif