;/*
; * Copyright 2011 Ufasoft
; * Copyright 2012 Guido Ascioti <guido.ascioti@gmail.com>
; * Copyright 2012 Luke Dashjr
; *
; * This program is free software; you can redistribute it and/or modify it
; * under the terms of the GNU General Public License as published by the Free
; * Software Foundation; either version 3 of the License, or (at your option)
; * any later version. See COPYING for more details.
; */
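
; sha256_xmm.asm -- SHA-256 transform using SSE2 packed 32-bit integer ops.
; Each 128-bit XMM value holds four independent 32-bit lanes, so a single call
; can process four message blocks in parallel (one per lane), all starting from
; the same initial state: the init words are broadcast across the lanes below.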

ALIGN 32
BITS 32

%define hash ecx
%define data edx
%define init esi

; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
%define LAB_CALC_PARA 2
%define LAB_CALC_UNROLL 24

%define LAB_LOOP_UNROLL 64

extern sha256_consts_m128i
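; sha256_consts_m128i is a table of the 64 SHA-256 round constants K[0..63],
; each stored as a 16-byte vector with the constant replicated into all four
; lanes; the round loop below steps through it in 16-byte increments.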

global CalcSha256_x86

; CalcSha256 hash(ecx), data(edx), init([esp+4])
CalcSha256_x86:
    push esi
    push edi
    mov init, [esp+12]
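; After the two pushes the stack is: [esp] = saved edi, [esp+4] = saved esi,
; [esp+8] = return address, [esp+12] = the init pointer passed on the stack.
; hash and data arrive in ecx/edx (fastcall-style); the callee pops the 4-byte
; stack argument on return via `retn 4`.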

LAB_SHA:
    lea edi, qword [data+256] ; edi = &W[16] (each W entry is one 16-byte vector)
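; data points to the 64-entry message-schedule array W (64 * 16 bytes). The
; caller fills W[0..15] for all four lanes; the loop below expands W[16..63]
; in place.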

LAB_CALC:
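; Message-schedule expansion:
;   W[i] = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16]
;   s0(x) = rotr(x,7) ^ rotr(x,18) ^ (x >> 3)
;   s1(x) = rotr(x,17) ^ rotr(x,19) ^ (x >> 10)
; SSE2 has no packed rotate, so each rotr(x,n) is built from a shift pair,
; (x >> n) ^ (x << (32-n)). One macro invocation computes two consecutive
; 16-byte W entries (%1 and %1+1).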
%macro lab_calc_blk 1
    movdqa xmm0, [edi-(15-%1)*16] ; xmm0 = W[I-15]
    movdqa xmm4, [edi-(15-(%1+1))*16] ; xmm4 = W[I-15+1]
    movdqa xmm2, xmm0 ; xmm2 = W[I-15]
    movdqa xmm6, xmm4 ; xmm6 = W[I-15+1]
    psrld xmm0, 3 ; xmm0 = W[I-15] >> 3
    psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3
    movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3
    movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3
    pslld xmm2, 14 ; xmm2 = W[I-15] << 14
    pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14
    psrld xmm1, 4 ; xmm1 = W[I-15] >> 7
    psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7
    pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
    pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
    psrld xmm1, 11 ; xmm1 = W[I-15] >> 18
    psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18
    pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
    pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
    pslld xmm2, 11 ; xmm2 = W[I-15] << 25
    pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25
    pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
    pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
    pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
    pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
    movdqa xmm3, [edi-(2-%1)*16] ; xmm3 = W[I-2]
    movdqa xmm7, [edi-(2-(%1+1))*16] ; xmm7 = W[I-2+1]
    paddd xmm0, [edi-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16]
    paddd xmm4, [edi-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1]
    ;;;;;;;;;;;;;;;;;;
    movdqa xmm2, xmm3 ; xmm2 = W[I-2]
    movdqa xmm6, xmm7 ; xmm6 = W[I-2+1]
    psrld xmm3, 10 ; xmm3 = W[I-2] >> 10
    psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10
    movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10
    movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10
    paddd xmm0, [edi-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
    pslld xmm2, 13 ; xmm2 = W[I-2] << 13
    pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13
    psrld xmm1, 7 ; xmm1 = W[I-2] >> 17
    psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17
    paddd xmm4, [edi-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
    pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
    pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
    psrld xmm1, 2 ; xmm1 = W[I-2] >> 19
    psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19
    pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
    pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
    pslld xmm2, 2 ; xmm2 = W[I-2] << 15
    pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15
    pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
    pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
    pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
    pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
    paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
    paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
    movdqa [edi+(%1*16)], xmm0
    movdqa [edi+((%1+1)*16)], xmm4
%endmacro

%assign i 0
%rep LAB_CALC_UNROLL
    lab_calc_blk i
%assign i i+LAB_CALC_PARA
%endrep
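; 24 iterations x 2 vectors per macro call = 48 new entries: W[16..63] are now
; filled in for all four lanes.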

; Load the initial hash state from init, broadcasting each word across the four lanes.
    movdqa xmm7, [init]
    pshufd xmm5, xmm7, 0x55 ; xmm5 == b
    pshufd xmm4, xmm7, 0xAA ; xmm4 == c
    pshufd xmm3, xmm7, 0xFF ; xmm3 == d
    pshufd xmm7, xmm7, 0 ; xmm7 == a
    movdqa xmm0, [init+4*4]
    pshufd xmm1, xmm0, 0x55 ; [hash+0*16] == f
    movdqa [hash+0*16], xmm1
    pshufd xmm1, xmm0, 0xAA ; [hash+1*16] == g
    movdqa [hash+1*16], xmm1
    pshufd xmm1, xmm0, 0xFF ; [hash+2*16] == h
    movdqa [hash+2*16], xmm1
    pshufd xmm0, xmm0, 0 ; xmm0 == e
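
; Working-state layout for the 64 rounds:
;   a = xmm7, b = xmm5, c = xmm4, d = xmm3, e = xmm0
;   f = [hash+0*16], g = [hash+1*16], h = [hash+2*16]
; xmm1, xmm2 and xmm6 are temporaries. The hash buffer is only scratch here;
; the real output is written after the rounds complete.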

LAB_LOOP:

;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
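;; T t2 = (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))
;; h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2
;; As in the schedule expansion, each Rotr32 is synthesised from a pair of
;; shifts, since SSE2 provides no packed rotate instruction.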
%macro lab_loop_blk 1
    movdqa xmm6, [data+%1]
    paddd xmm6, sha256_consts_m128i[%1]
    paddd xmm6, [hash+2*16] ; +h
    movdqa xmm1, xmm0
    movdqa xmm2, [hash+1*16]
    pandn xmm1, xmm2 ; ~e & g
    movdqa [hash+2*16], xmm2 ; h = g
    movdqa xmm2, [hash+0*16] ; f
    movdqa [hash+1*16], xmm2 ; g = f
    pand xmm2, xmm0 ; e & f
    pxor xmm1, xmm2 ; (e & f) ^ (~e & g)
    movdqa [hash+0*16], xmm0 ; f = e
    paddd xmm6, xmm1 ; Ch + h + w[i] + k[i]
    movdqa xmm1, xmm0
    psrld xmm0, 6
    movdqa xmm2, xmm0
    pslld xmm1, 7
    psrld xmm2, 5
    pxor xmm0, xmm1
    pxor xmm0, xmm2
    pslld xmm1, 14
    psrld xmm2, 14
    pxor xmm0, xmm1
    pxor xmm0, xmm2
    pslld xmm1, 5
    pxor xmm0, xmm1 ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)
    paddd xmm6, xmm0 ; xmm6 = t1
    movdqa xmm0, xmm3 ; d
    paddd xmm0, xmm6 ; e = d+t1
    movdqa xmm1, xmm5 ; =b
    movdqa xmm3, xmm4 ; d = c
    movdqa xmm2, xmm4 ; c
    pand xmm2, xmm5 ; b & c
    pand xmm4, xmm7 ; a & c
    pand xmm1, xmm7 ; a & b
    pxor xmm1, xmm4
    movdqa xmm4, xmm5 ; c = b
    movdqa xmm5, xmm7 ; b = a
    pxor xmm1, xmm2 ; Maj(a, b, c) = (a & b) ^ (a & c) ^ (b & c)
    paddd xmm6, xmm1 ; t1 + Maj(a, b, c)
    movdqa xmm2, xmm7
    psrld xmm7, 2
    movdqa xmm1, xmm7
    pslld xmm2, 10
    psrld xmm1, 11
    pxor xmm7, xmm2
    pxor xmm7, xmm1
    pslld xmm2, 9
    psrld xmm1, 9
    pxor xmm7, xmm2
    pxor xmm7, xmm1
    pslld xmm2, 11
    pxor xmm7, xmm2
    paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))
%endmacro

%assign i 0
%rep LAB_LOOP_UNROLL
    lab_loop_blk i
%assign i i+16
%endrep

; Finished the 64 rounds; add the initial state and store the final hash words.
    movdqa xmm1, [init+16]   ; initial e, f, g, h
    pshufd xmm2, xmm1, 0xFF  ; initial h
    movdqa xmm6, [hash+2*16] ; working h
    paddd xmm2, xmm6
    movdqa [hash+7*16], xmm2 ; hash[7] = h
    pshufd xmm2, xmm1, 0xAA  ; initial g
    movdqa xmm6, [hash+1*16] ; working g
    paddd xmm2, xmm6
    movdqa [hash+6*16], xmm2 ; hash[6] = g
    pshufd xmm2, xmm1, 0x55  ; initial f
    movdqa xmm6, [hash+0*16] ; working f
    paddd xmm2, xmm6
    movdqa [hash+5*16], xmm2 ; hash[5] = f
    pshufd xmm1, xmm1, 0     ; initial e
    paddd xmm0, xmm1
    movdqa [hash+4*16], xmm0 ; hash[4] = e
    movdqa xmm1, [init]      ; initial a, b, c, d
    pshufd xmm2, xmm1, 0xFF  ; initial d
    paddd xmm3, xmm2
    movdqa [hash+3*16], xmm3 ; hash[3] = d
    pshufd xmm2, xmm1, 0xAA  ; initial c
    paddd xmm4, xmm2
    movdqa [hash+2*16], xmm4 ; hash[2] = c
    pshufd xmm2, xmm1, 0x55  ; initial b
    paddd xmm5, xmm2
    movdqa [hash+1*16], xmm5 ; hash[1] = b
    pshufd xmm1, xmm1, 0     ; initial a
    paddd xmm7, xmm1
    movdqa [hash+0*16], xmm7 ; hash[0] = a
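; Each hash[k] slot (k = 0..7, i.e. a..h) is a 16-byte vector holding output
; word k for all four lanes: the working value after 64 rounds plus the
; matching init word.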

LAB_RET:
    pop edi
    pop esi
    retn 4

%ifidn __OUTPUT_FORMAT__,elf
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
%ifidn __OUTPUT_FORMAT__,elf32
section .note.GNU-stack noalloc noexec nowrite progbits
%endif