phatk120203.cl 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423
// This file is taken and modified from the public-domain poclbm project, and
// I have therefore decided to keep it public-domain.
// Modified version copyright 2011-2012 Con Kolivas
// 'u' is the working word type used throughout the kernel: a 4-wide, 2-wide
// or scalar vector of 32-bit uints, chosen at compile time via VECTORS4 /
// VECTORS2 so that each work-item evaluates 4, 2 or 1 nonce(s) per pass.
#ifdef VECTORS4
typedef uint4 u;
#else
#ifdef VECTORS2
typedef uint2 u;
#else
typedef uint u;
#endif
#endif
// SHA-256 round constants K[0..63] (FIPS 180-4, first 32 bits of the
// fractional parts of the cube roots of the first 64 primes).
__constant uint K[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
// Compile-time-constant message-schedule words for the two SHA-256 passes
// (the compiler optimizes the zero terms out of t1C/P*C expressions).
// Indices 0..15 are the known words of the first hash's second message block:
// the 0x80000000 padding bit at word 4 and the 640-bit (0x280) length at
// word 15. Indices 64..79 are the known words of the second hash's single
// block: padding at word 8 (index 72) and the 256-bit (0x100) length at
// word 15 (index 79). All other entries are zero.
__constant uint ConstW[128] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000U, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000280U,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000U, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100U,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
};
// SHA-256 initial hash values H0..H7 (FIPS 180-4), used to start the second
// (outer) hash pass.
__constant uint H[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
// ---------------------------------------------------------------------------
// Device-specific primitive selection. rot(x, y) is a LEFT-rotate by y bits
// (amd_bitalign(x, x, 32 - y) rotates right by 32-y, i.e. left by y), so the
// S0/S1/P1/P2 macros below express the standard right-rotates of FIPS 180-4
// as their left-rotate complements.
// ---------------------------------------------------------------------------
#ifdef BITALIGN
#pragma OPENCL EXTENSION cl_amd_media_ops : enable
#define rot(x, y) amd_bitalign(x, x, (uint)(32 - y))
// This part is not from the stock poclbm kernel. It's part of an optimization
// added in the Phoenix Miner.
// Some AMD devices have a BFI_INT opcode, which behaves exactly like the
// SHA-256 Ch function, but provides it in exactly one instruction. If
// detected, use it for Ch. Otherwise, construct Ch out of simpler logical
// primitives.
#ifdef BFI_INT
// Well, slight problem... It turns out BFI_INT isn't actually exposed to
// OpenCL (or CAL IL for that matter) in any way. However, there is
// a similar instruction, BYTE_ALIGN_INT, which is exposed to OpenCL via
// amd_bytealign, takes the same inputs, and provides the same output.
// We can use that as a placeholder for BFI_INT and have the application
// patch it after compilation.
// This is the BFI_INT function
#define Ch(x, y, z) amd_bytealign(x,y,z)
// Ma can also be implemented in terms of BFI_INT...
#define Ma(z, x, y) amd_bytealign(z^x,y,x)
#else // BFI_INT
// Later SDKs optimise this to BFI INT without patching and GCN
// actually fails if manually patched with BFI_INT
#define Ch(x, y, z) bitselect((u)z, (u)y, (u)x)
#define Ma(x, y, z) bitselect((u)x, (u)y, (u)z ^ (u)x)
#define rotr(x, y) amd_bitalign((u)x, (u)x, (u)y)
#endif
#else // BITALIGN
// Portable fallbacks when cl_amd_media_ops is unavailable.
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Ma(x, y, z) ((x & z) | (y & (x | z)))
#define rot(x, y) rotate((u)x, (u)y)
#define rotr(x, y) rotate((u)x, (u)(32-y))
#endif
//Various intermediate calculations for each SHA round
// The working variables a..h live in Vals[8]; instead of shuffling them each
// round, the round number n selects the register: Vals[(k + 128 - n) % 8].
// (128 keeps the dividend non-negative for all n used, 0..124.)
#define s0(n) (S0(Vals[(0 + 128 - (n)) % 8]))
// S0 == FIPS Sigma0: rotr2 ^ rotr13 ^ rotr22, written as left-rotates.
#define S0(n) (rot(n, 30u)^rot(n, 19u)^rot(n,10u))
#define s1(n) (S1(Vals[(4 + 128 - (n)) % 8]))
// S1 == FIPS Sigma1: rotr6 ^ rotr11 ^ rotr25, written as left-rotates.
#define S1(n) (rot(n, 26u)^rot(n, 21u)^rot(n, 7u))
#define ch(n) Ch(Vals[(4 + 128 - (n)) % 8],Vals[(5 + 128 - (n)) % 8],Vals[(6 + 128 - (n)) % 8])
#define maj(n) Ma(Vals[(1 + 128 - (n)) % 8],Vals[(2 + 128 - (n)) % 8],Vals[(0 + 128 - (n)) % 8])
//t1 calc when W is already calculated
#define t1(n) K[(n) % 64] + Vals[(7 + 128 - (n)) % 8] + W[(n)] + s1(n) + ch(n)
//t1 calc which calculates W
#define t1W(n) K[(n) % 64] + Vals[(7 + 128 - (n)) % 8] + W(n) + s1(n) + ch(n)
//Used for constant W Values (the compiler optimizes out zeros)
#define t1C(n) (K[(n) % 64]+ ConstW[(n)]) + Vals[(7 + 128 - (n)) % 8] + s1(n) + ch(n)
//t2 Calc
#define t2(n) maj(n) + s0(n)
// Plain shift-based rotate for __constant operands (amd_bitalign needs u).
#define rotC(x,n) (x<<n | x >> (32-n))
//W calculation used for SHA round
// Message-schedule expansion: W[n] = W[n-16] + W[n-7] + sigma0(W[n-15]) +
// sigma1(W[n-2]); note it also STORES into W[n] as a side effect.
#define W(n) (W[n] = P4(n) + P3(n) + P2(n) + P1(n))
//Partial W calculations (used for the begining where only some values are nonzero)
// P1 == sigma1 (rotr17 ^ rotr19 ^ shr10) of W[n-2], as left-rotates.
#define P1(n) ((rot(W[(n)-2],15u)^rot(W[(n)-2],13u)^((W[(n)-2])>>10U)))
// P2 == sigma0 (rotr7 ^ rotr18 ^ shr3) of W[n-15], as left-rotates.
#define P2(n) ((rot(W[(n)-15],25u)^rot(W[(n)-15],14u)^((W[(n)-15])>>3U)))
#define p1(x) ((rot(x,15u)^rot(x,13u)^((x)>>10U)))
#define p2(x) ((rot(x,25u)^rot(x,14u)^((x)>>3U)))
#define P3(n) W[n-7]
#define P4(n) W[n-16]
//Partial Calcs for constant W values
#define P1C(n) ((rotC(ConstW[(n)-2],15)^rotC(ConstW[(n)-2],13)^((ConstW[(n)-2])>>10U)))
#define P2C(n) ((rotC(ConstW[(n)-15],25)^rotC(ConstW[(n)-15],14)^((ConstW[(n)-15])>>3U)))
#define P3C(x) ConstW[x-7]
#define P4C(x) ConstW[x-16]
//SHA round with built in W calc
#define sharoundW(n) Barrier1(n); Vals[(3 + 128 - (n)) % 8] += t1W(n); Vals[(7 + 128 - (n)) % 8] = t1W(n) + t2(n);
//SHA round without W calc
#define sharound(n) Barrier2(n); Vals[(3 + 128 - (n)) % 8] += t1(n); Vals[(7 + 128 - (n)) % 8] = t1(n) + t2(n);
//SHA round for constant W values
#define sharoundC(n) Barrier3(n); Vals[(3 + 128 - (n)) % 8] += t1C(n); Vals[(7 + 128 - (n)) % 8] = t1C(n) + t2(n);
//The compiler is stupid... I put this in there only to stop the compiler from (de)optimizing the order
// These dummy writes to t1 exist solely to pin instruction scheduling between
// rounds; do not remove or reorder them.
#define Barrier1(n) t1 = t1C((n+1))
#define Barrier2(n) t1 = t1C((n))
#define Barrier3(n) t1 = t1C((n))
//#define WORKSIZE 256
#define MAXBUFFERS (4095)
// Double-SHA-256 search kernel (one invocation per nonce group).
// The host precomputes everything that does not depend on the nonce:
//   state0..state7 - midstate after the first 64-byte header block;
//   B1,C1,D1,F1,G1,H1 - working variables b,c,d,f,g,h after the host-run
//     early rounds of the second block (a and e arrive via PreVal0/PreVal4);
//   base - starting nonce for this work-item's group;
//   W16,W17, PreW18,PreW19, PreW31,PreW32 - precomputed/partial schedule words;
//   output - result buffer indexed by FOUND/NFLAG below.
// Only additions involving the nonce (W[3]) remain to be done here.
__kernel
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
void search( const uint state0, const uint state1, const uint state2, const uint state3,
const uint state4, const uint state5, const uint state6, const uint state7,
const uint B1, const uint C1, const uint D1,
const uint F1, const uint G1, const uint H1,
const u base,
const uint W16, const uint W17,
const uint PreVal4, const uint PreVal0,
const uint PreW18, const uint PreW19,
const uint PreW31, const uint PreW32,
__global uint * output)
{
// One flat schedule array for BOTH hash passes: indices 0..63 belong to the
// first hash's second block, 64..123 to the second (outer) hash.
u W[124];
u Vals[8];
//Dummy Variable to prevent compiler from reordering between rounds
u t1;
//Vals[0]=state0;
Vals[1]=B1;
Vals[2]=C1;
Vals[3]=D1;
//Vals[4]=PreVal4;
Vals[5]=F1;
Vals[6]=G1;
Vals[7]=H1;
W[16] = W16;
W[17] = W17;
#ifdef VECTORS4
//Less dependencies to get both the local id and group id and then add them
// W[3] is the nonce word; each work-item owns 4 consecutive nonces.
W[3] = base + (uint)(get_local_id(0)) * 4u + (uint)(get_group_id(0)) * (WORKSIZE * 4u);
uint r = rot(W[3].x,25u)^rot(W[3].x,14u)^((W[3].x)>>3U);
//Since only the 2 LSB is opposite between the nonces, we can save an instruction by flipping the 4 bits in W18 rather than the 1 bit in W3
W[18] = PreW18 + (u){r, r ^ 0x2004000U, r ^ 0x4008000U, r ^ 0x600C000U};
#else
#ifdef VECTORS2
W[3] = base + (uint)(get_local_id(0)) * 2u + (uint)(get_group_id(0)) * (WORKSIZE * 2u);
uint r = rot(W[3].x,25u)^rot(W[3].x,14u)^((W[3].x)>>3U);
W[18] = PreW18 + (u){r, r ^ 0x2004000U};
#else
W[3] = base + get_local_id(0) + get_group_id(0) * (WORKSIZE);
u r = rot(W[3],25u)^rot(W[3],14u)^((W[3])>>3U);
W[18] = PreW18 + r;
#endif
#endif
//the order of the W calcs and Rounds is like this because the compiler needs help finding how to order the instructions
// NOTE(review): the interleaving below is deliberate hand-scheduling; do not
// "clean it up" by grouping the W writes or rounds together.
Vals[4] = PreVal4 + W[3];
Vals[0] = PreVal0 + W[3];
sharoundC(4);
W[19] = PreW19 + W[3];
sharoundC(5);
W[20] = P4C(20) + P1(20);
sharoundC(6);
W[21] = P1(21);
sharoundC(7);
W[22] = P3C(22) + P1(22);
sharoundC(8);
W[23] = W[16] + P1(23);
sharoundC(9);
W[24] = W[17] + P1(24);
sharoundC(10);
W[25] = P1(25) + P3(25);
W[26] = P1(26) + P3(26);
sharoundC(11);
W[27] = P1(27) + P3(27);
W[28] = P1(28) + P3(28);
sharoundC(12);
W[29] = P1(29) + P3(29);
sharoundC(13);
W[30] = P1(30) + P2C(30) + P3(30);
W[31] = PreW31 + (P1(31) + P3(31));
sharoundC(14);
W[32] = PreW32 + (P1(32) + P3(32));
sharoundC(15);
// Rounds 16..32 use the W values computed above; 33..63 expand W inline.
sharound(16);
sharound(17);
sharound(18);
sharound(19);
sharound(20);
sharound(21);
sharound(22);
sharound(23);
sharound(24);
sharound(25);
sharound(26);
sharound(27);
sharound(28);
sharound(29);
sharound(30);
sharound(31);
sharound(32);
sharoundW(33);
sharoundW(34);
sharoundW(35);
sharoundW(36);
sharoundW(37);
sharoundW(38);
sharoundW(39);
sharoundW(40);
sharoundW(41);
sharoundW(42);
sharoundW(43);
sharoundW(44);
sharoundW(45);
sharoundW(46);
sharoundW(47);
sharoundW(48);
sharoundW(49);
sharoundW(50);
sharoundW(51);
sharoundW(52);
sharoundW(53);
sharoundW(54);
sharoundW(55);
sharoundW(56);
sharoundW(57);
sharoundW(58);
sharoundW(59);
sharoundW(60);
sharoundW(61);
sharoundW(62);
sharoundW(63);
// First hash complete: its digest (midstate + working vars) becomes the
// message words 0..7 of the second hash, stored at W[64..71].
W[64]=state0+Vals[0];
W[65]=state1+Vals[1];
W[66]=state2+Vals[2];
W[67]=state3+Vals[3];
W[68]=state4+Vals[4];
W[69]=state5+Vals[5];
W[70]=state6+Vals[6];
W[71]=state7+Vals[7];
// Reset working variables to the SHA-256 initial values for the outer hash.
Vals[0]=H[0];
Vals[1]=H[1];
Vals[2]=H[2];
Vals[3]=H[3];
Vals[4]=H[4];
Vals[5]=H[5];
Vals[6]=H[6];
Vals[7]=H[7];
//sharound(64 + 0);
// Round 64+0 unrolled by hand: with all-constant initial state, only W[64]
// varies, so t1 collapses to these precomputed constants.
const u Temp = (0xb0edbdd0U + K[0]) + W[64];
Vals[7] = Temp + 0x08909ae5U;
Vals[3] = 0xa54ff53aU + Temp;
#define P124(n) P2(n) + P1(n) + P4(n)
// Schedule expansion for the outer hash, again interleaved with rounds;
// the P*C terms pick up the constant padding/length words from ConstW.
W[64 + 16] = + P2(64 + 16) + P4(64 + 16);
sharound(64 + 1);
W[64 + 17] = P1C(64 + 17) + P2(64 + 17) + P4(64 + 17);
sharound(64 + 2);
W[64 + 18] = P124(64 + 18);
sharound(64 + 3);
W[64 + 19] = P124(64 + 19);
sharound(64 + 4);
W[64 + 20] = P124(64 + 20);
sharound(64 + 5);
W[64 + 21] = P124(64 + 21);
sharound(64 + 6);
W[64 + 22] = P4(64 + 22) + P3C(64 + 22) + P2(64 + 22) + P1(64 + 22);
sharound(64 + 7);
W[64 + 23] = P4(64 + 23) + P3(64 + 23) + P2C(64 + 23) + P1(64 + 23);
sharoundC(64 + 8);
W[64 + 24] = P1(64 + 24) + P4C(64 + 24) + P3(64 + 24);
sharoundC(64 + 9);
W[64 + 25] = P3(64 + 25) + P1(64 + 25);
sharoundC(64 + 10);
W[64 + 26] = P3(64 + 26) + P1(64 + 26);
sharoundC(64 + 11);
W[64 + 27] = P3(64 + 27) + P1(64 + 27);
sharoundC(64 + 12);
W[64 + 28] = P3(64 + 28) + P1(64 + 28);
sharoundC(64 + 13);
W[64 + 29] = P1(64 + 29) + P3(64 + 29);
W[64 + 30] = P3(64 + 30) + P2C(64 + 30) + P1(64 + 30);
sharoundC(64 + 14);
W[64 + 31] = P4C(64 + 31) + P3(64 + 31) + P2(64 + 31) + P1(64 + 31);
sharoundC(64 + 15);
sharound(64 + 16);
sharound(64 + 17);
sharound(64 + 18);
sharound(64 + 19);
sharound(64 + 20);
sharound(64 + 21);
sharound(64 + 22);
sharound(64 + 23);
sharound(64 + 24);
sharound(64 + 25);
sharound(64 + 26);
sharound(64 + 27);
sharound(64 + 28);
sharound(64 + 29);
sharound(64 + 30);
sharound(64 + 31);
sharoundW(64 + 32);
sharoundW(64 + 33);
sharoundW(64 + 34);
sharoundW(64 + 35);
sharoundW(64 + 36);
sharoundW(64 + 37);
sharoundW(64 + 38);
sharoundW(64 + 39);
sharoundW(64 + 40);
sharoundW(64 + 41);
sharoundW(64 + 42);
sharoundW(64 + 43);
sharoundW(64 + 44);
sharoundW(64 + 45);
sharoundW(64 + 46);
sharoundW(64 + 47);
sharoundW(64 + 48);
sharoundW(64 + 49);
sharoundW(64 + 50);
sharoundW(64 + 51);
sharoundW(64 + 52);
sharoundW(64 + 53);
sharoundW(64 + 54);
sharoundW(64 + 55);
sharoundW(64 + 56);
sharoundW(64 + 57);
sharoundW(64 + 58);
// The last rounds are folded into one expression: v is zero exactly when the
// final hash word H7 would be zero (the share/target test used by the host).
// Note W(59+64) both computes and stores W[123] (side effect of the W macro).
u v = W[117] + W[108] + Vals[3] + Vals[7] + P2(124) + P1(124) + Ch((Vals[0] + Vals[4]) + (K[59] + W(59+64)) + s1(64+59)+ ch(59+64),Vals[1],Vals[2]) ^
-(K[60] + H[7]) - S1((Vals[0] + Vals[4]) + (K[59] + W(59+64)) + s1(64+59)+ ch(59+64));
// Result buffer protocol: output[FOUND] flags that some nonce hit; the nonce
// itself is also stored at output[nonce & NFLAG].
#define FOUND (0x80)
#define NFLAG (0x7F)
#ifdef VECTORS4
bool result = v.x & v.y & v.z & v.w;
if (!result) {
if (!v.x)
output[FOUND] = output[NFLAG & W[3].x] = W[3].x;
if (!v.y)
output[FOUND] = output[NFLAG & W[3].y] = W[3].y;
if (!v.z)
output[FOUND] = output[NFLAG & W[3].z] = W[3].z;
if (!v.w)
output[FOUND] = output[NFLAG & W[3].w] = W[3].w;
}
#else
#ifdef VECTORS2
bool result = v.x & v.y;
if (!result) {
if (!v.x)
output[FOUND] = output[NFLAG & W[3].x] = W[3].x;
if (!v.y)
output[FOUND] = output[NFLAG & W[3].y] = W[3].y;
}
#else
if (!v)
output[FOUND] = output[NFLAG & W[3]] = W[3];
#endif
#endif
}