cpu-miner.c

/*
 * Copyright 2011 Con Kolivas
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "cpuminer-config.h"
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#ifndef WIN32
#include <sys/resource.h>
#endif
#include <getopt.h>
#include <jansson.h>
#include <curl/curl.h>
#include "compat.h"
#include "miner.h"
#include "findnonce.h"
#include "ocl.h"

#define PROGRAM_NAME "minerd"
#define DEF_RPC_URL "http://127.0.0.1:8332/"
#define DEF_RPC_USERNAME "rpcuser"
#define DEF_RPC_PASSWORD "rpcpass"
#define DEF_RPC_USERPASS DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD
#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>

static inline void drop_policy(void)
{
    struct sched_param param;

    /* SCHED_IDLE and SCHED_BATCH require a static priority of 0 */
    param.sched_priority = 0;
#ifdef SCHED_IDLE
    if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
#ifdef SCHED_BATCH
        sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(cpu, &set);
    sched_setaffinity(0, sizeof(set), &set);
    applog(LOG_INFO, "Binding thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif
enum workio_commands {
    WC_GET_WORK,
    WC_SUBMIT_WORK,
};

struct workio_cmd {
    enum workio_commands cmd;
    struct thr_info *thr;
    union {
        struct work *work;
    } u;
};

enum sha256_algos {
    ALGO_C,              /* plain C */
    ALGO_4WAY,           /* parallel SSE2 */
    ALGO_VIA,            /* VIA padlock */
    ALGO_CRYPTOPP,       /* Crypto++ (C) */
    ALGO_CRYPTOPP_ASM32, /* Crypto++ 32-bit assembly */
    ALGO_SSE2_64,        /* SSE2 for x86_64 */
};

static const char *algo_names[] = {
    [ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
    [ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
    [ALGO_VIA] = "via",
#endif
    [ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
    [ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
    [ALGO_SSE2_64] = "sse2_64",
#endif
};

bool opt_debug = false;
bool opt_protocol = false;
bool opt_ndevs = false;
bool want_longpoll = true;
bool have_longpoll = false;
bool use_syslog = false;
static bool opt_quiet = false;
static int opt_retries = 10;
static int opt_fail_pause = 30;
static int opt_log_interval = 5;
int opt_vectors;
int opt_worksize;
int opt_scantime = 60;
static json_t *opt_config;
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;
static int opt_n_threads = 1;
static int num_processors;
static int scan_intensity = 5;
static char *rpc_url;
static char *rpc_userpass;
static char *rpc_user, *rpc_pass;
struct thr_info *thr_info;
static int work_thr_id;
int longpoll_thr_id;
struct work_restart *work_restart = NULL;
pthread_mutex_t time_lock;
static pthread_mutex_t hash_lock;
static pthread_mutex_t get_lock;
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;

struct option_help {
    const char *name;
    const char *helptext;
};

static struct option_help options_help[] = {
    { "help",
      "(-h) Display this help text" },
    { "config FILE",
      "(-c FILE) JSON-format configuration file (default: none)\n"
      "See example-cfg.json for an example configuration." },
    { "algo XXX",
      "(-a XXX) Specify sha256 implementation:\n"
      "\tc\t\tLinux kernel sha256, implemented in C (default)"
#ifdef WANT_SSE2_4WAY
      "\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
      "\n\tvia\t\tVIA padlock implementation"
#endif
      "\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
      "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
      "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
    },
    { "quiet",
      "(-q) Disable per-thread hashmeter output (default: off)" },
    { "debug",
      "(-D) Enable debug output (default: off)" },
    { "intensity",
      "(-I) Intensity of scanning (0 - 10, default 5)" },
    { "log",
      "(-l) Interval in seconds between log output (default 5)" },
    { "ndevs",
      "(-n) Display number of detected GPUs" },
    { "no-longpoll",
      "Disable X-Long-Polling support (default: enabled)" },
    { "protocol-dump",
      "(-P) Verbose dump of protocol-level activities (default: off)" },
    { "retries N",
      "(-r N) Number of times to retry, if JSON-RPC call fails\n"
      "\t(default: 10; use -1 for \"never\")" },
    { "retry-pause N",
      "(-R N) Number of seconds to pause, between retries\n"
      "\t(default: 30)" },
    { "scantime N",
      "(-s N) Upper bound on time spent scanning current work,\n"
      "\tin seconds. (default: 60)" },
#ifdef HAVE_SYSLOG_H
    { "syslog",
      "Use system log for output messages (default: standard error)" },
#endif
    { "threads N",
      "(-t N) Number of miner CPU threads (default: number of processors)" },
    { "url URL",
      "URL for bitcoin JSON-RPC server "
      "(default: " DEF_RPC_URL ")" },
    { "userpass USERNAME:PASSWORD",
      "Username:Password pair for bitcoin JSON-RPC server "
      "(default: " DEF_RPC_USERPASS ")" },
    { "user USERNAME",
      "(-u USERNAME) Username for bitcoin JSON-RPC server "
      "(default: " DEF_RPC_USERNAME ")" },
    { "vectors N",
      "(-v N) Override detected optimal vector width (default: detected, 1,2 or 4)" },
    { "worksize N",
      "(-w N) Override detected optimal worksize (default: detected)" },
    { "pass PASSWORD",
      "(-p PASSWORD) Password for bitcoin JSON-RPC server "
      "(default: " DEF_RPC_PASSWORD ")" },
};

static struct option options[] = {
    { "algo", 1, NULL, 'a' },
    { "config", 1, NULL, 'c' },
    { "debug", 0, NULL, 'D' },
    { "help", 0, NULL, 'h' },
    { "intensity", 1, NULL, 'I' },
    { "log", 1, NULL, 'l' },
    { "ndevs", 0, NULL, 'n' },
    { "no-longpoll", 0, NULL, 1003 },
    { "pass", 1, NULL, 'p' },
    { "protocol-dump", 0, NULL, 'P' },
    { "quiet", 0, NULL, 'q' },
    { "threads", 1, NULL, 't' },
    { "retries", 1, NULL, 'r' },
    { "retry-pause", 1, NULL, 'R' },
    { "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
    { "syslog", 0, NULL, 1004 },
#endif
    { "url", 1, NULL, 1001 },
    { "user", 1, NULL, 'u' },
    { "vectors", 1, NULL, 'v' },
    { "worksize", 1, NULL, 'w' },
    { "userpass", 1, NULL, 1002 },
};

struct work {
    unsigned char data[128];
    unsigned char hash1[64];
    unsigned char midstate[32];
    unsigned char target[32];
    unsigned char hash[32];

    uint32_t output[1];
    uint32_t res_nonce;
    uint32_t valid;
    dev_blk_ctx blk;
};
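
/* Fetch the JSON field 'key' from 'obj' and decode its hex string value into
 * the buflen-byte buffer 'buf'. Returns false and logs an error if the key is
 * missing, is not a string, or cannot be hex-decoded. */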
static bool jobj_binary(const json_t *obj, const char *key,
                        void *buf, size_t buflen)
{
    const char *hexstr;
    json_t *tmp;

    tmp = json_object_get(obj, key);
    if (unlikely(!tmp)) {
        applog(LOG_ERR, "JSON key '%s' not found", key);
        return false;
    }
    hexstr = json_string_value(tmp);
    if (unlikely(!hexstr)) {
        applog(LOG_ERR, "JSON key '%s' is not a string", key);
        return false;
    }
    if (!hex2bin(buf, hexstr, buflen))
        return false;

    return true;
}
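
/* Decode a getwork JSON result into a struct work by hex-decoding the
 * midstate, data, hash1 and target fields into their respective buffers. */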
static bool work_decode(const json_t *val, struct work *work)
{
    if (unlikely(!jobj_binary(val, "midstate",
                              work->midstate, sizeof(work->midstate)))) {
        applog(LOG_ERR, "JSON inval midstate");
        goto err_out;
    }

    if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
        applog(LOG_ERR, "JSON inval data");
        goto err_out;
    }

    if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
        applog(LOG_ERR, "JSON inval hash1");
        goto err_out;
    }

    if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
        applog(LOG_ERR, "JSON inval target");
        goto err_out;
    }

    memset(work->hash, 0, sizeof(work->hash));

    return true;

err_out:
    return false;
}
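
/* Submit one solved work unit (the hex-encoded block data) back to the
 * server via a getwork JSON-RPC call and count the share as accepted or
 * rejected based on the boolean result. */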
static bool submit_upstream_work(CURL *curl, char *hexstr)
{
    json_t *val, *res;
    char s[345];
    bool rc = false;

    /* build JSON-RPC request */
    sprintf(s,
            "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
            hexstr);

    if (opt_debug)
        applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);

    /* issue JSON-RPC request */
    val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
    if (unlikely(!val)) {
        applog(LOG_ERR, "submit_upstream_work json_rpc_call failed");
        goto out;
    }

    res = json_object_get(val, "result");

    /* Theoretically threads could race when modifying accepted and
     * rejected values but the chance of two submits completing at the
     * same time is zero so there is no point adding extra locking */
    if (json_is_true(res)) {
        accepted++;
        applog(LOG_INFO, "PROOF OF WORK RESULT: true (yay!!!)");
    } else {
        rejected++;
        applog(LOG_INFO, "PROOF OF WORK RESULT: false (booooo)");
    }

    json_decref(val);

    rc = true;
out:
    return rc;
}
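
/* The basic getwork request: with no parameters it asks the server for new
 * work. get_upstream_work() issues it and decodes the JSON result into a
 * struct work. */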
static const char *rpc_req =
    "{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

static bool get_upstream_work(CURL *curl, struct work *work)
{
    json_t *val;
    bool rc;

    val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
                        want_longpoll, false);
    if (!val)
        return false;

    rc = work_decode(json_object_get(val, "result"), work);

    json_decref(val);

    return rc;
}

static void workio_cmd_free(struct workio_cmd *wc)
{
    if (!wc)
        return;

    switch (wc->cmd) {
    case WC_SUBMIT_WORK:
        free(wc->u.work);
        break;
    default: /* do nothing */
        break;
    }

    memset(wc, 0, sizeof(*wc)); /* poison */
    free(wc);
}
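
/* Runs on the workio thread: fetch one unit of work from the server,
 * retrying up to opt_retries times with opt_fail_pause seconds between
 * attempts, then push the result onto the requesting thread's queue. */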
static bool workio_get_work(struct workio_cmd *wc)
{
    struct work *ret_work;
    int failures = 0;
    bool ret = false;
    CURL *curl;

    ret_work = calloc(1, sizeof(*ret_work));
    if (!ret_work) {
        applog(LOG_ERR, "Failed to calloc ret_work in workio_get_work");
        return ret;
    }

    curl = curl_easy_init();
    if (unlikely(!curl)) {
        applog(LOG_ERR, "CURL initialization failed");
        free(ret_work);
        return ret;
    }

    /* obtain new work from bitcoin via JSON-RPC */
    while (!get_upstream_work(curl, ret_work)) {
        if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
            applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
            free(ret_work);
            goto out;
        }

        /* pause, then restart work-request loop */
        applog(LOG_ERR, "json_rpc_call failed, retry after %d seconds",
               opt_fail_pause);
        sleep(opt_fail_pause);
    }

    /* send work to requesting thread */
    if (unlikely(!tq_push(wc->thr->q, ret_work))) {
        applog(LOG_ERR, "Failed to tq_push work in workio_get_work");
        free(ret_work);
    } else
        ret = true;

out:
    curl_easy_cleanup(curl);
    return ret;
}
static void *submit_thread(void *userdata)
{
    char *hexstr = (char *)userdata;
    int failures = 0;
    CURL *curl;

    curl = curl_easy_init();
    if (unlikely(!curl)) {
        applog(LOG_ERR, "CURL initialization failed");
        exit(1);
    }

    /* submit solution to bitcoin via JSON-RPC */
    while (!submit_upstream_work(curl, hexstr)) {
        if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
            applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
            free(hexstr);
            curl_easy_cleanup(curl);
            exit(1);
        }

        /* pause, then restart work-request loop */
        applog(LOG_ERR, "...retry after %d seconds",
               opt_fail_pause);
        sleep(opt_fail_pause);
    }

    free(hexstr);
    curl_easy_cleanup(curl);
    return NULL;
}

/* Work is submitted asynchronously by creating a thread for each submit,
 * thus avoiding the mining threads having to wait until work is submitted
 * before they can continue working. */
static bool workio_submit_work(struct workio_cmd *wc)
{
    struct work *work;
    pthread_t thr;
    char *hexstr;

    work = wc->u.work;

    /* build hex string */
    hexstr = bin2hex(work->data, sizeof(work->data));
    if (unlikely(!hexstr)) {
        applog(LOG_ERR, "workio_submit_work OOM");
        return false;
    }

    if (unlikely(pthread_create(&thr, NULL, submit_thread, (void *)hexstr))) {
        applog(LOG_ERR, "Failed to create submit_thread");
        free(hexstr);
        return false;
    }
    pthread_detach(thr);
    return true;
}
static void *workio_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    bool ok = true;

    while (ok) {
        struct workio_cmd *wc;

        /* wait for workio_cmd sent to us, on our queue */
        wc = tq_pop(mythr->q, NULL);
        if (!wc) {
            ok = false;
            break;
        }

        /* process workio_cmd */
        switch (wc->cmd) {
        case WC_GET_WORK:
            ok = workio_get_work(wc);
            break;
        case WC_SUBMIT_WORK:
            ok = workio_submit_work(wc);
            break;
        default: /* should never happen */
            ok = false;
            break;
        }

        workio_cmd_free(wc);
    }

    tq_freeze(mythr->q);

    return NULL;
}
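
/* Update the hash rate counters and print a rate line at most once every
 * opt_log_interval seconds. The first figure is the rate since the last
 * printout, the second the average since startup; updates to the shared
 * totals are protected by hash_lock when more than one thread is mining. */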
static void hashmeter(int thr_id, struct timeval *diff,
                      unsigned long hashes_done)
{
    struct timeval temp_tv_end, total_diff;
    double khashes, secs;
    double total_secs;
    double local_mhashes, local_secs;
    static unsigned long local_hashes_done = 0;

    /* Don't bother calculating anything if we're not displaying it */
    if (opt_quiet || !opt_log_interval)
        return;

    khashes = hashes_done / 1000.0;
    secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
    if (opt_debug)
        applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
               thr_id, hashes_done, hashes_done / secs);

    gettimeofday(&temp_tv_end, NULL);
    timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
    local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);

    if (opt_n_threads + nDevs > 1) {
        /* Totals are updated by all threads so can race without locking */
        pthread_mutex_lock(&hash_lock);
        total_mhashes_done += (double)hashes_done / 1000000.0;
        local_hashes_done += hashes_done;
        if (total_diff.tv_sec < opt_log_interval) {
            /* Only update the total every opt_log_interval seconds */
            pthread_mutex_unlock(&hash_lock);
            return;
        }
        gettimeofday(&total_tv_end, NULL);
        pthread_mutex_unlock(&hash_lock);
    } else {
        total_mhashes_done += (double)hashes_done / 1000000.0;
        local_hashes_done += hashes_done;
        if (total_diff.tv_sec < opt_log_interval)
            return;
        gettimeofday(&total_tv_end, NULL);
    }

    timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
    total_secs = (double)total_diff.tv_sec +
                 ((double)total_diff.tv_usec / 1000000.0);
    local_mhashes = local_hashes_done / 1000000.0;
    local_hashes_done = 0;

    applog(LOG_INFO, "[%.2f | %.2f Mhash/s] [%d Accepted] [%d Rejected]",
           local_mhashes / local_secs,
           total_mhashes_done / total_secs, accepted, rejected);
}

/* Since we always have one extra work item queued, set the thread id to 0
 * for all the work and just give the work to the first thread that requests
 * work */
static bool get_work(struct work *work)
{
    static struct work *work_heap = NULL;
    struct thr_info *thr = &thr_info[0];
    struct workio_cmd *wc;
    bool ret = false;

    /* fill out work request message */
    wc = calloc(1, sizeof(*wc));
    if (unlikely(!wc))
        goto out;

    wc->cmd = WC_GET_WORK;
    wc->thr = thr;

    /* send work request to workio thread */
    if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
        workio_cmd_free(wc);
        goto out;
    }

    /* work_heap is a static var so it is protected by get_lock */
    pthread_mutex_lock(&get_lock);
    if (likely(work_heap)) {
        memcpy(work, work_heap, sizeof(*work));
        /* Wait for next response, a unit of work - it should be queued */
        free(work_heap);
        work_heap = tq_pop(thr->q, NULL);
    } else {
        /* wait for 1st response, or 1st response after failure */
        work_heap = tq_pop(thr->q, NULL);
        if (unlikely(!work_heap))
            goto out_unlock;

        /* send for another work request for the next time get_work
         * is called. */
        wc = calloc(1, sizeof(*wc));
        if (unlikely(!wc)) {
            free(work_heap);
            work_heap = NULL;
            goto out_unlock;
        }
        wc->cmd = WC_GET_WORK;
        wc->thr = thr;
        if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
            workio_cmd_free(wc);
            free(work_heap);
            work_heap = NULL;
            goto out_unlock;
        }
    }

    ret = true;
out_unlock:
    pthread_mutex_unlock(&get_lock);
out:
    return ret;
}

static bool submit_work(struct thr_info *thr, const struct work *work_in)
{
    struct workio_cmd *wc;

    /* fill out work request message */
    wc = calloc(1, sizeof(*wc));
    if (!wc)
        return false;

    wc->u.work = malloc(sizeof(*work_in));
    if (!wc->u.work)
        goto err_out;

    wc->cmd = WC_SUBMIT_WORK;
    wc->thr = thr;
    memcpy(wc->u.work, work_in, sizeof(*work_in));

    /* send solution to workio thread */
    if (!tq_push(thr_info[work_thr_id].q, wc))
        goto err_out;

    return true;

err_out:
    workio_cmd_free(wc);
    return false;
}

bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
    work->data[64 + 12 + 0] = (nonce >> 0) & 0xff;
    work->data[64 + 12 + 1] = (nonce >> 8) & 0xff;
    work->data[64 + 12 + 2] = (nonce >> 16) & 0xff;
    work->data[64 + 12 + 3] = (nonce >> 24) & 0xff;
    return submit_work(thr, work);
}

static inline int cpu_from_thr_id(int thr_id)
{
    return (thr_id - nDevs) % num_processors;
}
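
/* CPU mining thread: repeatedly fetch work, scan nonces with the selected
 * sha256 implementation (opt_algo) for up to max_nonce attempts, adjust
 * max_nonce so each scan takes roughly opt_scantime seconds, and submit any
 * result that meets the target. */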
static void *miner_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    int thr_id = mythr->id;
    uint32_t max_nonce = 0xffffff;

    /* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
     * and if that fails, then SCHED_BATCH. No need for this to be an
     * error if it fails */
    setpriority(PRIO_PROCESS, 0, 19);
    drop_policy();

    /* Cpu affinity only makes sense if the number of threads is a multiple
     * of the number of CPUs */
    if (!(opt_n_threads % num_processors))
        affine_to_cpu(cpu_from_thr_id(thr_id), thr_id % num_processors);

    while (1) {
        struct work work __attribute__((aligned(128)));
        unsigned long hashes_done;
        struct timeval tv_start, tv_end, diff;
        uint64_t max64;
        bool rc;

        /* obtain new work from internal workio thread */
        if (unlikely(!get_work(&work))) {
            applog(LOG_ERR, "work retrieval failed, exiting "
                   "mining thread %d", mythr->id);
            goto out;
        }

        hashes_done = 0;
        gettimeofday(&tv_start, NULL);

        /* scan nonces for a proof-of-work hash */
        switch (opt_algo) {
        case ALGO_C:
            rc = scanhash_c(thr_id, work.midstate, work.data + 64,
                            work.hash1, work.hash, work.target,
                            max_nonce, &hashes_done);
            break;
#ifdef WANT_X8664_SSE2
        case ALGO_SSE2_64: {
            unsigned int rc5 =
                scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
                                 work.hash1, work.hash,
                                 work.target,
                                 max_nonce, &hashes_done);
            rc = (rc5 == -1) ? false : true;
            break;
        }
#endif
#ifdef WANT_SSE2_4WAY
        case ALGO_4WAY: {
            unsigned int rc4 =
                ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
                                  work.hash1, work.hash,
                                  work.target,
                                  max_nonce, &hashes_done);
            rc = (rc4 == -1) ? false : true;
            break;
        }
#endif
#ifdef WANT_VIA_PADLOCK
        case ALGO_VIA:
            rc = scanhash_via(thr_id, work.data, work.target,
                              max_nonce, &hashes_done);
            break;
#endif
        case ALGO_CRYPTOPP:
            rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
                                   work.hash1, work.hash, work.target,
                                   max_nonce, &hashes_done);
            break;
#ifdef WANT_CRYPTOPP_ASM32
        case ALGO_CRYPTOPP_ASM32:
            rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
                                work.hash1, work.hash, work.target,
                                max_nonce, &hashes_done);
            break;
#endif
        default:
            /* should never happen */
            goto out;
        }

        /* record scanhash elapsed time */
        gettimeofday(&tv_end, NULL);
        timeval_subtract(&diff, &tv_end, &tv_start);

        hashmeter(thr_id, &diff, hashes_done);

        /* adjust max_nonce to meet target scan time */
        if (diff.tv_usec > 500000)
            diff.tv_sec++;
        if (diff.tv_sec > 0) {
            max64 =
                ((uint64_t)hashes_done * opt_scantime) / diff.tv_sec;
            if (max64 > 0xfffffffaULL)
                max64 = 0xfffffffaULL;
            max_nonce = max64;
        }

        /* if nonce found, submit work */
        if (unlikely(rc)) {
            applog(LOG_INFO, "CPU %d found something?", cpu_from_thr_id(thr_id));
            if (!submit_work(mythr, &work))
                break;
        }
    }

out:
    tq_freeze(mythr->q);

    return NULL;
}

enum {
    STAT_SLEEP_INTERVAL = 1,
    STAT_CTR_INTERVAL = 10000000,
    FAILURE_INTERVAL = 30,
};
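
/* Bind the precalculated block context values from precalc_hash() and the
 * output buffer as arguments of the OpenCL search kernel. The order here must
 * match the kernel's parameter list; argument 14 is the starting nonce, the
 * only argument re-set between enqueues of the same work unit. */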
static _clState *clStates[16];

static inline cl_int queue_kernel_parameters(dev_blk_ctx *blk, cl_kernel *kernel,
                                             struct _cl_mem *output)
{
    cl_int status = 0;
    int num = 0;

    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
    status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
    status |= clSetKernelArg(*kernel, num++, sizeof(output), (void *)&output);

    return status;
}

static inline int gpu_from_thr_id(int thr_id)
{
    return thr_id;
}
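
/* GPU mining thread: precalculate the hash midstate on the CPU, enqueue the
 * OpenCL search kernel over 2^(15 + intensity) work items, then read back the
 * result buffer. res[127] is a flag that one or more candidate nonces were
 * written into res[0..126]; candidates are verified and submitted via
 * postcalc_hash(). New work is fetched when the scan time, the nonce range
 * or a long-poll restart requires it. */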
static void *gpuminer_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    struct timeval tv_start;
    int thr_id = mythr->id;
    uint32_t res[128], blank_res[128];
    cl_kernel *kernel;

    memset(blank_res, 0, BUFFERSIZE);
    /* res is inspected before the first read-back completes, so start it zeroed too */
    memset(res, 0, sizeof(res));

    size_t globalThreads[1];
    size_t localThreads[1];
    cl_int status;
    _clState *clState = clStates[thr_id];

    kernel = &clState->kernel;
    status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_TRUE, 0,
                                  BUFFERSIZE, blank_res, 0, NULL, NULL);
    if (unlikely(status != CL_SUCCESS)) {
        applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
        goto out;
    }

    struct work *work = malloc(sizeof(struct work));
    if (unlikely(!work)) {
        applog(LOG_ERR, "Failed to malloc work in gpuminer_thread");
        goto out;
    }
    bool need_work = true;
    unsigned int threads = 1 << (15 + scan_intensity);
    unsigned int vectors = clState->preferred_vwidth;
    unsigned int hashes_done = threads * vectors;
    gettimeofday(&tv_start, NULL);
    globalThreads[0] = threads;
    localThreads[0] = clState->work_size;

    while (1) {
        struct timeval tv_end, diff, tv_workstart;
        unsigned int i;

        clFinish(clState->commandQueue);
        if (need_work) {
            gettimeofday(&tv_workstart, NULL);
            /* obtain new work from internal workio thread */
            if (unlikely(!get_work(work))) {
                applog(LOG_ERR, "work retrieval failed, exiting "
                       "gpu mining thread %d", mythr->id);
                goto out;
            }
            precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
            work->blk.nonce = 0;

            status = queue_kernel_parameters(&work->blk, kernel, clState->outputBuffer);
            if (unlikely(status != CL_SUCCESS)) {
                applog(LOG_ERR, "Error: clSetKernelArg of all params failed.");
                goto out;
            }
            work_restart[thr_id].restart = 0;
            need_work = false;

            if (opt_debug)
                applog(LOG_DEBUG, "getwork");
        } else {
            status = clSetKernelArg(*kernel, 14, sizeof(uint), (void *)&work->blk.nonce);
            if (unlikely(status != CL_SUCCESS)) {
                applog(LOG_ERR, "Error: clSetKernelArg of nonce failed.");
                goto out;
            }
        }

        status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
        if (unlikely(status != CL_SUCCESS)) {
            applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)");
            goto out;
        }

        /* 127 is used as a flag to say nonces exist */
        if (unlikely(res[127])) {
            /* Clear the buffer again */
            status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
                                          BUFFERSIZE, blank_res, 0, NULL, NULL);
            if (unlikely(status != CL_SUCCESS)) {
                applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
                goto out;
            }
            for (i = 0; i < 127; i++) {
                if (res[i]) {
                    applog(LOG_INFO, "GPU %d found something?", gpu_from_thr_id(thr_id));
                    postcalc_hash(mythr, &work->blk, work, res[i]);
                } else
                    break;
            }
            clFinish(clState->commandQueue);
        }

        status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
                                     BUFFERSIZE, res, 0, NULL, NULL);
        if (unlikely(status != CL_SUCCESS)) {
            applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)");
            goto out;
        }

        gettimeofday(&tv_end, NULL);
        timeval_subtract(&diff, &tv_end, &tv_start);
        hashmeter(thr_id, &diff, hashes_done);
        gettimeofday(&tv_start, NULL);

        work->blk.nonce += hashes_done;
        timeval_subtract(&diff, &tv_end, &tv_workstart);
        if (diff.tv_sec > opt_scantime ||
            work->blk.nonce > MAXTHREADS - hashes_done ||
            work_restart[thr_id].restart)
            need_work = true;
    }
out:
    tq_freeze(mythr->q);

    return NULL;
}
static void restart_threads(void)
{
    int i;

    for (i = 0; i < opt_n_threads + nDevs; i++)
        work_restart[i].restart = 1;
}
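
/* Long-poll thread: issue a blocking getwork request against the long-poll
 * URL advertised by the server's X-Long-Polling header. Each reply means a
 * new block has been found, so all mining threads are flagged to restart on
 * fresh work. */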
static void *longpoll_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    CURL *curl = NULL;
    char *copy_start, *hdr_path, *lp_url = NULL;
    bool need_slash = false;
    int failures = 0;

    hdr_path = tq_pop(mythr->q, NULL);
    if (!hdr_path)
        goto out;

    /* full URL */
    if (strstr(hdr_path, "://")) {
        lp_url = hdr_path;
        hdr_path = NULL;
    }
    /* absolute path, on current server */
    else {
        copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
        if (rpc_url[strlen(rpc_url) - 1] != '/')
            need_slash = true;

        lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
        if (!lp_url)
            goto out;

        sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
    }

    applog(LOG_INFO, "Long-polling activated for %s", lp_url);

    curl = curl_easy_init();
    if (unlikely(!curl)) {
        applog(LOG_ERR, "CURL initialization failed");
        goto out;
    }

    while (1) {
        json_t *val;

        val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
                            false, true);
        if (likely(val)) {
            failures = 0;
            json_decref(val);

            applog(LOG_INFO, "LONGPOLL detected new block");
            restart_threads();
        } else {
            if (failures++ < 10) {
                sleep(30);
                applog(LOG_ERR,
                       "longpoll failed, sleeping for 30s");
            } else {
                applog(LOG_ERR,
                       "longpoll failed, ending thread");
                goto out;
            }
        }
    }

out:
    free(hdr_path);
    free(lp_url);
    tq_freeze(mythr->q);
    if (curl)
        curl_easy_cleanup(curl);

    return NULL;
}

static void show_usage(void)
{
    int i;

    printf("minerd version %s\n\n", VERSION);
    printf("Usage:\tminerd [options]\n\nSupported options:\n");
    for (i = 0; i < ARRAY_SIZE(options_help); i++) {
        struct option_help *h;

        h = &options_help[i];
        printf("--%s\n%s\n\n", h->name, h->helptext);
    }

    exit(1);
}
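
/* Handle a single command line or config file option. 'key' is either a
 * short option character or one of the >1000 codes used for long-only
 * options (1001 url, 1002 userpass, 1003 no-longpoll, 1004 syslog). */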
static void parse_arg(int key, char *arg)
{
    int v, i;

    switch (key) {
    case 'a':
        for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
            if (algo_names[i] &&
                !strcmp(arg, algo_names[i])) {
                opt_algo = i;
                break;
            }
        }
        if (i == ARRAY_SIZE(algo_names))
            show_usage();
        break;
    case 'c': {
        json_error_t err;
        if (opt_config)
            json_decref(opt_config);
        opt_config = json_load_file(arg, &err);
        if (!json_is_object(opt_config)) {
            applog(LOG_ERR, "JSON decode of %s failed", arg);
            show_usage();
        }
        break;
    }
    case 'D':
        opt_debug = true;
        break;
    case 'I':
        v = atoi(arg);
        if (v < 0 || v > 10) /* sanity check */
            show_usage();
        scan_intensity = v;
        break;
    case 'l':
        v = atoi(arg);
        if (v < 0 || v > 9999) /* sanity check */
            show_usage();
        opt_log_interval = v;
        break;
    case 'n':
        /* --ndevs / -n: report the number of detected GPUs and exit.
         * Without this case the option fell through to show_usage(). */
        opt_ndevs = true;
        break;
    case 'p':
        free(rpc_pass);
        rpc_pass = strdup(arg);
        break;
    case 'P':
        opt_protocol = true;
        break;
    case 'q':
        opt_quiet = true;
        break;
    case 'r':
        v = atoi(arg);
        if (v < -1 || v > 9999) /* sanity check */
            show_usage();
        opt_retries = v;
        break;
    case 'R':
        v = atoi(arg);
        if (v < 1 || v > 9999) /* sanity check */
            show_usage();
        opt_fail_pause = v;
        break;
    case 's':
        v = atoi(arg);
        if (v < 1 || v > 9999) /* sanity check */
            show_usage();
        opt_scantime = v;
        break;
    case 't':
        v = atoi(arg);
        if (v < 0 || v > 9999) /* sanity check */
            show_usage();
        opt_n_threads = v;
        break;
    case 'u':
        free(rpc_user);
        rpc_user = strdup(arg);
        break;
    case 'v':
        v = atoi(arg);
        if (v != 1 && v != 2 && v != 4)
            show_usage();
        opt_vectors = v;
        break;
    case 'w':
        v = atoi(arg);
        if (v < 1 || v > 9999) /* sanity check */
            show_usage();
        opt_worksize = v;
        break;
    case 1001: /* --url */
        if (strncmp(arg, "http://", 7) &&
            strncmp(arg, "https://", 8))
            show_usage();
        free(rpc_url);
        rpc_url = strdup(arg);
        break;
    case 1002: /* --userpass */
        if (!strchr(arg, ':'))
            show_usage();
        free(rpc_userpass);
        rpc_userpass = strdup(arg);
        break;
    case 1003:
        want_longpoll = false;
        break;
    case 1004:
        use_syslog = true;
        break;
    default:
        show_usage();
    }
}
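
/* Apply settings from the JSON config file loaded with --config by replaying
 * each recognised key through parse_arg(), so config keys and command line
 * options accept the same values. */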
static void parse_config(void)
{
    int i;
    json_t *val;

    if (!json_is_object(opt_config))
        return;

    for (i = 0; i < ARRAY_SIZE(options); i++) {
        if (!options[i].name)
            break;
        if (!strcmp(options[i].name, "config"))
            continue;

        val = json_object_get(opt_config, options[i].name);
        if (!val)
            continue;

        if (options[i].has_arg && json_is_string(val)) {
            char *s = strdup(json_string_value(val));
            if (!s)
                break;
            parse_arg(options[i].val, s);
            free(s);
        } else if (!options[i].has_arg && json_is_true(val))
            parse_arg(options[i].val, "");
        else
            applog(LOG_ERR, "JSON option %s invalid",
                   options[i].name);
    }
}
static void parse_cmdline(int argc, char *argv[])
{
    int key;

    while (1) {
        /* The short option string covers every short option documented in
         * options_help; long-only options are dispatched via their >1000
         * val codes. */
        key = getopt_long(argc, argv, "a:c:DhI:l:np:Pqr:R:s:t:u:v:w:?", options, NULL);
        if (key < 0)
            break;

        parse_arg(key, optarg);
    }

    parse_config();
}
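
/* Startup: detect GPUs and CPUs, parse options, then spawn the workio
 * thread, the optional longpoll thread, one gpuminer thread per OpenCL
 * device and opt_n_threads CPU miner threads. The main thread then simply
 * waits for the workio thread to exit. */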
int main(int argc, char *argv[])
{
    struct thr_info *thr;
    unsigned int i;
    char name[32];

#ifdef WIN32
    opt_n_threads = 1;
    /* num_processors is used as a modulo divisor by the miner threads;
     * keep it non-zero */
    num_processors = 1;
#else
    num_processors = sysconf(_SC_NPROCESSORS_ONLN);
    opt_n_threads = num_processors;
#endif /* !WIN32 */

    nDevs = clDevicesNum();
    if (opt_ndevs) {
        applog(LOG_INFO, "%i", nDevs);
        return nDevs;
    }

    rpc_url = strdup(DEF_RPC_URL);

    /* parse command line */
    parse_cmdline(argc, argv);

    if (!rpc_userpass) {
        if (!rpc_user || !rpc_pass) {
            applog(LOG_ERR, "No login credentials supplied");
            return 1;
        }
        rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
        if (!rpc_userpass)
            return 1;
        sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
    }

    if (unlikely(pthread_mutex_init(&time_lock, NULL)))
        return 1;
    if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
        return 1;
    if (unlikely(pthread_mutex_init(&get_lock, NULL)))
        return 1;

#ifdef HAVE_SYSLOG_H
    if (use_syslog)
        openlog("cpuminer", LOG_PID, LOG_USER);
#endif

    work_restart = calloc(opt_n_threads + nDevs, sizeof(*work_restart));
    if (!work_restart)
        return 1;

    thr_info = calloc(opt_n_threads + 2 + nDevs, sizeof(*thr));
    if (!thr_info)
        return 1;

    /* init workio thread info */
    work_thr_id = opt_n_threads + nDevs;
    thr = &thr_info[work_thr_id];
    thr->id = work_thr_id;
    thr->q = tq_new();
    if (!thr->q)
        return 1;

    /* start work I/O thread */
    if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
        applog(LOG_ERR, "workio thread create failed");
        return 1;
    }

    /* init longpoll thread info */
    if (want_longpoll) {
        longpoll_thr_id = opt_n_threads + nDevs + 1;
        thr = &thr_info[longpoll_thr_id];
        thr->id = longpoll_thr_id;
        thr->q = tq_new();
        if (!thr->q)
            return 1;

        /* start longpoll thread */
        if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
            applog(LOG_ERR, "longpoll thread create failed");
            return 1;
        }
    } else
        longpoll_thr_id = -1;

    gettimeofday(&total_tv_start, NULL);
    gettimeofday(&total_tv_end, NULL);

    /* start GPU mining threads */
    for (i = 0; i < nDevs; i++) {
        thr = &thr_info[i];
        thr->id = i;
        thr->q = tq_new();
        if (!thr->q)
            return 1;
        applog(LOG_INFO, "Init GPU %i", i);
        clStates[i] = initCl(i, name, sizeof(name));
        if (!clStates[i]) {
            applog(LOG_ERR, "Failed to init GPU %d", i);
            continue;
        }
        applog(LOG_INFO, "initCl() finished. Found %s", name);

        if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
            applog(LOG_ERR, "thread %d create failed", i);
            return 1;
        }
        sleep(1); /* don't pound RPC server all at once */
    }
    applog(LOG_INFO, "%d gpu miner threads started", i);

    /* start CPU mining threads */
    for (i = nDevs; i < nDevs + opt_n_threads; i++) {
        thr = &thr_info[i];
        thr->id = i;
        thr->q = tq_new();
        if (!thr->q)
            return 1;

        if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
            applog(LOG_ERR, "thread %d create failed", i);
            return 1;
        }
        sleep(1); /* don't pound RPC server all at once */
    }

    applog(LOG_INFO, "%d cpu miner threads started, "
           "using SHA256 '%s' algorithm.",
           opt_n_threads,
           algo_names[opt_algo]);

    /* Restart count as it will be wrong till all threads are started */
    pthread_mutex_lock(&hash_lock);
    gettimeofday(&total_tv_start, NULL);
    gettimeofday(&total_tv_end, NULL);
    total_mhashes_done = 0;
    pthread_mutex_unlock(&hash_lock);

    /* main loop - simply wait for workio thread to exit */
    pthread_join(thr_info[work_thr_id].pth, NULL);

    applog(LOG_INFO, "workio thread dead, exiting.");

    return 0;
}