/* cpu-miner.c */
  1. /*
  2. * Copyright 2011 Con Kolivas
  3. * Copyright 2010 Jeff Garzik
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the Free
  7. * Software Foundation; either version 2 of the License, or (at your option)
  8. * any later version. See COPYING for more details.
  9. */
  10. #include "cpuminer-config.h"
  11. #define _GNU_SOURCE
  12. #include <stdio.h>
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include <stdbool.h>
  16. #include <stdint.h>
  17. #include <unistd.h>
  18. #include <sys/time.h>
  19. #include <time.h>
  20. #include <math.h>
  21. #ifndef WIN32
  22. #include <sys/resource.h>
  23. #endif
  24. #include <getopt.h>
  25. #include <jansson.h>
  26. #include <curl/curl.h>
  27. #include "compat.h"
  28. #include "miner.h"
  29. #include "findnonce.h"
  30. #include "ocl.h"
  31. #define PROGRAM_NAME "minerd"
  32. #define DEF_RPC_URL "http://127.0.0.1:8332/"
  33. #define DEF_RPC_USERNAME "rpcuser"
  34. #define DEF_RPC_PASSWORD "rpcpass"
  35. #define DEF_RPC_USERPASS DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD
  36. #ifdef __linux /* Linux specific policy and affinity management */
  37. #include <sched.h>
  38. static inline void drop_policy(void)
  39. {
  40. struct sched_param param;
  41. #ifdef SCHED_IDLE
  42. if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
  43. #endif
  44. #ifdef SCHED_BATCH
  45. sched_setscheduler(0, SCHED_BATCH, &param);
  46. #endif
  47. }
  48. static inline void affine_to_cpu(int id, int cpu)
  49. {
  50. cpu_set_t set;
  51. CPU_ZERO(&set);
  52. CPU_SET(cpu, &set);
  53. sched_setaffinity(0, sizeof(&set), &set);
  54. applog(LOG_INFO, "Binding thread %d to cpu %d", id, cpu);
  55. }
  56. #else
/* Non-Linux fallback: scheduling policy adjustment is unavailable. */
static inline void drop_policy(void)
{
}

/* Non-Linux fallback: CPU affinity pinning is unavailable. */
static inline void affine_to_cpu(int id, int cpu)
{
}
  63. #endif
/* Commands understood by the workio service thread. */
enum workio_commands {
	WC_GET_WORK,	/* fetch fresh work from the RPC server */
	WC_SUBMIT_WORK,	/* submit a solved share upstream */
};
/* A single command queued to the workio thread. */
struct workio_cmd {
	enum workio_commands	cmd;
	struct thr_info		*thr;	/* thread the command is on behalf of */
	union {
		struct work	*work;	/* WC_SUBMIT_WORK payload (owned by cmd) */
	} u;
};
/* Selectable sha256 scanning implementations (-a option). */
enum sha256_algos {
	ALGO_C,			/* plain C */
	ALGO_4WAY,		/* parallel SSE2 */
	ALGO_VIA,		/* VIA padlock */
	ALGO_CRYPTOPP,		/* Crypto++ (C) */
	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
	ALGO_SSE2_64,		/* SSE2 for x86_64 */
};
/* Command-line names for each algorithm, indexed by enum sha256_algos.
 * Entries exist only when the implementation was compiled in. */
static const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
};
/* Runtime option flags and shared miner state. */
bool opt_debug = false;		/* -D: verbose debug logging */
bool opt_protocol = false;	/* -P: dump JSON-RPC traffic */
bool opt_ndevs = false;		/* -n: list detected GPUs */
bool want_longpoll = true;	/* longpoll on unless --no-longpoll */
bool have_longpoll = false;
bool use_syslog = false;	/* log to syslog instead of stderr */
static bool opt_quiet = false;		/* -q: suppress hashmeter output */
static int opt_retries = 10;		/* -r: RPC retries, -1 = forever */
static int opt_fail_pause = 30;		/* -R: seconds between retries */
static int opt_log_interval = 5;	/* -l: seconds between log lines */
int opt_vectors;		/* -v: OpenCL vector width override */
int opt_worksize;		/* -w: OpenCL worksize override */
int opt_scantime = 60;		/* -s: max seconds scanning one work item */
static json_t *opt_config;	/* parsed -c config file, if any */
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;		/* detected OpenCL device count */
static int opt_g_threads = 2;	/* GPU threads per device */
static int gpu_threads;		/* total GPU mining threads */
static int opt_n_threads = 1;	/* -t: CPU mining threads */
static int num_processors;	/* detected CPU count */
static int scan_intensity = 4;	/* -I: GPU scan intensity (0-10) */
static char *rpc_url;
static char *rpc_userpass;	/* "user:pass" sent to the server */
static char *rpc_user, *rpc_pass;
struct thr_info *thr_info;	/* one slot per worker/service thread */
static int work_thr_id;		/* thr_info index whose queue workio serves */
int longpoll_thr_id;
struct work_restart *work_restart = NULL;	/* per-thread restart flags */
pthread_mutex_t time_lock;
static pthread_mutex_t hash_lock;	/* guards hashmeter totals */
static pthread_mutex_t submit_lock;	/* serializes share submission */
static pthread_mutex_t get_lock;	/* guards work_heap in get_work() */
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;	/* share counters; see submit_upstream_work() */
/* One entry of the --help listing: option name plus its description. */
struct option_help {
	const char	*name;
	const char	*helptext;
};
/* Help text for every supported option.  Algorithm entries appear only
 * when the corresponding implementation was compiled in. */
static struct option_help options_help[] = {
	{ "help",
	  "(-h) Display this help text" },

	{ "config FILE",
	  "(-c FILE) JSON-format configuration file (default: none)\n"
	  "See example-cfg.json for an example configuration." },

	{ "algo XXX",
	  "(-a XXX) Specify sha256 implementation:\n"
	  "\tc\t\tLinux kernel sha256, implemented in C (default)"
#ifdef WANT_SSE2_4WAY
	  "\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
	  "\n\tvia\t\tVIA padlock implementation"
#endif
	  "\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
	  "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
	  "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
	  },

	{ "quiet",
	  "(-q) Disable per-thread hashmeter output (default: off)" },

	{ "debug",
	  "(-D) Enable debug output (default: off)" },

	{ "intensity",
	  "(-I) Intensity of scanning (0 - 10, default 4)" },

	{ "log",
	  "(-l) Interval in seconds between log output (default 5)" },

	{ "ndevs",
	  "(-n) Display number of detected GPUs" },

	{ "no-longpoll",
	  "Disable X-Long-Polling support (default: enabled)" },

	{ "protocol-dump",
	  "(-P) Verbose dump of protocol-level activities (default: off)" },

	{ "retries N",
	  "(-r N) Number of times to retry, if JSON-RPC call fails\n"
	  "\t(default: 10; use -1 for \"never\")" },

	{ "retry-pause N",
	  "(-R N) Number of seconds to pause, between retries\n"
	  "\t(default: 30)" },

	{ "scantime N",
	  "(-s N) Upper bound on time spent scanning current work,\n"
	  "\tin seconds. (default: 60)" },

#ifdef HAVE_SYSLOG_H
	{ "syslog",
	  "Use system log for output messages (default: standard error)" },
#endif

	{ "threads N",
	  "(-t N) Number of miner CPU threads (default: number of processors or 0 if GPU mining)" },

	{ "url URL",
	  "URL for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_URL ")" },

	{ "userpass USERNAME:PASSWORD",
	  "Username:Password pair for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERPASS ")" },

	{ "user USERNAME",
	  "(-u USERNAME) Username for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERNAME ")" },

	{ "vectors N",
	  "(-v N) Override detected optimal vector width (default: detected, 1,2 or 4)" },

	{ "worksize N",
	  "(-w N) Override detected optimal worksize (default: detected)" },

	{ "pass PASSWORD",
	  "(-p PASSWORD) Password for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_PASSWORD ")" },
};
/* getopt_long() option table; values above 255 (1001-1004) are
 * long-only options with no short-option equivalent. */
static struct option options[] = {
	{ "algo", 1, NULL, 'a' },
	{ "config", 1, NULL, 'c' },
	{ "debug", 0, NULL, 'D' },
	{ "help", 0, NULL, 'h' },
	{ "intensity", 1, NULL, 'I' },
	{ "log", 1, NULL, 'l' },
	{ "ndevs", 0, NULL, 'n' },
	{ "no-longpoll", 0, NULL, 1003 },
	{ "pass", 1, NULL, 'p' },
	{ "protocol-dump", 0, NULL, 'P' },
	{ "quiet", 0, NULL, 'q' },
	{ "threads", 1, NULL, 't' },
	{ "retries", 1, NULL, 'r' },
	{ "retry-pause", 1, NULL, 'R' },
	{ "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
	{ "syslog", 0, NULL, 1004 },
#endif
	{ "url", 1, NULL, 1001 },
	{ "user", 1, NULL, 'u' },
	{ "vectors", 1, NULL, 'v' },
	{ "worksize", 1, NULL, 'w' },
	{ "userpass", 1, NULL, 1002 },
};
/* One unit of getwork-style work plus result state. */
struct work {
	unsigned char	data[128];	/* block header; scanning starts at offset 64 */
	unsigned char	hash1[64];
	unsigned char	midstate[32];	/* sha256 midstate from the server */
	unsigned char	target[32];	/* difficulty target from the server */
	unsigned char	hash[32];

	uint32_t	output[1];
	uint32_t	res_nonce;
	uint32_t	valid;
	dev_blk_ctx	blk;	/* precalculated GPU kernel inputs */
};
  248. static bool jobj_binary(const json_t *obj, const char *key,
  249. void *buf, size_t buflen)
  250. {
  251. const char *hexstr;
  252. json_t *tmp;
  253. tmp = json_object_get(obj, key);
  254. if (unlikely(!tmp)) {
  255. applog(LOG_ERR, "JSON key '%s' not found", key);
  256. return false;
  257. }
  258. hexstr = json_string_value(tmp);
  259. if (unlikely(!hexstr)) {
  260. applog(LOG_ERR, "JSON key '%s' is not a string", key);
  261. return false;
  262. }
  263. if (!hex2bin(buf, hexstr, buflen))
  264. return false;
  265. return true;
  266. }
  267. static bool work_decode(const json_t *val, struct work *work)
  268. {
  269. if (unlikely(!jobj_binary(val, "midstate",
  270. work->midstate, sizeof(work->midstate)))) {
  271. applog(LOG_ERR, "JSON inval midstate");
  272. goto err_out;
  273. }
  274. if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
  275. applog(LOG_ERR, "JSON inval data");
  276. goto err_out;
  277. }
  278. if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
  279. applog(LOG_ERR, "JSON inval hash1");
  280. goto err_out;
  281. }
  282. if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
  283. applog(LOG_ERR, "JSON inval target");
  284. goto err_out;
  285. }
  286. memset(work->hash, 0, sizeof(work->hash));
  287. return true;
  288. err_out:
  289. return false;
  290. }
  291. static bool submit_upstream_work(CURL *curl, char *hexstr)
  292. {
  293. json_t *val, *res;
  294. char s[345];
  295. bool rc = false;
  296. /* build JSON-RPC request */
  297. sprintf(s,
  298. "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
  299. hexstr);
  300. if (opt_debug)
  301. applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);
  302. /* issue JSON-RPC request */
  303. val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
  304. if (unlikely(!val)) {
  305. applog(LOG_ERR, "submit_upstream_work json_rpc_call failed");
  306. goto out;
  307. }
  308. res = json_object_get(val, "result");
  309. /* Theoretically threads could race when modifying accepted and
  310. * rejected values but the chance of two submits completing at the
  311. * same time is zero so there is no point adding extra locking */
  312. if (json_is_true(res)) {
  313. accepted++;
  314. applog(LOG_INFO, "PROOF OF WORK RESULT: true (yay!!!)");
  315. } else {
  316. rejected++;
  317. applog(LOG_INFO, "PROOF OF WORK RESULT: false (booooo)");
  318. }
  319. json_decref(val);
  320. rc = true;
  321. out:
  322. return rc;
  323. }
/* Canonical getwork request used whenever new work is fetched. */
static const char *rpc_req =
	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";
  326. static bool get_upstream_work(CURL *curl, struct work *work)
  327. {
  328. json_t *val;
  329. bool rc;
  330. val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
  331. want_longpoll, false);
  332. if (!val)
  333. return false;
  334. rc = work_decode(json_object_get(val, "result"), work);
  335. json_decref(val);
  336. return rc;
  337. }
  338. static void workio_cmd_free(struct workio_cmd *wc)
  339. {
  340. if (!wc)
  341. return;
  342. switch (wc->cmd) {
  343. case WC_SUBMIT_WORK:
  344. free(wc->u.work);
  345. break;
  346. default: /* do nothing */
  347. break;
  348. }
  349. memset(wc, 0, sizeof(*wc)); /* poison */
  350. free(wc);
  351. }
  352. static bool workio_get_work(struct workio_cmd *wc)
  353. {
  354. struct work *ret_work;
  355. int failures = 0;
  356. bool ret = false;
  357. CURL *curl;
  358. ret_work = calloc(1, sizeof(*ret_work));
  359. if (!ret_work) {
  360. applog(LOG_ERR, "Failed to calloc ret_work in workio_get_work");
  361. return ret;
  362. }
  363. curl = curl_easy_init();
  364. if (unlikely(!curl)) {
  365. applog(LOG_ERR, "CURL initialization failed");
  366. return ret;
  367. }
  368. /* obtain new work from bitcoin via JSON-RPC */
  369. while (!get_upstream_work(curl, ret_work)) {
  370. if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
  371. applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
  372. free(ret_work);
  373. goto out;
  374. }
  375. /* pause, then restart work-request loop */
  376. applog(LOG_ERR, "json_rpc_call failed, retry after %d seconds",
  377. opt_fail_pause);
  378. sleep(opt_fail_pause);
  379. }
  380. /* send work to requesting thread */
  381. if (unlikely(!tq_push(wc->thr->q, ret_work))) {
  382. applog(LOG_ERR, "Failed to tq_push work in workio_get_work");
  383. free(ret_work);
  384. } else
  385. ret = true;
  386. out:
  387. curl_easy_cleanup(curl);
  388. return ret;
  389. }
  390. static void *submit_thread(void *userdata)
  391. {
  392. char *hexstr = (char *)userdata;
  393. int failures = 0;
  394. CURL *curl;
  395. /* libcurl seems to be not thread safe so only submit one at a time! */
  396. pthread_mutex_lock(&submit_lock);
  397. curl = curl_easy_init();
  398. if (unlikely(!curl)) {
  399. applog(LOG_ERR, "CURL initialization failed");
  400. exit (1);
  401. }
  402. /* submit solution to bitcoin via JSON-RPC */
  403. while (!submit_upstream_work(curl, hexstr)) {
  404. if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
  405. applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
  406. free(hexstr);
  407. curl_easy_cleanup(curl);
  408. exit (1);
  409. }
  410. /* pause, then restart work-request loop */
  411. applog(LOG_ERR, "...retry after %d seconds",
  412. opt_fail_pause);
  413. sleep(opt_fail_pause);
  414. }
  415. free(hexstr);
  416. curl_easy_cleanup(curl);
  417. pthread_mutex_unlock(&submit_lock);
  418. return NULL;
  419. }
  420. /* Work is submitted asynchronously by creating a thread for each submit
  421. * thus avoiding the mining threads having to wait till work is submitted
  422. * before they can continue working. */
  423. static bool workio_submit_work(struct workio_cmd *wc)
  424. {
  425. struct work *work;
  426. pthread_t thr;
  427. char *hexstr;
  428. work = wc->u.work;
  429. /* build hex string */
  430. hexstr = bin2hex(work->data, sizeof(work->data));
  431. if (unlikely(!hexstr)) {
  432. applog(LOG_ERR, "workio_submit_work OOM");
  433. return false;
  434. }
  435. if (unlikely(pthread_create(&thr, NULL, submit_thread, (void *)hexstr))) {
  436. applog(LOG_ERR, "Failed to create submit_thread");
  437. return false;
  438. }
  439. pthread_detach(thr);
  440. return true;
  441. }
  442. static void *workio_thread(void *userdata)
  443. {
  444. struct thr_info *mythr = userdata;
  445. bool ok = true;
  446. while (ok) {
  447. struct workio_cmd *wc;
  448. /* wait for workio_cmd sent to us, on our queue */
  449. wc = tq_pop(mythr->q, NULL);
  450. if (!wc) {
  451. ok = false;
  452. break;
  453. }
  454. /* process workio_cmd */
  455. switch (wc->cmd) {
  456. case WC_GET_WORK:
  457. ok = workio_get_work(wc);
  458. break;
  459. case WC_SUBMIT_WORK:
  460. ok = workio_submit_work(wc);
  461. break;
  462. default: /* should never happen */
  463. ok = false;
  464. break;
  465. }
  466. workio_cmd_free(wc);
  467. }
  468. tq_freeze(mythr->q);
  469. return NULL;
  470. }
/* Accumulate per-thread hash counts into shared totals and print a
 * hashrate line roughly every opt_log_interval seconds.  'diff' is the
 * elapsed time of the scan pass that produced 'hashes_done'. */
static void hashmeter(int thr_id, struct timeval *diff,
		      unsigned long hashes_done)
{
	struct timeval temp_tv_end, total_diff;
	double khashes, secs;
	double total_secs;
	double local_mhashes, local_secs;
	/* hashes accumulated since the last printed line (shared) */
	static unsigned long local_hashes_done = 0;

	/* Don't bother calculating anything if we're not displaying it */
	if (opt_quiet || !opt_log_interval)
		return;
	khashes = hashes_done / 1000.0;
	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
	if (opt_debug)
		applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
		       thr_id, hashes_done, hashes_done / secs);
	gettimeofday(&temp_tv_end, NULL);
	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	if (opt_n_threads + gpu_threads > 1) {
		/* Totals are updated by all threads so can race without locking */
		pthread_mutex_lock(&hash_lock);
		total_mhashes_done += (double)hashes_done / 1000000.0;
		local_hashes_done += hashes_done;
		if (total_diff.tv_sec < opt_log_interval) {
			/* Only update the total every opt_log_interval seconds */
			pthread_mutex_unlock(&hash_lock);
			return;
		}
		gettimeofday(&total_tv_end, NULL);
		pthread_mutex_unlock(&hash_lock);
	} else {
		/* single mining thread: no locking needed */
		total_mhashes_done += (double)hashes_done / 1000000.0;
		local_hashes_done += hashes_done;
		if (total_diff.tv_sec < opt_log_interval)
			return;
		gettimeofday(&total_tv_end, NULL);
	}
	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
	total_secs = (double)total_diff.tv_sec +
		((double)total_diff.tv_usec / 1000000.0);
	local_mhashes = local_hashes_done / 1000000.0;
	local_hashes_done = 0;
	applog(LOG_INFO, "[%.2f | %.2f Mhash/s] [%d Accepted] [%d Rejected]",
	       local_mhashes / local_secs,
	       total_mhashes_done / total_secs, accepted, rejected);
}
/* One buffered work item kept ahead of demand; protected by get_lock. */
static struct work *work_heap = NULL;

/* Since we always have one extra work item queued, set the thread id to 0
 * for all the work and just give the work to the first thread that requests
 * work */
static bool get_work(struct work *work)
{
	struct thr_info *thr = &thr_info[0];
	struct workio_cmd *wc;
	bool ret = false;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc))
		goto out;

	wc->cmd = WC_GET_WORK;
	wc->thr = thr;

	/* send work request to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		workio_cmd_free(wc);
		goto out;
	}

	/* work_heap is protected by get_lock */
	pthread_mutex_lock(&get_lock);
	if (likely(work_heap)) {
		/* common case: hand out the buffered item */
		memcpy(work, work_heap, sizeof(*work));
		/* Wait for next response, a unit of work - it should be queued */
		free(work_heap);
		work_heap = tq_pop(thr->q, NULL);
	} else {
		/* wait for 1st response, or 1st response after failure */
		work_heap = tq_pop(thr->q, NULL);
		if (unlikely(!work_heap))
			goto out_unlock;
		/* send for another work request for the next time get_work
		 * is called. */
		wc = calloc(1, sizeof(*wc));
		if (unlikely(!wc)) {
			free(work_heap);
			work_heap = NULL;
			goto out_unlock;
		}
		wc->cmd = WC_GET_WORK;
		wc->thr = thr;
		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
			workio_cmd_free(wc);
			free(work_heap);
			work_heap = NULL;
			goto out_unlock;
		}
	}
	ret = true;
out_unlock:
	pthread_mutex_unlock(&get_lock);
out:
	return ret;
}
  573. static bool submit_work(struct thr_info *thr, const struct work *work_in)
  574. {
  575. struct workio_cmd *wc;
  576. /* fill out work request message */
  577. wc = calloc(1, sizeof(*wc));
  578. if (!wc)
  579. return false;
  580. wc->u.work = malloc(sizeof(*work_in));
  581. if (!wc->u.work)
  582. goto err_out;
  583. wc->cmd = WC_SUBMIT_WORK;
  584. wc->thr = thr;
  585. memcpy(wc->u.work, work_in, sizeof(*work_in));
  586. /* send solution to workio thread */
  587. if (!tq_push(thr_info[work_thr_id].q, wc))
  588. goto err_out;
  589. return true;
  590. err_out:
  591. workio_cmd_free(wc);
  592. return false;
  593. }
  594. bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
  595. {
  596. work->data[64+12+0] = (nonce>>0) & 0xff;
  597. work->data[64+12+1] = (nonce>>8) & 0xff;
  598. work->data[64+12+2] = (nonce>>16) & 0xff;
  599. work->data[64+12+3] = (nonce>>24) & 0xff;
  600. return submit_work(thr, work);
  601. }
/* Map a miner thread id to a CPU index: GPU threads occupy the first
 * gpu_threads ids, so subtract them before wrapping on the CPU count. */
static inline int cpu_from_thr_id(int thr_id)
{
	return (thr_id - gpu_threads) % num_processors;
}
/* CPU mining loop: fetch work, scan nonces with the configured sha256
 * implementation, report the hash rate, adapt the nonce range to the
 * target scan time, and submit any candidate result. */
static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;	/* grows adaptively below */

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(cpu_from_thr_id(thr_id), thr_id % num_processors);

	while (1) {
		struct work work __attribute__((aligned(128)));
		unsigned long hashes_done;
		struct timeval tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		/* obtain new work from internal workio thread */
		if (unlikely(!get_work(&work))) {
			applog(LOG_ERR, "work retrieval failed, exiting "
				"mining thread %d", mythr->id);
			goto out;
		}
		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done);
			break;
#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done);
			/* -1 (as unsigned) means no nonce found */
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif
#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif
#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done);
			break;
#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done);
			break;
#endif
		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashmeter(thr_id, &diff, hashes_done);

		/* adjust max_nonce to meet target scan time (round the
		 * elapsed time to the nearest second first) */
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec > 0) {
			max64 =
				((uint64_t)hashes_done * opt_scantime) / diff.tv_sec;
			if (max64 > 0xfffffffaULL)
				max64 = 0xfffffffaULL;
			max_nonce = max64;
		}

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			applog(LOG_INFO, "CPU %d found something?", cpu_from_thr_id(thr_id));
			if (!submit_work(mythr, &work))
				break;
		}
	}
out:
	tq_freeze(mythr->q);
	return NULL;
}
/* Timing/retry constants for service loops. */
enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};

/* One OpenCL state per GPU mining thread, indexed by thread id. */
static _clState *clStates[16];
/* Bind every precalculated block value as a kernel argument, in order.
 * The positions must match the kernel's parameter list exactly; note
 * that the nonce lands at index 14, which gpuminer_thread() re-sets
 * between runs without re-binding the other arguments.  Returns the
 * OR of all clSetKernelArg status codes (0 on full success). */
static inline cl_int queue_kernel_parameters(dev_blk_ctx *blk, cl_kernel *kernel,
					     struct _cl_mem *output)
{
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	status |= clSetKernelArg(*kernel, num++, sizeof(output), (void *)&output);

	return status;
}
/* Map a miner thread id to its GPU index.  GPU threads occupy the
 * lowest thread ids (see cpu_from_thr_id), so this is the identity. */
static inline int gpu_from_thr_id(int thr_id)
{
	return thr_id;
}
  751. static void *gpuminer_thread(void *userdata)
  752. {
  753. struct thr_info *mythr = userdata;
  754. struct timeval tv_start;
  755. int thr_id = mythr->id;
  756. uint32_t res[128], blank_res[128];
  757. cl_kernel *kernel;
  758. memset(blank_res, 0, BUFFERSIZE);
  759. size_t globalThreads[1];
  760. size_t localThreads[1];
  761. cl_int status;
  762. _clState *clState = clStates[thr_id];
  763. kernel = &clState->kernel;
  764. status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_TRUE, 0,
  765. BUFFERSIZE, blank_res, 0, NULL, NULL);
  766. if (unlikely(status != CL_SUCCESS))
  767. { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
  768. struct work *work = malloc(sizeof(struct work));
  769. bool need_work = true;
  770. unsigned int threads = 1 << (15 + scan_intensity);
  771. unsigned int vectors = clState->preferred_vwidth;
  772. unsigned int hashes_done = threads * vectors;
  773. gettimeofday(&tv_start, NULL);
  774. globalThreads[0] = threads;
  775. localThreads[0] = clState->work_size;
  776. while (1) {
  777. struct timeval tv_end, diff, tv_workstart;
  778. unsigned int i;
  779. clFinish(clState->commandQueue);
  780. if (need_work) {
  781. gettimeofday(&tv_workstart, NULL);
  782. /* obtain new work from internal workio thread */
  783. if (unlikely(!get_work(work))) {
  784. applog(LOG_ERR, "work retrieval failed, exiting "
  785. "gpu mining thread %d", mythr->id);
  786. goto out;
  787. }
  788. precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
  789. work->blk.nonce = 0;
  790. status = queue_kernel_parameters(&work->blk, kernel, clState->outputBuffer);
  791. if (unlikely(status != CL_SUCCESS))
  792. { applog(LOG_ERR, "Error: clSetKernelArg of all params failed."); goto out; }
  793. work_restart[thr_id].restart = 0;
  794. need_work = false;
  795. if (opt_debug)
  796. applog(LOG_DEBUG, "getwork");
  797. } else {
  798. status = clSetKernelArg(*kernel, 14, sizeof(uint), (void *)&work->blk.nonce);
  799. if (unlikely(status != CL_SUCCESS))
  800. { applog(LOG_ERR, "Error: clSetKernelArg of nonce failed."); goto out; }
  801. }
  802. status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
  803. globalThreads, localThreads, 0, NULL, NULL);
  804. if (unlikely(status != CL_SUCCESS))
  805. { applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)"); goto out; }
  806. /* 127 is used as a flag to say nonces exist */
  807. if (unlikely(res[127])) {
  808. /* Clear the buffer again */
  809. status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
  810. BUFFERSIZE, blank_res, 0, NULL, NULL);
  811. if (unlikely(status != CL_SUCCESS))
  812. { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
  813. for (i = 0; i < 127; i++) {
  814. if (res[i]) {
  815. applog(LOG_INFO, "GPU %d found something?", gpu_from_thr_id(thr_id));
  816. postcalc_hash(mythr, &work->blk, work, res[i]);
  817. } else
  818. break;
  819. }
  820. clFinish(clState->commandQueue);
  821. }
  822. status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
  823. BUFFERSIZE, res, 0, NULL, NULL);
  824. if (unlikely(status != CL_SUCCESS))
  825. { applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)"); goto out;}
  826. gettimeofday(&tv_end, NULL);
  827. timeval_subtract(&diff, &tv_end, &tv_start);
  828. hashmeter(thr_id, &diff, hashes_done);
  829. gettimeofday(&tv_start, NULL);
  830. work->blk.nonce += hashes_done;
  831. timeval_subtract(&diff, &tv_end, &tv_workstart);
  832. if (diff.tv_sec > opt_scantime ||
  833. work->blk.nonce > MAXTHREADS - hashes_done ||
  834. work_restart[thr_id].restart)
  835. need_work = true;
  836. }
  837. out:
  838. tq_freeze(mythr->q);
  839. return NULL;
  840. }
  841. static void restart_threads(void)
  842. {
  843. int i;
  844. for (i = 0; i < opt_n_threads + gpu_threads; i++)
  845. work_restart[i].restart = 1;
  846. /* If longpoll has detected a new block, we should discard any queued
  847. * blocks in work_heap */
  848. pthread_mutex_lock(&get_lock);
  849. if (likely(work_heap)) {
  850. free(work_heap);
  851. work_heap = NULL;
  852. }
  853. pthread_mutex_unlock(&get_lock);
  854. }
/* Long-polling thread: blocks on the server's long-poll URL; each completed
 * request signals a newly found block, so all miner threads are restarted
 * onto fresh work.  The LP path arrives via the thread queue (from the
 * X-Long-Polling header) and may be a full URL or a path on rpc_url. */
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		/* Ownership transfers to lp_url; NULL hdr_path so the single
		 * free() at out: does not double-free it. */
		lp_url = hdr_path;
		hdr_path = NULL;
	}
	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (rpc_url[strlen(rpc_url) - 1] != '/')
			need_slash = true;

		/* +2: optional '/' separator plus the terminating NUL. */
		lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialization failed");
		goto out;
	}

	while (1) {
		json_t *val;

		/* Blocks until the server responds (i.e. a new block appears)
		 * or the request fails. */
		val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			failures = 0;
			json_decref(val);

			applog(LOG_INFO, "LONGPOLL detected new block");
			restart_threads();
		} else {
			/* Back off 30s per failure; give up after 10 in a row. */
			if (failures++ < 10) {
				sleep(30);
				applog(LOG_ERR,
					"longpoll failed, sleeping for 30s");
			} else {
				applog(LOG_ERR,
					"longpoll failed, ending thread");
				goto out;
			}
		}
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}
  915. static void show_usage(void)
  916. {
  917. int i;
  918. printf("minerd version %s\n\n", VERSION);
  919. printf("Usage:\tminerd [options]\n\nSupported options:\n");
  920. for (i = 0; i < ARRAY_SIZE(options_help); i++) {
  921. struct option_help *h;
  922. h = &options_help[i];
  923. printf("--%s\n%s\n\n", h->name, h->helptext);
  924. }
  925. exit(1);
  926. }
/* Apply a single option value.  key is the short-option character (or the
 * long-option code >= 1001) and arg its argument, if any.  Called both from
 * getopt_long processing and from parse_config() replaying the JSON config
 * file.  Any invalid value terminates the program via show_usage(). */
static void parse_arg (int key, char *arg)
{
	int v, i;

	switch(key) {
	case 'a':
		/* Select the SHA256 algorithm implementation by name. */
		for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
			if (algo_names[i] &&
			    !strcmp(arg, algo_names[i])) {
				opt_algo = i;
				break;
			}
		}
		/* i only reaches the table size when no name matched. */
		if (i == ARRAY_SIZE(algo_names))
			show_usage();
		break;
	case 'c': {
		/* Load a JSON config file; its keys are replayed through
		 * parse_arg() later by parse_config(). */
		json_error_t err;
		if (opt_config)
			json_decref(opt_config);
		opt_config = json_load_file(arg, &err);
		if (!json_is_object(opt_config)) {
			applog(LOG_ERR, "JSON decode of %s failed", arg);
			show_usage();
		}
		break;
	}
	case 'D':
		opt_debug = true;
		break;
	case 'I':
		/* GPU scan intensity: sets the nonce window size per pass. */
		v = atoi(arg);
		if (v < 0 || v > 10) /* sanity check */
			show_usage();
		scan_intensity = v;
		break;
	case 'l':
		/* Hashmeter log interval in seconds. */
		v = atoi(arg);
		if (v < 0 || v > 9999) /* sanity check */
			show_usage();
		opt_log_interval = v;
		break;
	case 'p':
		free(rpc_pass);
		rpc_pass = strdup(arg);
		break;
	case 'P':
		opt_protocol = true;
		break;
	case 'q':
		opt_quiet = true;
		break;
	case 'r':
		/* Retry count; -1 means retry indefinitely. */
		v = atoi(arg);
		if (v < -1 || v > 9999) /* sanity check */
			show_usage();
		opt_retries = v;
		break;
	case 'R':
		/* Pause between retries, in seconds. */
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();
		opt_fail_pause = v;
		break;
	case 's':
		/* Upper bound on time spent scanning one work item. */
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();
		opt_scantime = v;
		break;
	case 't':
		/* Number of CPU mining threads (0 disables CPU mining). */
		v = atoi(arg);
		if (v < 0 || v > 9999) /* sanity check */
			show_usage();
		opt_n_threads = v;
		break;
	case 'u':
		free(rpc_user);
		rpc_user = strdup(arg);
		break;
	case 'v':
		/* OpenCL vector width: only 1, 2 or 4 are meaningful. */
		v = atoi(arg);
		if (v != 1 && v != 2 && v != 4)
			show_usage();
		opt_vectors = v;
		break;
	case 'w':
		/* OpenCL local work size. */
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();
		opt_worksize = v;
		break;
	case 1001: /* --url */
		if (strncmp(arg, "http://", 7) &&
		    strncmp(arg, "https://", 8))
			show_usage();
		free(rpc_url);
		rpc_url = strdup(arg);
		break;
	case 1002: /* --userpass */
		/* Combined "user:pass" form; must contain the separator. */
		if (!strchr(arg, ':'))
			show_usage();
		free(rpc_userpass);
		rpc_userpass = strdup(arg);
		break;
	case 1003:
		/* --no-longpoll */
		want_longpoll = false;
		break;
	case 1004:
		/* --syslog */
		use_syslog = true;
		break;
	default:
		show_usage();
	}
}
  1041. static void parse_config(void)
  1042. {
  1043. int i;
  1044. json_t *val;
  1045. if (!json_is_object(opt_config))
  1046. return;
  1047. for (i = 0; i < ARRAY_SIZE(options); i++) {
  1048. if (!options[i].name)
  1049. break;
  1050. if (!strcmp(options[i].name, "config"))
  1051. continue;
  1052. val = json_object_get(opt_config, options[i].name);
  1053. if (!val)
  1054. continue;
  1055. if (options[i].has_arg && json_is_string(val)) {
  1056. char *s = strdup(json_string_value(val));
  1057. if (!s)
  1058. break;
  1059. parse_arg(options[i].val, s);
  1060. free(s);
  1061. } else if (!options[i].has_arg && json_is_true(val))
  1062. parse_arg(options[i].val, "");
  1063. else
  1064. applog(LOG_ERR, "JSON option %s invalid",
  1065. options[i].name);
  1066. }
  1067. }
  1068. static void parse_cmdline(int argc, char *argv[])
  1069. {
  1070. int key;
  1071. while (1) {
  1072. key = getopt_long(argc, argv, "a:c:qDPr:s:t:h?", options, NULL);
  1073. if (key < 0)
  1074. break;
  1075. parse_arg(key, optarg);
  1076. }
  1077. parse_config();
  1078. }
/* Program entry point: detect CPUs and OpenCL devices, parse options,
 * spawn the workio / longpoll / GPU / CPU threads, then wait for the
 * workio thread to exit. */
int main (int argc, char *argv[])
{
	struct thr_info *thr;
	unsigned int i;
	char name[32];

#ifdef WIN32
	opt_n_threads = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */

	nDevs = clDevicesNum();
	/* NOTE(review): opt_ndevs is tested before parse_cmdline() runs;
	 * verify it is set somewhere else, otherwise this branch is dead. */
	if (opt_ndevs) {
		applog(LOG_INFO, "%i", nDevs);
		return nDevs;
	}
	/* Default to GPU-only mining when OpenCL devices exist; -t on the
	 * command line can still re-enable CPU threads. */
	if (nDevs)
		opt_n_threads = 0;
	rpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	parse_cmdline(argc, argv);

	gpu_threads = nDevs * opt_g_threads;

	if (!rpc_userpass) {
		if (!rpc_user || !rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		/* +2: the ':' separator plus the terminating NUL. */
		rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
		if (!rpc_userpass)
			return 1;
		sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
	}

	if (unlikely(pthread_mutex_init(&time_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&submit_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&get_lock, NULL)))
		return 1;

	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif

	work_restart = calloc(opt_n_threads + gpu_threads, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	/* Mining threads occupy slots [0, opt_n_threads + gpu_threads);
	 * the 2 extra slots are for the workio and longpoll threads. */
	thr_info = calloc(opt_n_threads + 2 + gpu_threads, sizeof(*thr));
	if (!thr_info)
		return 1;

	/* init workio thread info */
	work_thr_id = opt_n_threads + gpu_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = opt_n_threads + gpu_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;

	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);

	/* start GPU mining threads */
	for (i = 0; i < gpu_threads; i++) {
		/* Round-robin the GPU threads over the available devices. */
		int gpu = i % nDevs;

		thr = &thr_info[i];
		thr->id = i;
		thr->q = tq_new();
		if (!thr->q)
			return 1;
		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			/* Non-fatal: skip this device and keep the others. */
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
	}

	applog(LOG_INFO, "%d gpu miner threads started", i);

	/* start CPU mining threads */
	for (i = gpu_threads; i < gpu_threads + opt_n_threads; i++) {
		thr = &thr_info[i];

		thr->id = i;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);

		sleep(1);	/* don't pound RPC server all at once */
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
		"using SHA256 '%s' algorithm.",
		opt_n_threads,
		algo_names[opt_algo]);

	/* Restart count as it will be wrong till all threads are started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);

	curl_global_cleanup();

	applog(LOG_INFO, "workio thread dead, exiting.");

	return 0;
}