cpu-miner.c

/*
 * Copyright 2011 Con Kolivas
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "cpuminer-config.h"
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#ifndef WIN32
#include <sys/resource.h>
#endif
#include <getopt.h>
#include <jansson.h>
#include <curl/curl.h>
#include "compat.h"
#include "miner.h"
#include "findnonce.h"
#include "ocl.h"

#define PROGRAM_NAME		"minerd"
#define DEF_RPC_URL		"http://127.0.0.1:8332/"
#define DEF_RPC_USERNAME	"rpcuser"
#define DEF_RPC_PASSWORD	"rpcpass"
#define DEF_RPC_USERPASS	DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD

#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>

static inline void drop_policy(void)
{
	struct sched_param param;

	/* SCHED_IDLE and SCHED_BATCH both require a static priority of 0 */
	param.sched_priority = 0;
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
#ifdef SCHED_BATCH
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* pass the size of the mask itself, not of a pointer to it */
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif

enum workio_commands {
	WC_GET_WORK,
	WC_SUBMIT_WORK,
};

struct workio_cmd {
	enum workio_commands	cmd;
	struct thr_info		*thr;
	union {
		struct work	*work;
	} u;
};

enum sha256_algos {
	ALGO_C,			/* plain C */
	ALGO_4WAY,		/* parallel SSE2 */
	ALGO_VIA,		/* VIA padlock */
	ALGO_CRYPTOPP,		/* Crypto++ (C) */
	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
	ALGO_SSE2_64,		/* SSE2 for x86_64 */
};

static const char *algo_names[] = {
	[ALGO_C]		= "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY]		= "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA]		= "via",
#endif
	[ALGO_CRYPTOPP]		= "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32]	= "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64]		= "sse2_64",
#endif
};

bool opt_debug = false;
bool opt_protocol = false;
bool opt_ndevs = false;
bool want_longpoll = true;
bool have_longpoll = false;
bool use_syslog = false;
static bool opt_quiet = false;
static int opt_retries = 10;
static int opt_fail_pause = 30;
static int opt_log_interval = 5;
int opt_vectors;
int opt_worksize;
int opt_scantime = 60;
static json_t *opt_config;
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;
static int opt_g_threads = 2;
static int gpu_threads;
static int opt_n_threads = 1;
static int num_processors;
static int scan_intensity = 4;
static char *rpc_url;
static char *rpc_userpass;
static char *rpc_user, *rpc_pass;
struct thr_info *thr_info;
static int work_thr_id;
int longpoll_thr_id;
struct work_restart *work_restart = NULL;
pthread_mutex_t time_lock;
static pthread_mutex_t hash_lock;
static pthread_mutex_t get_lock;
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;
int hw_errors;

struct option_help {
	const char	*name;
	const char	*helptext;
};

static struct option_help options_help[] = {
	{ "help",
	  "(-h) Display this help text" },

	{ "algo XXX",
	  "(-a XXX) Specify sha256 implementation:\n"
	  "\tc\t\tLinux kernel sha256, implemented in C (default)"
#ifdef WANT_SSE2_4WAY
	  "\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
	  "\n\tvia\t\tVIA padlock implementation"
#endif
	  "\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
	  "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
	  "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
	},

	{ "config FILE",
	  "(-c FILE) JSON-format configuration file (default: none)\n"
	  "See example-cfg.json for an example configuration." },

	{ "cpu-threads N",
	  "(-t N) Number of miner CPU threads (default: number of processors or 0 if GPU mining)" },

	{ "debug",
	  "(-D) Enable debug output (default: off)" },

	{ "gpu-threads N",
	  "(-g N) Number of threads per GPU (0 - 10, default: 2)" },

	{ "intensity N",
	  "(-I N) Intensity of scanning (0 - 14, default: 4)" },

	{ "log N",
	  "(-l N) Interval in seconds between log output (default: 5)" },

	{ "ndevs",
	  "(-n) Display number of detected GPUs" },

	{ "no-longpoll",
	  "Disable X-Long-Polling support (default: enabled)" },

	{ "pass PASSWORD",
	  "(-p PASSWORD) Password for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_PASSWORD ")" },

	{ "protocol-dump",
	  "(-P) Verbose dump of protocol-level activities (default: off)" },

	{ "quiet",
	  "(-q) Disable per-thread hashmeter output (default: off)" },

	{ "retries N",
	  "(-r N) Number of times to retry if a JSON-RPC call fails\n"
	  "\t(default: 10; use -1 for \"never\")" },

	{ "retry-pause N",
	  "(-R N) Number of seconds to pause between retries\n"
	  "\t(default: 30)" },

	{ "scantime N",
	  "(-s N) Upper bound on time spent scanning current work,\n"
	  "\tin seconds (default: 60)" },

#ifdef HAVE_SYSLOG_H
	{ "syslog",
	  "Use system log for output messages (default: standard error)" },
#endif

	{ "url URL",
	  "URL for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_URL ")" },

	{ "userpass USERNAME:PASSWORD",
	  "Username:Password pair for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERPASS ")" },

	{ "user USERNAME",
	  "(-u USERNAME) Username for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERNAME ")" },

	{ "vectors N",
	  "(-v N) Override detected optimal vector width (default: detected, 1, 2 or 4)" },

	{ "worksize N",
	  "(-w N) Override detected optimal worksize (default: detected)" },
};

static struct option options[] = {
	{ "algo", 1, NULL, 'a' },
	{ "config", 1, NULL, 'c' },
	{ "cpu-threads", 1, NULL, 't' },
	{ "gpu-threads", 1, NULL, 'g' },
	{ "debug", 0, NULL, 'D' },
	{ "help", 0, NULL, 'h' },
	{ "intensity", 1, NULL, 'I' },
	{ "log", 1, NULL, 'l' },
	{ "ndevs", 0, NULL, 'n' },
	{ "no-longpoll", 0, NULL, 1003 },
	{ "pass", 1, NULL, 'p' },
	{ "protocol-dump", 0, NULL, 'P' },
	{ "quiet", 0, NULL, 'q' },
	{ "retries", 1, NULL, 'r' },
	{ "retry-pause", 1, NULL, 'R' },
	{ "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
	{ "syslog", 0, NULL, 1004 },
#endif
	{ "url", 1, NULL, 1001 },
	{ "user", 1, NULL, 'u' },
	{ "vectors", 1, NULL, 'v' },
	{ "worksize", 1, NULL, 'w' },
	{ "userpass", 1, NULL, 1002 },
	{ }	/* zero entry terminates the list for getopt_long */
};
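
/* Look up a hex-string field in a JSON object and decode it into a
 * caller-supplied binary buffer of exactly buflen bytes */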
static bool jobj_binary(const json_t *obj, const char *key,
			void *buf, size_t buflen)
{
	const char *hexstr;
	json_t *tmp;

	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;

	return true;
}

static bool work_decode(const json_t *val, struct work *work)
{
	if (unlikely(!jobj_binary(val, "midstate",
				  work->midstate, sizeof(work->midstate)))) {
		applog(LOG_ERR, "JSON inval midstate");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
		applog(LOG_ERR, "JSON inval data");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
		applog(LOG_ERR, "JSON inval hash1");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
		applog(LOG_ERR, "JSON inval target");
		goto err_out;
	}

	memset(work->hash, 0, sizeof(work->hash));

	return true;

err_out:
	return false;
}

static bool submit_upstream_work(CURL *curl, const struct work *work)
{
	char *hexstr = NULL;
	json_t *val, *res;
	char s[345];
	bool rc = false;
	struct cgpu_info *cgpu = thr_info[work->thr_id].cgpu;

	/* build hex string */
	hexstr = bin2hex(work->data, sizeof(work->data));
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_upstream_work OOM");
		goto out;
	}

	/* build JSON-RPC request */
	sprintf(s,
		"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
		hexstr);

	if (opt_debug)
		applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);

	/* issue JSON-RPC request */
	val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
	if (unlikely(!val)) {
		applog(LOG_ERR, "submit_upstream_work json_rpc_call failed");
		goto out;
	}

	res = json_object_get(val, "result");

	/* Theoretically threads could race when modifying accepted and
	 * rejected values but the chance of two submits completing at the
	 * same time is zero so there is no point adding extra locking */
	if (json_is_true(res)) {
		cgpu->accepted++;
		accepted++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
	} else {
		cgpu->rejected++;
		rejected++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
	}
	applog(LOG_INFO, "%sPU: %d Accepted: %d Rejected: %d HW errors: %d",
	       cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, cgpu->accepted, cgpu->rejected, cgpu->hw_errors);

	json_decref(val);

	rc = true;
out:
	free(hexstr);
	return rc;
}
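
/* The stock getwork request with no parameters; used both for polling new
 * work and for the long poll connection */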
static const char *rpc_req =
	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

static bool get_upstream_work(CURL *curl, struct work *work)
{
	json_t *val;
	bool rc;

	val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
			    want_longpoll, false);
	if (!val)
		return false;

	rc = work_decode(json_object_get(val, "result"), work);

	json_decref(val);

	return rc;
}

static void workio_cmd_free(struct workio_cmd *wc)
{
	if (!wc)
		return;

	switch (wc->cmd) {
	case WC_SUBMIT_WORK:
		free(wc->u.work);
		break;
	default: /* do nothing */
		break;
	}

	memset(wc, 0, sizeof(*wc));	/* poison */
	free(wc);
}

static bool workio_get_work(struct workio_cmd *wc, CURL *curl)
{
	struct work *ret_work;
	int failures = 0;

	ret_work = calloc(1, sizeof(*ret_work));
	if (!ret_work) {
		applog(LOG_ERR, "Failed to calloc ret_work in workio_get_work");
		return false;
	}

	/* obtain new work from bitcoin via JSON-RPC */
	while (!get_upstream_work(curl, ret_work)) {
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
			free(ret_work);
			return false;
		}

		/* pause, then restart work-request loop */
		applog(LOG_ERR, "json_rpc_call failed, retry after %d seconds",
			opt_fail_pause);
		sleep(opt_fail_pause);
	}

	/* send work to requesting thread */
	if (unlikely(!tq_push(wc->thr->q, ret_work))) {
		applog(LOG_ERR, "Failed to tq_push work in workio_get_work");
		free(ret_work);
	}

	return true;
}

static bool workio_submit_work(struct workio_cmd *wc, CURL *curl)
{
	int failures = 0;

	/* submit solution to bitcoin via JSON-RPC */
	while (!submit_upstream_work(curl, wc->u.work)) {
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
			return false;
		}

		/* pause, then restart work-request loop */
		applog(LOG_ERR, "...retry after %d seconds",
			opt_fail_pause);
		sleep(opt_fail_pause);
	}

	return true;
}

static void *workio_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;
	CURL *curl;

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialization failed");
		return NULL;
	}

	while (ok) {
		struct workio_cmd *wc;

		/* wait for workio_cmd sent to us, on our queue */
		wc = tq_pop(mythr->q, NULL);
		if (!wc) {
			ok = false;
			break;
		}

		/* process workio_cmd */
		switch (wc->cmd) {
		case WC_GET_WORK:
			ok = workio_get_work(wc, curl);
			break;
		case WC_SUBMIT_WORK:
			ok = workio_submit_work(wc, curl);
			break;
		default:	/* should never happen */
			ok = false;
			break;
		}

		workio_cmd_free(wc);
	}

	tq_freeze(mythr->q);
	curl_easy_cleanup(curl);

	return NULL;
}
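
/* Fold this thread's hash count into the global totals and, once every
 * opt_log_interval seconds, log a rolling local Mhash/s figure alongside the
 * overall average and the accepted/rejected/HW error counts */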
static void hashmeter(int thr_id, struct timeval *diff,
		      unsigned long hashes_done)
{
	struct timeval temp_tv_end, total_diff;
	double khashes, secs;
	double total_secs;
	double local_secs;
	static double local_mhashes_done = 0;
	static double rolling_local = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;

	/* Don't bother calculating anything if we're not displaying it */
	if (opt_quiet || !opt_log_interval)
		return;

	khashes = hashes_done / 1000.0;
	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
	if (opt_debug)
		applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
		       thr_id, hashes_done, hashes_done / secs);

	gettimeofday(&temp_tv_end, NULL);
	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);

	if (opt_n_threads + gpu_threads > 1) {
		/* Totals are updated by all threads so can race without locking */
		pthread_mutex_lock(&hash_lock);
		total_mhashes_done += local_mhashes;
		local_mhashes_done += local_mhashes;
		if (total_diff.tv_sec < opt_log_interval) {
			/* Only update the total every opt_log_interval seconds */
			pthread_mutex_unlock(&hash_lock);
			return;
		}
		gettimeofday(&total_tv_end, NULL);
		pthread_mutex_unlock(&hash_lock);
	} else {
		total_mhashes_done += local_mhashes;
		local_mhashes_done += local_mhashes;
		if (total_diff.tv_sec < opt_log_interval)
			return;
		gettimeofday(&total_tv_end, NULL);
	}

	/* Use a rolling average by faking an exponential decay over 5 * log */
	rolling_local = ((rolling_local * 0.9) + local_mhashes_done) / 1.9;

	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
	total_secs = (double)total_diff.tv_sec +
		((double)total_diff.tv_usec / 1000000.0);
	applog(LOG_INFO, "[%.2f | %.2f Mhash/s] [%d Accepted] [%d Rejected] [%d HW errors]",
	       rolling_local / local_secs,
	       total_mhashes_done / total_secs, accepted, rejected, hw_errors);
	local_mhashes_done = 0;
}

static struct work *work_heap = NULL;

/* Since we always have one extra work item queued, set the thread id to 0
 * for all the work and just give the work to the first thread that requests
 * work */
static bool get_work(struct work *work)
{
	struct thr_info *thr = &thr_info[0];
	struct workio_cmd *wc;
	bool ret = false;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc))
		goto out;

	wc->cmd = WC_GET_WORK;
	wc->thr = thr;

	/* send work request to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		workio_cmd_free(wc);
		goto out;
	}

	/* work_heap is protected by get_lock */
	pthread_mutex_lock(&get_lock);
	if (likely(work_heap)) {
		memcpy(work, work_heap, sizeof(*work));
		/* Wait for next response, a unit of work - it should be queued */
		free(work_heap);
		work_heap = tq_pop(thr->q, NULL);
	} else {
		/* wait for 1st response, or 1st response after failure */
		work_heap = tq_pop(thr->q, NULL);
		if (unlikely(!work_heap))
			goto out_unlock;

		/* send for another work request for the next time get_work
		 * is called. */
		wc = calloc(1, sizeof(*wc));
		if (unlikely(!wc)) {
			free(work_heap);
			work_heap = NULL;
			goto out_unlock;
		}
		wc->cmd = WC_GET_WORK;
		wc->thr = thr;
		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
			workio_cmd_free(wc);
			free(work_heap);
			work_heap = NULL;
			goto out_unlock;
		}
	}

	ret = true;
out_unlock:
	pthread_mutex_unlock(&get_lock);
out:
	return ret;
}

struct submit_data {
	struct thr_info *thr;
	struct work work_in;
	pthread_t pth;
};

static void *submit_work(void *userdata)
{
	struct submit_data *sd = (struct submit_data *)userdata;
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc))
		goto out;

	wc->u.work = malloc(sizeof(struct work));
	if (unlikely(!wc->u.work))
		goto err_out;

	wc->cmd = WC_SUBMIT_WORK;
	wc->thr = sd->thr;
	memcpy(wc->u.work, &sd->work_in, sizeof(struct work));

	/* send solution to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc)))
		goto err_out;
	goto out;

err_out:
	workio_cmd_free(wc);
out:
	pthread_detach(pthread_self());
	free(sd);
	return NULL;
}

static bool submit_work_async(struct thr_info *thr, const struct work *work_in)
{
	struct submit_data *sd = malloc(sizeof(struct submit_data));

	if (unlikely(!sd)) {
		applog(LOG_ERR, "Failed to malloc sd in submit_work_async");
		return false;
	}

	memcpy(&sd->work_in, work_in, sizeof(struct work));
	/* Pass the thread id to the work struct for per-thread accounting */
	sd->work_in.thr_id = thr->id;

	if (pthread_create(&sd->pth, NULL, submit_work, (void *)sd)) {
		applog(LOG_ERR, "Failed to create submit_thread");
		return false;
	}
	return true;
}

static bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
{
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in submit_work_sync");
		return false;
	}

	wc->u.work = malloc(sizeof(*work_in));
	if (unlikely(!wc->u.work)) {
		applog(LOG_ERR, "Failed to calloc work in submit_work_sync");
		goto err_out;
	}

	wc->cmd = WC_SUBMIT_WORK;
	wc->thr = thr;
	memcpy(wc->u.work, work_in, sizeof(*work_in));

	/* send solution to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in submit_work_sync");
		goto err_out;
	}

	return true;
err_out:
	workio_cmd_free(wc);
	return false;
}
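
/* Write the winning 32-bit nonce, least significant byte first, into bytes
 * 76-79 of the getwork data (offset 12 into the second 64-byte half of the
 * block header) and hand the completed share to the workio thread */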
bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	work->data[64 + 12 + 0] = (nonce >> 0) & 0xff;
	work->data[64 + 12 + 1] = (nonce >> 8) & 0xff;
	work->data[64 + 12 + 2] = (nonce >> 16) & 0xff;
	work->data[64 + 12 + 3] = (nonce >> 24) & 0xff;

	return submit_work_sync(thr, work);
}
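
/* Map a mining thread id onto a CPU core; GPU threads occupy the first
 * gpu_threads ids, so CPU threads are numbered after them */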
static inline int cpu_from_thr_id(int thr_id)
{
	return (thr_id - gpu_threads) % num_processors;
}

static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(thr_id - gpu_threads, cpu_from_thr_id(thr_id));

	while (1) {
		struct work work __attribute__((aligned(128)));
		unsigned long hashes_done;
		struct timeval tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		/* obtain new work from internal workio thread */
		if (unlikely(!get_work(&work))) {
			applog(LOG_ERR, "work retrieval failed, exiting "
				"mining thread %d", mythr->id);
			goto out;
		}

		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done);
			break;

#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done);
			break;

#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done);
			break;
#endif

		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);

		hashmeter(thr_id, &diff, hashes_done);

		/* adjust max_nonce to meet target scan time */
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec > 0) {
			max64 =
			   ((uint64_t)hashes_done * (opt_log_interval ? : opt_scantime)) / diff.tv_sec;
			if (max64 > 0xfffffffaULL)
				max64 = 0xfffffffaULL;
			max_nonce = max64;
		}

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			if (opt_debug)
				applog(LOG_DEBUG, "CPU %d found something?", cpu_from_thr_id(thr_id));
			if (!submit_work_async(mythr, &work))
				break;
		}
	}

out:
	tq_freeze(mythr->q);

	return NULL;
}

enum {
	STAT_SLEEP_INTERVAL		= 1,
	STAT_CTR_INTERVAL		= 10000000,
	FAILURE_INTERVAL		= 30,
};

static _clState *clStates[16];
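
/* Bind the precomputed block context in dev_blk_ctx to the kernel arguments.
 * The first fifteen scalar args are common, with the nonce at index 14 so the
 * main loop can update it alone between work refreshes; kernels built with
 * bitalign support (phatk) then take a different tail of precomputed values
 * than the poclbm kernel, and the output buffer is always last */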
static inline cl_int queue_kernel_parameters(_clState *clState, dev_blk_ctx *blk)
{
	cl_kernel *kernel = &clState->kernel;
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);

	if (clState->hasBitAlign == true) {
		/* Parameters for phatk kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W16);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W17);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->PreVal4);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->T1);
	} else {
		/* Parameters for poclbm kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	}
	status |= clSetKernelArg(*kernel, num++, sizeof(clState->outputBuffer),
				 (void *)&clState->outputBuffer);

	return status;
}

static inline int gpu_from_thr_id(int thr_id)
{
	return thr_id % nDevs;
}
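
/* GPU mining loop: keep the device busy by enqueueing an NDRange of
 * 1 << (15 + scan_intensity) work items per pass and reading results back
 * asynchronously. Fresh work is fetched whenever opt_scantime expires, the
 * nonce space is nearly exhausted, or longpoll flags a restart; candidate
 * nonces flagged in the output buffer are handed to postcalc_hash_async for
 * verification and submission */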
static void *gpuminer_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct timeval tv_start, diff;
	const int thr_id = mythr->id;
	uint32_t *res, *blank_res;

	size_t globalThreads[1];
	size_t localThreads[1];

	cl_int status;

	_clState *clState = clStates[thr_id];
	const cl_kernel *kernel = &clState->kernel;

	struct work *work = malloc(sizeof(struct work));
	unsigned const int threads = 1 << (15 + scan_intensity);
	unsigned const int vectors = clState->preferred_vwidth;
	unsigned const int hashes = threads * vectors;
	unsigned int hashes_done = 0;

	res = calloc(BUFFERSIZE, 1);
	blank_res = calloc(BUFFERSIZE, 1);
	if (!res || !blank_res) {
		applog(LOG_ERR, "Failed to calloc in gpuminer_thread");
		goto out;
	}

	gettimeofday(&tv_start, NULL);
	globalThreads[0] = threads;
	localThreads[0] = clState->work_size;
	work_restart[thr_id].restart = 1;
	diff.tv_sec = 0;

	while (1) {
		struct timeval tv_end, tv_workstart;
		unsigned int i;

		/* This finish flushes the readbuffer set with CL_FALSE later */
		clFinish(clState->commandQueue);
		if (diff.tv_sec > opt_scantime || work->blk.nonce >= MAXTHREADS - hashes ||
		    work_restart[thr_id].restart) {
			/* Ignore any reads since we're getting new work and queue a clean buffer */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
				goto out;
			}
			memset(res, 0, BUFFERSIZE);

			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(work))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
					"gpu mining thread %d", mythr->id);
				goto out;
			}

			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
			work->blk.nonce = 0;
			work_restart[thr_id].restart = 0;

			if (opt_debug)
				applog(LOG_DEBUG, "getwork thread %d", thr_id);
			/* Flushes the writebuffer set with CL_FALSE above */
			clFinish(clState->commandQueue);

			status = queue_kernel_parameters(clState, &work->blk);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clSetKernelArg of all params failed.");
				goto out;
			}
		} else {
			status = clSetKernelArg(*kernel, 14, sizeof(uint), (void *)&work->blk.nonce);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clSetKernelArg of nonce failed.");
				goto out;
			}
		}

		/* MAXBUFFERS entry is used as a flag to say nonces exist */
		if (res[MAXBUFFERS]) {
			/* Clear the buffer again */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
				goto out;
			}
			if (opt_debug)
				applog(LOG_DEBUG, "GPU %d found something?", gpu_from_thr_id(thr_id));
			postcalc_hash_async(mythr, work, res);
			memset(res, 0, BUFFERSIZE);
			clFinish(clState->commandQueue);
		}

		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
						globalThreads, localThreads, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)");
			goto out;
		}

		status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					     BUFFERSIZE, res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)");
			goto out;
		}

		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashes_done += hashes;
		work->blk.nonce += hashes;
		if (diff.tv_sec >= 1) {
			hashmeter(thr_id, &diff, hashes_done);
			gettimeofday(&tv_start, NULL);
			hashes_done = 0;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
	}
out:
	tq_freeze(mythr->q);

	return NULL;
}

static void restart_threads(void)
{
	int i;

	pthread_mutex_lock(&get_lock);
	for (i = 0; i < opt_n_threads + gpu_threads; i++)
		work_restart[i].restart = 1;
	/* If longpoll has detected a new block, we should discard any queued
	 * blocks in work_heap */
	if (likely(work_heap)) {
		free(work_heap);
		work_heap = NULL;
	}
	pthread_mutex_unlock(&get_lock);
}
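
/* Long poll thread: build the longpoll URL from the X-Long-Polling path
 * handed to it on its queue (either a full URL or a path relative to
 * rpc_url), then block on json_rpc_call; each successful return means a new
 * block, so every miner thread is told to restart its work */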
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		lp_url = hdr_path;
		hdr_path = NULL;
	}

	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (rpc_url[strlen(rpc_url) - 1] != '/')
			need_slash = true;

		lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialization failed");
		goto out;
	}

	while (1) {
		json_t *val;

		val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			failures = 0;
			json_decref(val);

			applog(LOG_INFO, "LONGPOLL detected new block");
			restart_threads();
		} else {
			if (failures++ < 10) {
				sleep(30);
				applog(LOG_ERR,
					"longpoll failed, sleeping for 30s");
			} else {
				applog(LOG_ERR,
					"longpoll failed, ending thread");
				goto out;
			}
		}
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}

static void show_usage(void)
{
	int i;

	printf("minerd version %s\n\n", VERSION);
	printf("Usage:\tminerd [options]\n\nSupported options:\n");
	for (i = 0; i < ARRAY_SIZE(options_help); i++) {
		struct option_help *h;

		h = &options_help[i];
		printf("--%s\n%s\n\n", h->name, h->helptext);
	}

	exit(1);
}

static void parse_arg(int key, char *arg)
{
	int v, i;

	switch (key) {
	case 'a':
		for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
			if (algo_names[i] &&
			    !strcmp(arg, algo_names[i])) {
				opt_algo = i;
				break;
			}
		}
		if (i == ARRAY_SIZE(algo_names))
			show_usage();
		break;
	case 'c': {
		json_error_t err;
		if (opt_config)
			json_decref(opt_config);
		opt_config = json_load_file(arg, &err);
		if (!json_is_object(opt_config)) {
			applog(LOG_ERR, "JSON decode of %s failed", arg);
			show_usage();
		}
		break;
	}
	case 'g':
		v = atoi(arg);
		if (v < 0 || v > 10)
			show_usage();
		opt_g_threads = v;
		break;
	case 'D':
		opt_debug = true;
		break;
	case 'I':
		v = atoi(arg);
		if (v < 0 || v > 14)	/* sanity check */
			show_usage();
		scan_intensity = v;
		break;
	case 'l':
		v = atoi(arg);
		if (v < 0 || v > 9999)	/* sanity check */
			show_usage();
		opt_log_interval = v;
		break;
	case 'n':
		/* -n / --ndevs: just report the number of detected GPUs */
		opt_ndevs = true;
		break;
	case 'p':
		free(rpc_pass);
		rpc_pass = strdup(arg);
		break;
	case 'P':
		opt_protocol = true;
		break;
	case 'q':
		opt_quiet = true;
		break;
	case 'r':
		v = atoi(arg);
		if (v < -1 || v > 9999)	/* sanity check */
			show_usage();
		opt_retries = v;
		break;
	case 'R':
		v = atoi(arg);
		if (v < 1 || v > 9999)	/* sanity check */
			show_usage();
		opt_fail_pause = v;
		break;
	case 's':
		v = atoi(arg);
		if (v < 1 || v > 9999)	/* sanity check */
			show_usage();
		opt_scantime = v;
		break;
	case 't':
		v = atoi(arg);
		if (v < 0 || v > 9999)	/* sanity check */
			show_usage();
		opt_n_threads = v;
		break;
	case 'u':
		free(rpc_user);
		rpc_user = strdup(arg);
		break;
	case 'v':
		v = atoi(arg);
		if (v != 1 && v != 2 && v != 4)
			show_usage();
		opt_vectors = v;
		break;
	case 'w':
		v = atoi(arg);
		if (v < 1 || v > 9999)	/* sanity check */
			show_usage();
		opt_worksize = v;
		break;
	case 1001:			/* --url */
		if (strncmp(arg, "http://", 7) &&
		    strncmp(arg, "https://", 8))
			show_usage();
		free(rpc_url);
		rpc_url = strdup(arg);
		break;
	case 1002:			/* --userpass */
		if (!strchr(arg, ':'))
			show_usage();
		free(rpc_userpass);
		rpc_userpass = strdup(arg);
		break;
	case 1003:
		want_longpoll = false;
		break;
	case 1004:
		use_syslog = true;
		break;
	default:
		show_usage();
	}
}
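
/* Apply settings from the JSON config file: each key mirrors a long option
 * name, string values are passed through parse_arg as if given on the
 * command line, and boolean true stands in for argument-less flags */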
static void parse_config(void)
{
	int i;
	json_t *val;

	if (!json_is_object(opt_config))
		return;

	for (i = 0; i < ARRAY_SIZE(options); i++) {
		if (!options[i].name)
			break;
		if (!strcmp(options[i].name, "config"))
			continue;

		val = json_object_get(opt_config, options[i].name);
		if (!val)
			continue;

		if (options[i].has_arg && json_is_string(val)) {
			char *s = strdup(json_string_value(val));
			if (!s)
				break;
			parse_arg(options[i].val, s);
			free(s);
		} else if (!options[i].has_arg && json_is_true(val))
			parse_arg(options[i].val, "");
		else
			applog(LOG_ERR, "JSON option %s invalid",
				options[i].name);
	}
}

static void parse_cmdline(int argc, char *argv[])
{
	int key;

	/* Cover every short option documented in options_help, not just a
	 * subset, so -g/-I/-l/-n/-p/-R/-u/-v/-w work alongside their long
	 * forms */
	while (1) {
		key = getopt_long(argc, argv, "a:c:Dg:hI:l:np:Pqr:R:s:t:u:v:w:?", options, NULL);
		if (key < 0)
			break;

		parse_arg(key, optarg);
	}

	parse_config();
}

int main(int argc, char *argv[])
{
	struct thr_info *thr;
	unsigned int i;
	char name[32];

#ifdef WIN32
	opt_n_threads = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */

	nDevs = clDevicesNum();
	/* Invert the value to determine if we manually set it in cmdline
	 * or disable gpu threads */
	if (nDevs)
		opt_n_threads = - opt_n_threads;

	rpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	parse_cmdline(argc, argv);

	/* -n/--ndevs is only known once the command line has been parsed */
	if (opt_ndevs) {
		applog(LOG_INFO, "%i", nDevs);
		return nDevs;
	}

	gpu_threads = nDevs * opt_g_threads;
	if (opt_n_threads < 0) {
		if (gpu_threads)
			opt_n_threads = 0;
		else
			opt_n_threads = -opt_n_threads;
	}

	if (!rpc_userpass) {
		if (!rpc_user || !rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
		if (!rpc_userpass)
			return 1;
		sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
	}

	if (unlikely(pthread_mutex_init(&time_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&get_lock, NULL)))
		return 1;

	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif

	work_restart = calloc(opt_n_threads + gpu_threads, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	thr_info = calloc(opt_n_threads + 2 + gpu_threads, sizeof(*thr));
	if (!thr_info)
		return 1;

	/* init workio thread info */
	work_thr_id = opt_n_threads + gpu_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = opt_n_threads + gpu_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;

	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);

	/* start GPU mining threads */
	for (i = 0; i < gpu_threads; i++) {
		int gpu = i % nDevs;

		thr = &thr_info[i];
		thr->id = i;
		if (!(i % opt_g_threads)) {
			thr->cgpu = calloc(1, sizeof(struct cgpu_info));
			if (unlikely(!thr->cgpu)) {
				applog(LOG_ERR, "Failed to calloc cgpu_info");
				return 1;
			}
			thr->cgpu->is_gpu = 1;
			thr->cgpu->cpu_gpu = gpu;
		} else
			thr->cgpu = thr_info[i - (i % opt_g_threads)].cgpu;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
	}
	applog(LOG_INFO, "%d gpu miner threads started", i);

	/* start CPU mining threads */
	for (i = gpu_threads; i < gpu_threads + opt_n_threads; i++) {
		thr = &thr_info[i];

		thr->id = i;
		if (!(i % opt_g_threads)) {
			thr->cgpu = calloc(1, sizeof(struct cgpu_info));
			if (unlikely(!thr->cgpu)) {
				applog(LOG_ERR, "Failed to calloc cgpu_info");
				return 1;
			}
			thr->cgpu->cpu_gpu = cpu_from_thr_id(i);
		} else
			thr->cgpu = thr_info[cpu_from_thr_id(i - (i % opt_g_threads))].cgpu;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);

		sleep(1);	/* don't pound RPC server all at once */
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
		"using SHA256 '%s' algorithm.",
		opt_n_threads,
		algo_names[opt_algo]);

	/* Restart count as it will be wrong till all threads are started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);

	curl_global_cleanup();

	applog(LOG_INFO, "workio thread dead, exiting.");

	return 0;
}