cpu-miner.c

/*
 * Copyright 2011 Con Kolivas
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "cpuminer-config.h"
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#ifndef WIN32
#include <sys/resource.h>
#endif
#include <getopt.h>
#include <jansson.h>
#include <curl/curl.h>
#include "compat.h"
#include "miner.h"
#include "findnonce.h"
#include "ocl.h"

#define PROGRAM_NAME		"minerd"
#define DEF_RPC_URL		"http://127.0.0.1:8332/"
#define DEF_RPC_USERNAME	"rpcuser"
#define DEF_RPC_PASSWORD	"rpcpass"
#define DEF_RPC_USERPASS	DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD

#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>
static inline void drop_policy(void)
{
	struct sched_param param;

	/* SCHED_IDLE and SCHED_BATCH both require a priority of 0 */
	param.sched_priority = 0;
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
#ifdef SCHED_BATCH
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif

enum workio_commands {
	WC_GET_WORK,
	WC_SUBMIT_WORK,
	WC_DIE,
};

struct workio_cmd {
	enum workio_commands cmd;
	struct thr_info *thr;
	union {
		struct work *work;
	} u;
};

enum sha256_algos {
	ALGO_C,			/* plain C */
	ALGO_4WAY,		/* parallel SSE2 */
	ALGO_VIA,		/* VIA padlock */
	ALGO_CRYPTOPP,		/* Crypto++ (C) */
	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
	ALGO_SSE2_64,		/* SSE2 for x86_64 */
};

static const char *algo_names[] = {
	[ALGO_C]		= "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY]		= "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA]		= "via",
#endif
	[ALGO_CRYPTOPP]		= "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32]	= "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64]		= "sse2_64",
#endif
};

bool opt_debug = false;
bool opt_protocol = false;
bool opt_ndevs = false;
bool want_longpoll = true;
bool have_longpoll = false;
bool use_syslog = false;
static bool opt_quiet = false;
static int opt_retries = 10;
static int opt_fail_pause = 30;
static int opt_log_interval = 5;
static int opt_queue = 2;
int opt_vectors;
int opt_worksize;
int opt_scantime = 60;
static json_t *opt_config;
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;
static int opt_g_threads = 2;
static int gpu_threads;
static int opt_n_threads = 1;
static int num_processors;
static int scan_intensity = 4;
static char *rpc_url;
static char *rpc_userpass;
static char *rpc_user, *rpc_pass;
struct thr_info *thr_info;
static int work_thr_id;
int longpoll_thr_id;
struct work_restart *work_restart = NULL;
pthread_mutex_t time_lock;
static pthread_mutex_t hash_lock;
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;
int hw_errors;

struct option_help {
	const char *name;
	const char *helptext;
};

static struct option_help options_help[] = {
	{ "help",
	  "(-h) Display this help text" },

	{ "algo XXX",
	  "(-a XXX) Specify sha256 implementation:\n"
	  "\tc\t\tLinux kernel sha256, implemented in C (default)"
#ifdef WANT_SSE2_4WAY
	  "\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
	  "\n\tvia\t\tVIA padlock implementation"
#endif
	  "\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
	  "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
	  "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
	},

	{ "config FILE",
	  "(-c FILE) JSON-format configuration file (default: none)\n"
	  "See example-cfg.json for an example configuration." },

	{ "cpu-threads N",
	  "(-t N) Number of miner CPU threads (default: number of processors, or 0 if GPU mining)" },

	{ "debug",
	  "(-D) Enable debug output (default: off)" },

	{ "gpu-threads N",
	  "(-g N) Number of threads per GPU (0 - 10, default: 2)" },

	{ "intensity N",
	  "(-I N) Intensity of scanning (0 - 14, default: 4)" },

	{ "log N",
	  "(-l N) Interval in seconds between log output (default: 5)" },

	{ "ndevs",
	  "(-n) Display number of detected GPUs and exit" },

	{ "no-longpoll",
	  "Disable X-Long-Polling support (default: enabled)" },

	{ "pass PASSWORD",
	  "(-p PASSWORD) Password for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_PASSWORD ")" },

	{ "protocol-dump",
	  "(-P) Verbose dump of protocol-level activities (default: off)" },

	{ "queue N",
	  "(-Q N) Number of work items to queue (1 - 10, default: 2)" },

	{ "quiet",
	  "(-q) Disable per-thread hashmeter output (default: off)" },

	{ "retries N",
	  "(-r N) Number of times to retry if a JSON-RPC call fails\n"
	  "\t(default: 10; use -1 for \"never\")" },

	{ "retry-pause N",
	  "(-R N) Number of seconds to pause between retries\n"
	  "\t(default: 30)" },

	{ "scantime N",
	  "(-s N) Upper bound on time spent scanning current work,\n"
	  "\tin seconds (default: 60)" },

#ifdef HAVE_SYSLOG_H
	{ "syslog",
	  "Use system log for output messages (default: standard error)" },
#endif

	{ "url URL",
	  "URL for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_URL ")" },

	{ "userpass USERNAME:PASSWORD",
	  "Username:Password pair for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERPASS ")" },

	{ "user USERNAME",
	  "(-u USERNAME) Username for bitcoin JSON-RPC server "
	  "(default: " DEF_RPC_USERNAME ")" },

	{ "vectors N",
	  "(-v N) Override detected optimal vector width (default: detected, 1, 2 or 4)" },

	{ "worksize N",
	  "(-w N) Override detected optimal worksize (default: detected)" },
};

static struct option options[] = {
	{ "algo", 1, NULL, 'a' },
	{ "config", 1, NULL, 'c' },
	{ "cpu-threads", 1, NULL, 't' },
	{ "gpu-threads", 1, NULL, 'g' },
	{ "debug", 0, NULL, 'D' },
	{ "help", 0, NULL, 'h' },
	{ "intensity", 1, NULL, 'I' },
	{ "log", 1, NULL, 'l' },
	{ "ndevs", 0, NULL, 'n' },
	{ "no-longpoll", 0, NULL, 1003 },
	{ "pass", 1, NULL, 'p' },
	{ "protocol-dump", 0, NULL, 'P' },
	{ "queue", 1, NULL, 'Q' },
	{ "quiet", 0, NULL, 'q' },
	{ "retries", 1, NULL, 'r' },
	{ "retry-pause", 1, NULL, 'R' },
	{ "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
	{ "syslog", 0, NULL, 1004 },
#endif
	{ "url", 1, NULL, 1001 },
	{ "user", 1, NULL, 'u' },
	{ "vectors", 1, NULL, 'v' },
	{ "worksize", 1, NULL, 'w' },
	{ "userpass", 1, NULL, 1002 },
	/* getopt_long and parse_config both need a terminating entry */
	{ }
};

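/* Look up a JSON string field by key and decode its hex contents into a
 * caller-supplied binary buffer of exactly buflen bytes. */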
static bool jobj_binary(const json_t *obj, const char *key,
			void *buf, size_t buflen)
{
	const char *hexstr;
	json_t *tmp;

	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;

	return true;
}

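/* Decode the "midstate", "data", "hash1" and "target" fields of a getwork
 * response into the corresponding binary members of struct work. */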
static bool work_decode(const json_t *val, struct work *work)
{
	if (unlikely(!jobj_binary(val, "midstate",
				  work->midstate, sizeof(work->midstate)))) {
		applog(LOG_ERR, "JSON invalid midstate");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
		applog(LOG_ERR, "JSON invalid data");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
		applog(LOG_ERR, "JSON invalid hash1");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
		applog(LOG_ERR, "JSON invalid target");
		goto err_out;
	}

	memset(work->hash, 0, sizeof(work->hash));

	return true;

err_out:
	return false;
}

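/* Hex-encode the solved block data and send it back to the server as the
 * single parameter of a getwork call, then update and report the accepted
 * and rejected counters for the device that found it. */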
static bool submit_upstream_work(CURL *curl, const struct work *work)
{
	char *hexstr = NULL;
	json_t *val, *res;
	char s[345];
	bool rc = false;
	struct cgpu_info *cgpu = thr_info[work->thr_id].cgpu;

	/* build hex string */
	hexstr = bin2hex(work->data, sizeof(work->data));
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_upstream_work OOM");
		goto out;
	}

	/* build JSON-RPC request */
	sprintf(s,
		"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
		hexstr);

	if (opt_debug)
		applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);

	/* issue JSON-RPC request */
	val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
	if (unlikely(!val)) {
		applog(LOG_ERR, "submit_upstream_work json_rpc_call failed");
		goto out;
	}

	res = json_object_get(val, "result");

	/* Theoretically threads could race when modifying accepted and
	 * rejected values but the chance of two submits completing at the
	 * same time is zero so there is no point adding extra locking */
	if (json_is_true(res)) {
		cgpu->accepted++;
		accepted++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
	} else {
		cgpu->rejected++;
		rejected++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
	}
	applog(LOG_INFO, "%sPU: %d Accepted: %d Rejected: %d HW errors: %d",
	       cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, cgpu->accepted, cgpu->rejected, cgpu->hw_errors);

	json_decref(val);

	rc = true;
out:
	free(hexstr);
	return rc;
}

static const char *rpc_req =
	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

static bool get_upstream_work(CURL *curl, struct work *work)
{
	json_t *val;
	bool rc;

	val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
			    want_longpoll, false);
	if (unlikely(!val)) {
		applog(LOG_ERR, "Failed json_rpc_call in get_upstream_work");
		return false;
	}

	rc = work_decode(json_object_get(val, "result"), work);

	json_decref(val);

	return rc;
}

static void workio_cmd_free(struct workio_cmd *wc)
{
	if (!wc)
		return;

	switch (wc->cmd) {
	case WC_SUBMIT_WORK:
		free(wc->u.work);
		break;
	default: /* do nothing */
		break;
	}

	memset(wc, 0, sizeof(*wc)); /* poison */
	free(wc);
}

static void kill_work(void)
{
	struct workio_cmd *wc;

	applog(LOG_INFO, "Received kill message");

	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in kill_work");
		/* We're just trying to die anyway, so forget graceful */
		exit(1);
	}

	wc->cmd = WC_DIE;
	wc->thr = 0;

	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in kill_work");
		exit(1);
	}
}

struct io_data {
	struct workio_cmd *wc;
	CURL *curl;
};

static pthread_t *get_thread = NULL;
static pthread_t *submit_thread = NULL;
static char current_block[36];

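/* Fetch one unit of work from the upstream server, retrying per
 * opt_retries/opt_fail_pause, and push it onto the queue of the thread that
 * asked for it. Runs in its own thread so a slow or failing RPC call does
 * not block the work I/O loop. */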
static void *get_work_thread(void *userdata)
{
	struct io_data *io_data = (struct io_data *)userdata;
	struct workio_cmd *wc = io_data->wc;
	CURL *curl = io_data->curl;
	struct work *ret_work;
	int failures = 0;

	ret_work = calloc(1, sizeof(*ret_work));
	if (unlikely(!ret_work)) {
		applog(LOG_ERR, "Failed to calloc ret_work in get_work_thread");
		kill_work();
		goto out;
	}

	/* obtain new work from bitcoin via JSON-RPC */
	while (!get_upstream_work(curl, ret_work)) {
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
			free(ret_work);
			kill_work();
			goto out;
		}

		/* pause, then restart work-request loop */
		applog(LOG_ERR, "json_rpc_call failed on get work, retry after %d seconds",
		       opt_fail_pause);
		sleep(opt_fail_pause);
	}

	/* send work to requesting thread */
	if (unlikely(!tq_push(wc->thr->q, ret_work))) {
		applog(LOG_ERR, "Failed to tq_push work in get_work_thread");
		kill_work();
		free(ret_work);
	}

out:
	free(io_data);
	workio_cmd_free(wc);
	return NULL;
}

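/* Service a WC_GET_WORK command by spawning the single get_work_thread,
 * joining the previous instance first on subsequent calls. */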
static bool workio_get_work(struct workio_cmd *wc, CURL *curl)
{
	struct io_data *id = malloc(sizeof(struct io_data));

	if (unlikely(!id)) {
		applog(LOG_ERR, "Failed to malloc id in workio_get_work");
		return false;
	}
	id->wc = wc;
	id->curl = curl;

	if (unlikely(!get_thread)) {
		/* This is only instantiated once at startup */
		get_thread = malloc(sizeof(*get_thread));
		if (unlikely(!get_thread)) {
			applog(LOG_ERR, "Failed to malloc get_thread in workio_get_work");
			return false;
		}
	} else
		pthread_join(*get_thread, NULL);

	if (unlikely(pthread_create(get_thread, NULL, get_work_thread, (void *)id))) {
		applog(LOG_ERR, "Failed to create get_work_thread");
		free(id);
		return false;
	}
	return true;
}

static void *submit_work_thread(void *userdata)
{
	struct io_data *io_data = (struct io_data *)userdata;
	struct workio_cmd *wc = io_data->wc;
	CURL *curl = io_data->curl;
	int failures = 0;

	/* current_block holds raw header bytes, so compare with memcmp;
	 * strncmp would stop at the first zero byte */
	if (unlikely(memcmp(wc->u.work->data, current_block, 36))) {
		applog(LOG_INFO, "Stale work detected, discarding");
		goto out;
	}

	/* submit solution to bitcoin via JSON-RPC */
	while (!submit_upstream_work(curl, wc->u.work)) {
		if (unlikely(memcmp(wc->u.work->data, current_block, 36))) {
			applog(LOG_INFO, "Stale work detected, discarding");
			goto out;
		}
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
			kill_work();
			goto out;
		}

		/* pause, then restart work-request loop */
		applog(LOG_ERR, "json_rpc_call failed on submit_work, retry after %d seconds",
		       opt_fail_pause);
		sleep(opt_fail_pause);
	}

out:
	workio_cmd_free(wc);
	free(io_data);
	return NULL;
}

static bool workio_submit_work(struct workio_cmd *wc, CURL *curl)
{
	struct io_data *id = malloc(sizeof(struct io_data));

	if (unlikely(!id)) {
		applog(LOG_ERR, "Failed to malloc id in workio_submit_work");
		return false;
	}
	id->wc = wc;
	id->curl = curl;

	if (unlikely(!submit_thread)) {
		submit_thread = malloc(sizeof(*submit_thread));
		if (unlikely(!submit_thread)) {
			applog(LOG_ERR, "Failed to malloc submit_thread in workio_submit_work");
			return false;
		}
	} else
		pthread_join(*submit_thread, NULL);

	if (unlikely(pthread_create(submit_thread, NULL, submit_work_thread, (void *)id))) {
		applog(LOG_ERR, "Failed to create submit_work_thread");
		free(id);
		return false;
	}
	return true;
}

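/* The work I/O thread: pops workio_cmd messages off its queue and dispatches
 * them, using one persistent CURL handle for fetching work and another for
 * submitting shares. Exits, and thereby ends the program, on WC_DIE or
 * error. */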
static void *workio_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;
	CURL *get_curl, *submit_curl;

	get_curl = curl_easy_init();
	submit_curl = curl_easy_init();
	if (unlikely(!get_curl || !submit_curl)) {
		applog(LOG_ERR, "CURL initialization failed");
		return NULL;
	}

	while (ok) {
		struct workio_cmd *wc;

		/* wait for workio_cmd sent to us, on our queue */
		wc = tq_pop(mythr->q, NULL);
		if (unlikely(!wc)) {
			ok = false;
			break;
		}

		/* process workio_cmd */
		switch (wc->cmd) {
		case WC_GET_WORK:
			ok = workio_get_work(wc, get_curl);
			break;
		case WC_SUBMIT_WORK:
			ok = workio_submit_work(wc, submit_curl);
			break;
		case WC_DIE:
		default:
			ok = false;
			break;
		}
	}

	tq_freeze(mythr->q);
	curl_easy_cleanup(submit_curl);
	curl_easy_cleanup(get_curl);

	return NULL;
}

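/* Per-thread hash rate accounting. Local and global megahash totals are
 * accumulated under hash_lock when more than one mining thread is running,
 * and a decaying "rolling" local average plus the overall average are
 * logged every opt_log_interval seconds. */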
static void hashmeter(int thr_id, struct timeval *diff,
		      unsigned long hashes_done)
{
	struct timeval temp_tv_end, total_diff;
	double khashes, secs;
	double total_secs;
	double local_secs;
	static double local_mhashes_done = 0;
	static double rolling_local = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;

	/* Don't bother calculating anything if we're not displaying it */
	if (opt_quiet || !opt_log_interval)
		return;

	khashes = hashes_done / 1000.0;
	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);

	if (opt_debug)
		applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
		       thr_id, hashes_done, khashes / secs);

	gettimeofday(&temp_tv_end, NULL);
	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);

	if (opt_n_threads + gpu_threads > 1) {
		/* Totals are updated by all threads and could race, so take
		 * the hash lock around them */
		pthread_mutex_lock(&hash_lock);
		total_mhashes_done += local_mhashes;
		local_mhashes_done += local_mhashes;
		if (total_diff.tv_sec < opt_log_interval) {
			/* Only update the total every opt_log_interval seconds */
			pthread_mutex_unlock(&hash_lock);
			return;
		}
		gettimeofday(&total_tv_end, NULL);
		pthread_mutex_unlock(&hash_lock);
	} else {
		total_mhashes_done += local_mhashes;
		local_mhashes_done += local_mhashes;
		if (total_diff.tv_sec < opt_log_interval)
			return;
		gettimeofday(&total_tv_end, NULL);
	}

	/* Use a rolling average by faking an exponential decay over 5 * log interval */
	rolling_local = ((rolling_local * 0.9) + local_mhashes_done) / 1.9;

	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
	total_secs = (double)total_diff.tv_sec +
		((double)total_diff.tv_usec / 1000000.0);
	applog(LOG_INFO, "[%.2f | %.2f Mhash/s] [%d Accepted] [%d Rejected] [%d HW errors]",
	       rolling_local / local_secs,
	       total_mhashes_done / total_secs, accepted, rejected, hw_errors);

	local_mhashes_done = 0;
}

/* All work is queued flagged as being for thread 0 and then the mining thread
 * flags it as its own */
static bool queue_request(void)
{
	struct thr_info *thr = &thr_info[0];
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc))
		return false;

	wc->cmd = WC_GET_WORK;
	wc->thr = thr;

	/* send work request to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		workio_cmd_free(wc);
		return false;
	}

	return true;
}

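/* Hand out the next queued work item, issuing a replacement request so that
 * roughly opt_queue items stay in flight. If a new block was flagged, the
 * cached items are drained and fresh work is fetched instead. The first 36
 * bytes of the block header are remembered in current_block for later
 * staleness checks. */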
static bool get_work(struct work *work)
{
	struct thr_info *thr = &thr_info[0];
	static bool first_work = true;
	struct work *work_heap;
	bool ret = false;
	unsigned int i;

get_new:
	if (unlikely(!queue_request()))
		goto out;

	/* wait for 1st response, or get cached response */
	work_heap = tq_pop(thr->q, NULL);
	if (unlikely(!work_heap))
		goto out;

	if (unlikely(work_restart[opt_n_threads + gpu_threads].restart)) {
		work_restart[opt_n_threads + gpu_threads].restart = 0;
		free(work_heap);
		if (opt_debug)
			applog(LOG_DEBUG, "New block detected, discarding old work");
		for (i = 1; i < opt_queue; i++) {
			/* Pop off all the work. Cancelling the requests would
			 * be better but tricky. */
			work_heap = tq_pop(thr->q, NULL);
			if (unlikely(!work_heap))
				goto out;
			free(work_heap);
			if (unlikely(!queue_request()))
				goto out;
		}
		goto get_new;
	}

	if (unlikely(first_work)) {
		first_work = false;
		/* send out extra work requests for the next time get_work
		 * is called. */
		for (i = 1; i < opt_queue; i++) {
			if (unlikely(!queue_request()))
				goto out_free;
		}
	}

	memcpy(work, work_heap, sizeof(*work));
	memcpy(current_block, work->data, 36);
	ret = true;

out_free:
	free(work_heap);
out:
	return ret;
}

static bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
{
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in submit_work_sync");
		return false;
	}

	wc->u.work = malloc(sizeof(*work_in));
	if (unlikely(!wc->u.work)) {
		applog(LOG_ERR, "Failed to malloc work in submit_work_sync");
		goto err_out;
	}

	wc->cmd = WC_SUBMIT_WORK;
	wc->thr = thr;
	memcpy(wc->u.work, work_in, sizeof(*work_in));

	/* send solution to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in submit_work_sync");
		goto err_out;
	}

	return true;

err_out:
	workio_cmd_free(wc);
	return false;
}

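/* Write the winning 32-bit nonce into the block header, least-significant
 * byte first, at byte offset 76 of work->data, then queue the share for
 * submission via the work I/O thread. */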
bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	work->data[64 + 12 + 0] = (nonce >> 0) & 0xff;
	work->data[64 + 12 + 1] = (nonce >> 8) & 0xff;
	work->data[64 + 12 + 2] = (nonce >> 16) & 0xff;
	work->data[64 + 12 + 3] = (nonce >> 24) & 0xff;

	return submit_work_sync(thr, work);
}

static inline int cpu_from_thr_id(int thr_id)
{
	return (thr_id - gpu_threads) % num_processors;
}

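/* CPU mining thread: lowers its own scheduling priority, optionally pins
 * itself to a CPU, then loops fetching work and scanning nonces with the
 * selected SHA-256 implementation, adjusting max_nonce so each scan lasts
 * roughly the log interval (or the scantime if logging is disabled). */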
static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* CPU affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(thr_id - gpu_threads, cpu_from_thr_id(thr_id));

	while (1) {
		struct work work __attribute__((aligned(128)));
		unsigned long hashes_done;
		struct timeval tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		/* obtain new work from internal workio thread */
		if (unlikely(!get_work(&work))) {
			applog(LOG_ERR, "work retrieval failed, exiting "
			       "mining thread %d", mythr->id);
			goto out;
		}
		work.thr_id = thr_id;

		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done);
			break;

#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done);
			break;

#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done);
			break;
#endif

		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);

		hashmeter(thr_id, &diff, hashes_done);

		/* adjust max_nonce to meet target scan time */
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec > 0) {
			max64 = ((uint64_t)hashes_done *
				 (opt_log_interval ? : opt_scantime)) / diff.tv_sec;
			if (max64 > 0xfffffffaULL)
				max64 = 0xfffffffaULL;
			max_nonce = max64;
		}

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			if (opt_debug)
				applog(LOG_DEBUG, "CPU %d found something?", cpu_from_thr_id(thr_id));
			if (unlikely(!submit_work_sync(mythr, &work)))
				break;
		}
	}

out:
	tq_freeze(mythr->q);

	return NULL;
}

enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};

static _clState *clStates[16];

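/* Bind the precalculated midstate/context words for the current work item as
 * arguments of the OpenCL kernel. The argument list differs depending on
 * whether the phatk (bitalign) or poclbm kernel is in use. */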
static inline cl_int queue_kernel_parameters(_clState *clState, dev_blk_ctx *blk)
{
	cl_kernel *kernel = &clState->kernel;
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);

	if (clState->hasBitAlign == true) {
		/* Parameters for phatk kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W16);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W17);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->PreVal4);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->T1);
	} else {
		/* Parameters for poclbm kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	}
	status |= clSetKernelArg(*kernel, num++, sizeof(clState->outputBuffer),
				 (void *)&clState->outputBuffer);

	return status;
}

static inline int gpu_from_thr_id(int thr_id)
{
	return thr_id % nDevs;
}

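/* GPU mining thread: keeps the device busy by enqueueing NDRange kernel
 * launches of 1 << (15 + intensity) work items, reading results back
 * asynchronously into res and handing any candidate nonces to
 * postcalc_hash_async(). New work is fetched when the scantime expires, the
 * nonce space is nearly exhausted, or a block change is flagged. */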
static void *gpuminer_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct timeval tv_start, diff;
	const int thr_id = mythr->id;
	uint32_t *res, *blank_res;

	size_t globalThreads[1];
	size_t localThreads[1];

	cl_int status;

	_clState *clState = clStates[thr_id];
	const cl_kernel *kernel = &clState->kernel;

	/* zeroed so blk.nonce is initialised before its first use below */
	struct work *work = calloc(1, sizeof(struct work));
	const unsigned int threads = 1 << (15 + scan_intensity);
	const unsigned int vectors = clState->preferred_vwidth;
	const unsigned int hashes = threads * vectors;
	unsigned int hashes_done = 0;

	res = calloc(BUFFERSIZE, 1);
	blank_res = calloc(BUFFERSIZE, 1);
	if (!work || !res || !blank_res) {
		applog(LOG_ERR, "Failed to calloc in gpuminer_thread");
		goto out;
	}

	gettimeofday(&tv_start, NULL);
	globalThreads[0] = threads;
	localThreads[0] = clState->work_size;
	work_restart[thr_id].restart = 1;
	diff.tv_sec = 0;

	while (1) {
		struct timeval tv_end, tv_workstart;

		/* This finish flushes the readbuffer set with CL_FALSE later */
		clFinish(clState->commandQueue);
		if (diff.tv_sec > opt_scantime || work->blk.nonce >= MAXTHREADS - hashes || work_restart[thr_id].restart) {
			/* Ignore any reads since we're getting new work and queue a clean buffer */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
						      BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
				goto out;
			}
			memset(res, 0, BUFFERSIZE);

			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(work))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
				       "gpu mining thread %d", mythr->id);
				goto out;
			}
			work->thr_id = thr_id;

			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
			work->blk.nonce = 0;
			work_restart[thr_id].restart = 0;

			if (opt_debug)
				applog(LOG_DEBUG, "getwork thread %d", thr_id);
			/* Flushes the writebuffer set with CL_FALSE above */
			clFinish(clState->commandQueue);
		}

		status = queue_kernel_parameters(clState, &work->blk);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: clSetKernelArg of all params failed.");
			goto out;
		}

		/* MAXBUFFERS entry is used as a flag to say nonces exist */
		if (res[MAXBUFFERS]) {
			/* Clear the buffer again */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
						      BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS)) {
				applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
				goto out;
			}
			if (opt_debug)
				applog(LOG_DEBUG, "GPU %d found something?", gpu_from_thr_id(thr_id));
			postcalc_hash_async(mythr, work, res);
			memset(res, 0, BUFFERSIZE);
			clFinish(clState->commandQueue);
		}

		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
						globalThreads, localThreads, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)");
			goto out;
		}

		status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					     BUFFERSIZE, res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)");
			goto out;
		}

		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashes_done += hashes;
		work->blk.nonce += hashes;
		if (diff.tv_sec >= 1) {
			hashmeter(thr_id, &diff, hashes_done);
			gettimeofday(&tv_start, NULL);
			hashes_done = 0;
		}
		timeval_subtract(&diff, &tv_end, &tv_workstart);
	}
out:
	tq_freeze(mythr->q);

	return NULL;
}

static void restart_threads(void)
{
	int i;

	for (i = 0; i < opt_n_threads + gpu_threads + 1; i++)
		work_restart[i].restart = 1;
}

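/* Long-polling thread: keeps an HTTP request open against the URL announced
 * in the X-Long-Polling header; whenever the request returns (i.e. a new
 * block was found) every mining thread is flagged to restart on fresh work. */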
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		lp_url = hdr_path;
		hdr_path = NULL;
	}
	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (rpc_url[strlen(rpc_url) - 1] != '/')
			need_slash = true;

		lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialization failed");
		goto out;
	}

	while (1) {
		json_t *val;

		val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			failures = 0;
			json_decref(val);

			applog(LOG_INFO, "LONGPOLL detected new block");
			restart_threads();
		} else {
			if (failures++ < 10) {
				sleep(30);
				applog(LOG_ERR,
				       "longpoll failed, sleeping for 30s");
			} else {
				applog(LOG_ERR,
				       "longpoll failed, ending thread");
				goto out;
			}
		}
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}

static void show_usage(void)
{
	int i;

	printf("minerd version %s\n\n", VERSION);
	printf("Usage:\tminerd [options]\n\nSupported options:\n");
	for (i = 0; i < ARRAY_SIZE(options_help); i++) {
		struct option_help *h;

		h = &options_help[i];
		printf("--%s\n%s\n\n", h->name, h->helptext);
	}

	exit(1);
}

static void parse_arg(int key, char *arg)
{
	int v, i;

	switch (key) {
	case 'a':
		for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
			if (algo_names[i] &&
			    !strcmp(arg, algo_names[i])) {
				opt_algo = i;
				break;
			}
		}
		if (i == ARRAY_SIZE(algo_names))
			show_usage();
		break;
	case 'c': {
		json_error_t err;

		if (opt_config)
			json_decref(opt_config);
		opt_config = json_load_file(arg, &err);
		if (!json_is_object(opt_config)) {
			applog(LOG_ERR, "JSON decode of %s failed", arg);
			show_usage();
		}
		break;
	}
	case 'g':
		v = atoi(arg);
		if (v < 0 || v > 10)
			show_usage();

		opt_g_threads = v;
		break;
	case 'D':
		opt_debug = true;
		break;
	case 'I':
		v = atoi(arg);
		if (v < 0 || v > 14) /* sanity check */
			show_usage();

		scan_intensity = v;
		break;
	case 'l':
		v = atoi(arg);
		if (v < 0 || v > 9999) /* sanity check */
			show_usage();

		opt_log_interval = v;
		break;
	case 'n':
		opt_ndevs = true;
		break;
	case 'p':
		free(rpc_pass);
		rpc_pass = strdup(arg);
		break;
	case 'P':
		opt_protocol = true;
		break;
	case 'Q':
		v = atoi(arg);
		if (v < 1 || v > 10)
			show_usage();

		opt_queue = v;
		break;
	case 'q':
		opt_quiet = true;
		break;
	case 'r':
		v = atoi(arg);
		if (v < -1 || v > 9999) /* sanity check */
			show_usage();

		opt_retries = v;
		break;
	case 'R':
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();

		opt_fail_pause = v;
		break;
	case 's':
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();

		opt_scantime = v;
		break;
	case 't':
		v = atoi(arg);
		if (v < 0 || v > 9999) /* sanity check */
			show_usage();

		opt_n_threads = v;
		break;
	case 'u':
		free(rpc_user);
		rpc_user = strdup(arg);
		break;
	case 'v':
		v = atoi(arg);
		if (v != 1 && v != 2 && v != 4)
			show_usage();

		opt_vectors = v;
		break;
	case 'w':
		v = atoi(arg);
		if (v < 1 || v > 9999) /* sanity check */
			show_usage();

		opt_worksize = v;
		break;
	case 1001:			/* --url */
		if (strncmp(arg, "http://", 7) &&
		    strncmp(arg, "https://", 8))
			show_usage();

		free(rpc_url);
		rpc_url = strdup(arg);
		break;
	case 1002:			/* --userpass */
		if (!strchr(arg, ':'))
			show_usage();

		free(rpc_userpass);
		rpc_userpass = strdup(arg);
		break;
	case 1003:
		want_longpoll = false;
		break;
	case 1004:
		use_syslog = true;
		break;
	default:
		show_usage();
	}
}

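/* Apply settings from the JSON configuration file by feeding each recognised
 * key through the same parse_arg() handlers used for command-line options. */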
static void parse_config(void)
{
	int i;
	json_t *val;

	if (!json_is_object(opt_config))
		return;

	for (i = 0; i < ARRAY_SIZE(options); i++) {
		if (!options[i].name)
			break;
		if (!strcmp(options[i].name, "config"))
			continue;

		val = json_object_get(opt_config, options[i].name);
		if (!val)
			continue;

		if (options[i].has_arg && json_is_string(val)) {
			char *s = strdup(json_string_value(val));

			if (!s)
				break;
			parse_arg(options[i].val, s);
			free(s);
		} else if (!options[i].has_arg && json_is_true(val))
			parse_arg(options[i].val, "");
		else
			applog(LOG_ERR, "JSON option %s invalid",
			       options[i].name);
	}
}

static void parse_cmdline(int argc, char *argv[])
{
	int key;

	while (1) {
		/* Every documented short option must appear in this string,
		 * otherwise only its long form is accepted */
		key = getopt_long(argc, argv, "a:c:Dg:hI:l:np:PQ:qr:R:s:t:u:v:w:?",
				  options, NULL);
		if (key < 0)
			break;

		parse_arg(key, optarg);
	}

	parse_config();
}

int main(int argc, char *argv[])
{
	struct thr_info *thr;
	unsigned int i;
	char name[32];
	struct cgpu_info *gpus = NULL, *cpus = NULL;

#ifdef WIN32
	opt_n_threads = num_processors = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */

	nDevs = clDevicesNum();

	/* Invert the value to determine if we manually set it in cmdline
	 * or disable gpu threads */
	if (nDevs)
		opt_n_threads = -opt_n_threads;

	rpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	parse_cmdline(argc, argv);

	/* -n/--ndevs is only known once the command line has been parsed */
	if (opt_ndevs) {
		applog(LOG_INFO, "%i", nDevs);
		return nDevs;
	}

	gpu_threads = nDevs * opt_g_threads;
	if (opt_n_threads < 0) {
		if (gpu_threads)
			opt_n_threads = 0;
		else
			opt_n_threads = -opt_n_threads;
	}

	if (!rpc_userpass) {
		if (!rpc_user || !rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
		if (!rpc_userpass)
			return 1;
		sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
	}

	if (unlikely(pthread_mutex_init(&time_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;

	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif

	work_restart = calloc(opt_n_threads + gpu_threads + 1, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	thr_info = calloc(opt_n_threads + 2 + gpu_threads, sizeof(*thr));
	if (!thr_info)
		return 1;

	/* init workio thread info */
	work_thr_id = opt_n_threads + gpu_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = opt_n_threads + gpu_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;

	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);

	if (opt_n_threads) {
		cpus = calloc(num_processors, sizeof(struct cgpu_info));
		if (unlikely(!cpus)) {
			applog(LOG_ERR, "Failed to calloc cpus");
			return 1;
		}
	}
	if (gpu_threads) {
		gpus = calloc(nDevs, sizeof(struct cgpu_info));
		if (unlikely(!gpus)) {
			applog(LOG_ERR, "Failed to calloc gpus");
			return 1;
		}
	}

	/* start GPU mining threads */
	for (i = 0; i < gpu_threads; i++) {
		int gpu = gpu_from_thr_id(i);

		thr = &thr_info[i];
		thr->id = i;
		gpus[gpu].is_gpu = 1;
		gpus[gpu].cpu_gpu = gpu;
		thr->cgpu = &gpus[gpu];
		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting gpu mining threads");
			return 1;
		}

		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
	}
	applog(LOG_INFO, "%d gpu miner threads started", i);

	/* start CPU mining threads */
	for (i = gpu_threads; i < gpu_threads + opt_n_threads; i++) {
		int cpu = cpu_from_thr_id(i);

		thr = &thr_info[i];
		thr->id = i;
		cpus[cpu].cpu_gpu = cpu;
		thr->cgpu = &cpus[cpu];
		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting cpu mining threads");
			return 1;
		}

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
	       "using SHA256 '%s' algorithm.",
	       opt_n_threads,
	       algo_names[opt_algo]);

	/* Reset the counters as they will be wrong till all threads are started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);

	curl_global_cleanup();
	if (gpu_threads)
		free(gpus);
	if (opt_n_threads)
		free(cpus);

	applog(LOG_INFO, "workio thread dead, exiting.");

	return 0;
}