main.c
/*
 * Copyright 2011 Con Kolivas
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <curses.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <stdarg.h>
#include <assert.h>
#include <signal.h>
#ifndef WIN32
#include <sys/resource.h>
#endif
#include <ccan/opt/opt.h>
#include <jansson.h>
#include <curl/curl.h>
#include "compat.h"
#include "miner.h"
#include "findnonce.h"
#include "ocl.h"

#define PROGRAM_NAME "cgminer"
#define DEF_RPC_URL "http://127.0.0.1:8332/"
#define DEF_RPC_USERNAME "rpcuser"
#define DEF_RPC_PASSWORD "rpcpass"
#define DEF_RPC_USERPASS DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD

#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>
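
/* Demote mining threads so they only consume otherwise-idle CPU time:
 * prefer SCHED_IDLE, falling back to SCHED_BATCH on kernels where
 * SCHED_IDLE is unavailable. */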
static inline void drop_policy(void)
{
	struct sched_param param = { 0 };	/* priority must be 0 for these policies */

#ifdef SCHED_BATCH
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* sizeof(set), not sizeof(&set): pass the size of the mask itself */
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif

enum workio_commands {
	WC_GET_WORK,
	WC_SUBMIT_WORK,
	WC_DIE,
};

struct workio_cmd {
	enum workio_commands cmd;
	struct thr_info *thr;
	union {
		struct work *work;
	} u;
};

enum sha256_algos {
	ALGO_C,			/* plain C */
	ALGO_4WAY,		/* parallel SSE2 */
	ALGO_VIA,		/* VIA padlock */
	ALGO_CRYPTOPP,		/* Crypto++ (C) */
	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
	ALGO_SSE2_64,		/* SSE2 for x86_64 */
};

static const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
};

bool opt_debug = false;
bool opt_protocol = false;
bool want_longpoll = true;
bool have_longpoll = false;
bool use_syslog = false;
static bool opt_quiet = false;
static int opt_retries = -1;
static int opt_fail_pause = 5;
static int opt_log_interval = 5;
bool opt_log_output = false;
static bool opt_dynamic = true;
static int opt_queue = 1;
int opt_vectors;
int opt_worksize;
int opt_scantime = 60;
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;
static int opt_g_threads = 2;
static int opt_device;
static int total_devices;
static bool gpu_devices[16];
static int gpu_threads;
static bool forced_n_threads;
static int opt_n_threads;
static int mining_threads;
static int num_processors;
static int scan_intensity = 4;

static char *rpc_url;
static char *rpc_userpass;
static char *rpc_user, *rpc_pass;

struct thr_info *thr_info;
static int work_thr_id;
int longpoll_thr_id;
static int stage_thr_id;
static int watchdog_thr_id;
struct work_restart *work_restart = NULL;

static pthread_mutex_t hash_lock;
static pthread_mutex_t qd_lock;
static pthread_mutex_t stgd_lock;
static pthread_mutex_t curses_lock;
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;
int hw_errors;
static int total_queued, total_staged, lp_staged;
static bool localgen = false;
static unsigned int getwork_requested;
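
/* The block currently being worked on is identified by the first 36 hex
 * characters (18 bytes) of its header data: the version plus the leading
 * bytes of the previous-block hash. These buffers cache that prefix as hex
 * so new blocks can be detected by a cheap string comparison. */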
static char current_block[37];
static char longpoll_block[37];
static char blank[37];

static void applog_and_exit(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vapplog(LOG_ERR, fmt, ap);
	va_end(ap);
	exit(1);
}

/* FIXME: Use asprintf for better errors. */
static char *set_algo(const char *arg, enum sha256_algos *algo)
{
	enum sha256_algos i;

	for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
		if (algo_names[i] && !strcmp(arg, algo_names[i])) {
			*algo = i;
			return NULL;
		}
	}
	return "Unknown algorithm";
}

static void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
{
	strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
}

static char *set_int_range(const char *arg, int *i, int min, int max)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < min || *i > max)
		return "Value out of range";
	return NULL;
}

static char *set_int_0_to_9999(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_0_to_14(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 14);
}

static char *force_nthreads_int(const char *arg, int *i)
{
	forced_n_threads = true;
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_0_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 10);
}

static char *set_int_1_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 10);
}

static char *set_devices(const char *arg, int *i)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < 0 || *i > 15)
		return "Invalid GPU device number";
	total_devices++;
	gpu_devices[*i] = true;
	return NULL;
}

static char *set_url(const char *arg, char **p)
{
	opt_set_charp(arg, p);
	if (strncmp(arg, "http://", 7) &&
	    strncmp(arg, "https://", 8))
		return "URL must start with http:// or https://";
	return NULL;
}

static char *set_vector(const char *arg, int *i)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i != 1 && *i != 2 && *i != 4)
		return "Valid vectors are 1, 2 or 4";
	return NULL;
}
static char *enable_debug(bool *flag)
{
	*flag = true;
	/* Turn on verbose output, too. */
	opt_log_output = true;
	return NULL;
}
/* These options are available from config file or commandline */
static struct opt_table opt_config_table[] = {
	OPT_WITH_ARG("--algo|-a",
		set_algo, show_algo, &opt_algo,
		"Specify sha256 implementation:\n"
		"\tc\t\tLinux kernel sha256, implemented in C"
#ifdef WANT_SSE2_4WAY
		"\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
		"\n\tvia\t\tVIA padlock implementation"
#endif
		"\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
		"\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
		"\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
		),
	OPT_WITH_ARG("--cpu-threads|-t",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		"Number of miner CPU threads"),
	OPT_WITHOUT_ARG("--debug|-D",
		enable_debug, &opt_debug,
		"Enable debug output"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--device|-d",
		set_devices, NULL, &opt_device,
		"Select device to use (repeat -d for multiple devices, default: all)"),
	OPT_WITH_ARG("--gpu-threads|-g",
		set_int_0_to_10, opt_show_intval, &opt_g_threads,
		"Number of threads per GPU (0 - 10)"),
	OPT_WITH_ARG("--intensity|-I",
		set_int_0_to_14, opt_show_intval, &scan_intensity,
		"Intensity of GPU scanning (0 - 14)"),
#endif
	OPT_WITH_ARG("--log|-l",
		set_int_0_to_9999, opt_show_intval, &opt_log_interval,
		"Interval in seconds between log output"),
#ifdef HAVE_OPENCL
	OPT_WITHOUT_ARG("--no-dynamic|-n",
		opt_set_invbool, &opt_dynamic,
		"Disable dynamic adjustment of intensity which normally maintains desktop interactivity"),
#endif
	OPT_WITHOUT_ARG("--no-longpoll",
		opt_set_invbool, &want_longpoll,
		"Disable X-Long-Polling support"),
	OPT_WITH_ARG("--pass|-p",
		opt_set_charp, NULL, &rpc_pass,
		"Password for bitcoin JSON-RPC server"),
	OPT_WITHOUT_ARG("--protocol-dump|-P",
		opt_set_bool, &opt_protocol,
		"Verbose dump of protocol-level activities"),
	OPT_WITH_ARG("--queue|-Q",
		set_int_1_to_10, opt_show_intval, &opt_queue,
		"Number of extra work items to queue (1 - 10)"),
	OPT_WITHOUT_ARG("--quiet|-q",
		opt_set_bool, &opt_quiet,
		"Disable per-thread hashmeter output"),
	OPT_WITH_ARG("--retries|-r",
		opt_set_intval, opt_show_intval, &opt_retries,
		"Number of times to retry if a JSON-RPC call fails before giving up (-1 means never)"),
	OPT_WITH_ARG("--retry-pause|-R",
		set_int_0_to_9999, opt_show_intval, &opt_fail_pause,
		"Number of seconds to pause between retries"),
	OPT_WITH_ARG("--scan-time|-s",
		set_int_0_to_9999, opt_show_intval, &opt_scantime,
		"Upper bound on time spent scanning current work, in seconds"),
#ifdef HAVE_SYSLOG_H
	OPT_WITHOUT_ARG("--syslog",
		opt_set_bool, &use_syslog,
		"Use system log for output messages (default: standard error)"),
#endif
	OPT_WITH_ARG("--url|-o",
		set_url, opt_show_charp, &rpc_url,
		"URL for bitcoin JSON-RPC server"),
	OPT_WITH_ARG("--user|-u",
		opt_set_charp, NULL, &rpc_user,
		"Username for bitcoin JSON-RPC server"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--vectors|-v",
		set_vector, NULL, &opt_vectors,
		"Override detected optimal vector width (1, 2 or 4)"),
#endif
	OPT_WITHOUT_ARG("--verbose",
		opt_set_bool, &opt_log_output,
		"Log verbose output to stderr as well as status output"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--worksize|-w",
		set_int_0_to_9999, opt_show_intval, &opt_worksize,
		"Override detected optimal worksize"),
#endif
	OPT_WITH_ARG("--userpass|-O",
		opt_set_charp, NULL, &rpc_userpass,
		"Username:Password pair for bitcoin JSON-RPC server"),
	OPT_ENDTABLE
};
static char *parse_config(json_t *config)
{
	static char err_buf[200];
	json_t *val;
	struct opt_table *opt;

	for (opt = opt_config_table; opt->type != OPT_END; opt++) {
		char *p, *name;

		/* We don't handle subtables. */
		assert(!(opt->type & OPT_SUBTABLE));

		/* Pull apart the option name(s). */
		name = strdup(opt->names);
		for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
			char *err;

			/* Ignore short options. */
			if (p[1] != '-')
				continue;

			val = json_object_get(config, p + 2);
			if (!val)
				continue;

			if ((opt->type & OPT_HASARG) && json_is_string(val)) {
				err = opt->cb_arg(json_string_value(val),
						  opt->u.arg);
			} else if ((opt->type & OPT_NOARG) && json_is_true(val)) {
				err = opt->cb(opt->u.arg);
			} else {
				err = "Invalid value";
			}
			if (err) {
				/* snprintf guards err_buf against overlong option names */
				snprintf(err_buf, sizeof(err_buf),
					 "Parsing JSON option %s: %s", p, err);
				return err_buf;
			}
		}
		free(name);
	}
	return NULL;
}
static char *load_config(const char *arg, void *unused)
{
	json_error_t err;
	json_t *config;

	config = json_load_file(arg, 0, &err);
	if (!json_is_object(config))
		return "JSON decode of file failed";

	/* Parse the config now, so we can override it. That can keep pointers
	 * so don't free config object. */
	return parse_config(config);
}

static char *print_ndevs_and_exit(int *ndevs)
{
	printf("%i", *ndevs);
	exit(*ndevs);
}

/* These options are available from commandline only */
static struct opt_table opt_cmdline_table[] = {
	OPT_WITH_ARG("--config|-c",
		load_config, NULL, NULL,
		"Load a JSON-format configuration file\n"
		"See example-cfg.json for an example configuration."),
	OPT_WITHOUT_ARG("--help|-h",
		opt_usage_and_exit,
#ifdef HAVE_OPENCL
		"\nBuilt with CPU and GPU mining support.\n\n",
#else
		"\nBuilt with CPU mining support only.\n\n",
#endif
		"Print this message"),
	OPT_WITHOUT_ARG("--ndevs|-e",
		print_ndevs_and_exit, &nDevs,
		"Enumerate number of detected GPUs and exit"),
	OPT_ENDTABLE
};
static bool jobj_binary(const json_t *obj, const char *key,
			void *buf, size_t buflen)
{
	const char *hexstr;
	json_t *tmp;

	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;

	return true;
}

static bool work_decode(const json_t *val, struct work *work)
{
	if (unlikely(!jobj_binary(val, "midstate",
				  work->midstate, sizeof(work->midstate)))) {
		applog(LOG_ERR, "JSON inval midstate");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
		applog(LOG_ERR, "JSON inval data");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
		applog(LOG_ERR, "JSON inval hash1");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
		applog(LOG_ERR, "JSON inval target");
		goto err_out;
	}

	memset(work->hash, 0, sizeof(work->hash));
	return true;

err_out:
	return false;
}
static inline int gpu_from_thr_id(int thr_id)
{
	return thr_id % nDevs;
}

static inline int cpu_from_thr_id(int thr_id)
{
	return (thr_id - gpu_threads) % num_processors;
}

static WINDOW *mainwin, *statuswin, *logwin;
static double total_secs = 0.1;
static char statusline[256];
static int cpucursor, gpucursor, logstart, logcursor;
static bool curses_active = false;
static struct cgpu_info *gpus, *cpus;

/* Must be called with curses mutex lock held and curses_active */
static inline void __print_status(int thr_id)
{
	wmove(statuswin, 0, 0);
	wattron(statuswin, A_BOLD);
	wprintw(statuswin, " " PROGRAM_NAME " version " VERSION);
	wattroff(statuswin, A_BOLD);
	wmove(statuswin, 1, 0);
	whline(statuswin, '-', 80);
	wmove(statuswin, 2, 0);
	wprintw(statuswin, " %s", statusline);
	wclrtoeol(statuswin);
	wmove(statuswin, 3, 0);
	whline(statuswin, '-', 80);
	wmove(statuswin, logstart - 1, 0);
	whline(statuswin, '-', 80);

	if (thr_id >= 0 && thr_id < gpu_threads) {
		int gpu = gpu_from_thr_id(thr_id);
		struct cgpu_info *cgpu = &gpus[gpu];

		wmove(statuswin, gpucursor + gpu, 0);
		wprintw(statuswin, " GPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
			gpu, cgpu->total_mhashes / total_secs,
			cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
			cgpu->efficiency, cgpu->utility);
		wclrtoeol(statuswin);
	} else if (thr_id >= gpu_threads) {
		int cpu = cpu_from_thr_id(thr_id);
		struct cgpu_info *cgpu = &cpus[cpu];

		wmove(statuswin, cpucursor + cpu, 0);
		wprintw(statuswin, " CPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
			cpu, cgpu->total_mhashes / total_secs,
			cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
			cgpu->efficiency, cgpu->utility);
		wclrtoeol(statuswin);
	}
}

static void print_status(int thr_id)
{
	if (unlikely(!curses_active))
		return;

	pthread_mutex_lock(&curses_lock);
	__print_status(thr_id);
	wrefresh(statuswin);
	pthread_mutex_unlock(&curses_lock);
}

void log_curses(const char *f, va_list ap)
{
	if (unlikely(!curses_active))
		return;

	pthread_mutex_lock(&curses_lock);
	vw_printw(logwin, f, ap);
	wrefresh(logwin);
	pthread_mutex_unlock(&curses_lock);
}
static bool submit_fail = false;

static bool submit_upstream_work(const struct work *work)
{
	char *hexstr = NULL;
	json_t *val, *res;
	char s[345];
	bool rc = false;
	int thr_id = work->thr_id;
	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
	CURL *curl = curl_easy_init();

	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return rc;
	}

	/* build hex string */
	hexstr = bin2hex(work->data, sizeof(work->data));
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_upstream_work OOM");
		goto out_nofree;
	}

	/* build JSON-RPC request */
	sprintf(s,
		"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
		hexstr);

	if (opt_debug)
		applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);

	/* issue JSON-RPC request */
	val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
	if (unlikely(!val)) {
		applog(LOG_INFO, "submit_upstream_work json_rpc_call failed");
		if (!submit_fail) {
			submit_fail = true;
			applog(LOG_WARNING, "Upstream communication failure, caching submissions");
		}
		goto out;
	} else if (submit_fail) {
		submit_fail = false;
		applog(LOG_WARNING, "Upstream communication resumed, submitting work");
	}

	res = json_object_get(val, "result");

	/* Theoretically threads could race when modifying accepted and
	 * rejected values but the chance of two submits completing at the
	 * same time is negligible, so no extra locking is used */
	if (json_is_true(res)) {
		cgpu->accepted++;
		accepted++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
		if (!opt_quiet)
			applog(LOG_WARNING, "Share accepted from %sPU %d thread %d",
			       cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, thr_id);
	} else {
		cgpu->rejected++;
		rejected++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
		if (!opt_quiet)
			applog(LOG_WARNING, "Share rejected from %sPU %d thread %d",
			       cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, thr_id);
	}

	cgpu->utility = cgpu->accepted / (total_secs ? total_secs : 1) * 60;
	cgpu->efficiency = cgpu->getworks ? cgpu->accepted * 100.0 / cgpu->getworks : 0.0;

	if (!opt_quiet)
		print_status(thr_id);
	applog(LOG_INFO, "%sPU %d Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m",
	       cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, cgpu->getworks, cgpu->accepted,
	       cgpu->rejected, cgpu->hw_errors, cgpu->efficiency, cgpu->utility);

	json_decref(val);

	rc = true;
out:
	free(hexstr);
out_nofree:
	curl_easy_cleanup(curl);

	return rc;
}
static const char *rpc_req =
	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

static bool get_upstream_work(struct work *work)
{
	json_t *val;
	bool rc = false;
	CURL *curl = curl_easy_init();

	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return rc;
	}

	val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
			    want_longpoll, false);
	if (unlikely(!val)) {
		applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
		goto out;
	}

	rc = work_decode(json_object_get(val, "result"), work);

	json_decref(val);
out:
	curl_easy_cleanup(curl);

	return rc;
}

static void workio_cmd_free(struct workio_cmd *wc)
{
	if (!wc)
		return;

	switch (wc->cmd) {
	case WC_SUBMIT_WORK:
		free(wc->u.work);
		break;
	default: /* do nothing */
		break;
	}

	memset(wc, 0, sizeof(*wc));	/* poison */
	free(wc);
}
static void kill_work(void)
{
	struct workio_cmd *wc;
	struct thr_info *thr;
	unsigned int i;

	applog(LOG_INFO, "Received kill message");

	/* Kill the watchdog thread */
	thr = &thr_info[watchdog_thr_id];
	pthread_cancel(thr->pth);

	/* Stop the mining threads */
	for (i = 0; i < mining_threads; i++) {
		thr = &thr_info[i];
		tq_freeze(thr->q);
		/* No need to check if this succeeds or not */
		pthread_cancel(thr->pth);
	}

	/* Stop the others */
	thr = &thr_info[stage_thr_id];
	pthread_cancel(thr->pth);
	thr = &thr_info[longpoll_thr_id];
	pthread_cancel(thr->pth);

	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in kill_work");
		/* We're just trying to die anyway, so forget graceful */
		exit(1);
	}

	wc->cmd = WC_DIE;
	wc->thr = NULL;

	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in kill_work");
		exit(1);
	}
}

static void shutdown_cleanup(void)
{
	curl_global_cleanup();

	if (gpu_threads) {
		gpu_threads = 0;
		free(gpus);
	}

	if (opt_n_threads) {
		opt_n_threads = 0;
		free(cpus);
	}

	if (curses_active) {
		delwin(logwin);
		delwin(statuswin);
		delwin(mainwin);
		endwin();
		refresh();
		curses_active = false;
	}
}

static void sighandler(int sig)
{
	kill_work();
}
static void *get_work_thread(void *userdata)
{
	struct workio_cmd *wc = (struct workio_cmd *)userdata;
	struct work *ret_work;
	int failures = 0;

	pthread_detach(pthread_self());

	ret_work = calloc(1, sizeof(*ret_work));
	if (unlikely(!ret_work)) {
		applog(LOG_ERR, "Failed to calloc ret_work in get_work_thread");
		kill_work();
		goto out;
	}

	/* obtain new work from bitcoin via JSON-RPC */
	while (!get_upstream_work(ret_work)) {
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
			free(ret_work);
			kill_work();
			goto out;
		}

		/* pause, then restart work-request loop */
		applog(LOG_DEBUG, "json_rpc_call failed on get work, retry after %d seconds",
		       opt_fail_pause);
		sleep(opt_fail_pause);
	}

	/* send work to requesting thread */
	if (unlikely(!tq_push(thr_info[stage_thr_id].q, ret_work))) {
		applog(LOG_ERR, "Failed to tq_push work in get_work_thread");
		kill_work();
		free(ret_work);
	}

out:
	workio_cmd_free(wc);
	return NULL;
}

static bool workio_get_work(struct workio_cmd *wc)
{
	pthread_t get_thread;

	if (unlikely(pthread_create(&get_thread, NULL, get_work_thread, (void *)wc))) {
		applog(LOG_ERR, "Failed to create get_work_thread");
		return false;
	}
	return true;
}
static void *submit_work_thread(void *userdata)
{
	struct workio_cmd *wc = (struct workio_cmd *)userdata;
	int failures = 0;
	char *hexstr;

	pthread_detach(pthread_self());

	hexstr = bin2hex(wc->u.work->data, 36);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_work_thread OOM");
		goto out;
	}

	if (unlikely(strncmp(hexstr, current_block, 36))) {
		applog(LOG_WARNING, "Stale work detected, discarding");
		goto out_free;
	}

	/* submit solution to bitcoin via JSON-RPC */
	while (!submit_upstream_work(wc->u.work)) {
		if (unlikely(strncmp(hexstr, current_block, 36))) {
			applog(LOG_WARNING, "Stale work detected, discarding");
			goto out_free;
		}
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "Failed %d retries... terminating workio thread", opt_retries);
			kill_work();
			goto out_free;
		}

		/* pause, then restart work-request loop */
		applog(LOG_INFO, "json_rpc_call failed on submit_work, retry after %d seconds",
		       opt_fail_pause);
		sleep(opt_fail_pause);
	}

out_free:
	free(hexstr);
out:
	workio_cmd_free(wc);
	return NULL;
}

static bool workio_submit_work(struct workio_cmd *wc)
{
	pthread_t submit_thread;

	if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, (void *)wc))) {
		applog(LOG_ERR, "Failed to create submit_work_thread");
		return false;
	}
	return true;
}
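
/* total_staged counts work items sitting in the staging queue. lp_staged
 * tracks the temporary inflation added around a longpoll flush (see
 * flush_requests); work staged afterwards consumes that allowance instead
 * of inflating total_staged a second time. */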
static void inc_staged(int inc, bool lp)
{
	pthread_mutex_lock(&stgd_lock);
	if (lp) {
		lp_staged += inc;
		total_staged += inc;
	} else if (lp_staged)
		lp_staged--;
	else
		total_staged += inc;
	pthread_mutex_unlock(&stgd_lock);
}

static void dec_staged(int inc)
{
	pthread_mutex_lock(&stgd_lock);
	if (lp_staged)
		lp_staged -= inc;
	total_staged -= inc;
	pthread_mutex_unlock(&stgd_lock);
}

static int requests_staged(void)
{
	int ret;

	pthread_mutex_lock(&stgd_lock);
	ret = total_staged;
	pthread_mutex_unlock(&stgd_lock);
	return ret;
}

static void *stage_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	while (ok) {
		struct work *work = NULL;
		char *hexstr;

		work = tq_pop(mythr->q, NULL);
		if (unlikely(!work)) {
			applog(LOG_ERR, "Failed to tq_pop in stage_thread");
			ok = false;
			break;
		}

		hexstr = bin2hex(work->data, 36);
		if (unlikely(!hexstr)) {
			applog(LOG_ERR, "stage_thread OOM");
			break;
		}

		/* current_block is blanked out on successful longpoll */
		if (likely(strncmp(current_block, blank, 36))) {
			if (unlikely(strncmp(hexstr, current_block, 36))) {
				if (want_longpoll)
					applog(LOG_WARNING, "New block detected on network before receiving longpoll, flushing work queue");
				else
					applog(LOG_WARNING, "New block detected on network, flushing work queue");
				/* As we can't flush the work from here, signal
				 * the wakeup thread to restart all the
				 * threads */
				work_restart[stage_thr_id].restart = 1;
			}
		} else
			memcpy(longpoll_block, hexstr, 36);
		memcpy(current_block, hexstr, 36);
		free(hexstr);

		if (unlikely(!tq_push(thr_info[0].q, work))) {
			applog(LOG_ERR, "Failed to tq_push work in stage_thread");
			ok = false;
			break;
		}
		inc_staged(1, false);
	}

	tq_freeze(mythr->q);
	return NULL;
}
static void *workio_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;

	while (ok) {
		struct workio_cmd *wc;

		/* wait for workio_cmd sent to us, on our queue */
		wc = tq_pop(mythr->q, NULL);
		if (unlikely(!wc)) {
			applog(LOG_ERR, "Failed to tq_pop in workio_thread");
			ok = false;
			break;
		}

		/* process workio_cmd */
		switch (wc->cmd) {
		case WC_GET_WORK:
			ok = workio_get_work(wc);
			break;
		case WC_SUBMIT_WORK:
			ok = workio_submit_work(wc);
			break;
		case WC_DIE:
		default:
			ok = false;
			break;
		}
	}

	tq_freeze(mythr->q);
	return NULL;
}
static void hashmeter(int thr_id, struct timeval *diff,
		      unsigned long hashes_done)
{
	struct timeval temp_tv_end, total_diff;
	double khashes, secs;
	double local_secs;
	double utility, efficiency = 0.0;
	static double local_mhashes_done = 0;
	static double rolling_local = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;
	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;

	/* Update the last time this thread reported in */
	if (thr_id >= 0)
		gettimeofday(&thr_info[thr_id].last, NULL);

	/* Don't bother calculating anything if we're not displaying it */
	if (opt_quiet || !opt_log_interval)
		return;

	khashes = hashes_done / 1000.0;
	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);

	if (thr_id >= 0 && secs) {
		/* So we can call hashmeter from a non worker thread */
		if (opt_debug)
			applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
			       thr_id, hashes_done, khashes / secs);
		cgpu->local_mhashes += local_mhashes;
		cgpu->total_mhashes += local_mhashes;
	}

	/* Totals are updated by all threads, so they would race without
	 * locking */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&temp_tv_end, NULL);
	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	total_mhashes_done += local_mhashes;
	local_mhashes_done += local_mhashes;
	if (total_diff.tv_sec < opt_log_interval)
		/* Only update the total every opt_log_interval seconds */
		goto out_unlock;
	gettimeofday(&total_tv_end, NULL);

	/* Use a rolling average by faking an exponential decay over ~5 log
	 * intervals */
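	/* Each update gives the newest interval a weight of 1/1.9 (~53%) and
	 * the previous rolling value 0.9/1.9 (~47%), a cheap exponential
	 * moving average. */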
	rolling_local = ((rolling_local * 0.9) + local_mhashes_done) / 1.9;

	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
	total_secs = (double)total_diff.tv_sec +
		((double)total_diff.tv_usec / 1000000.0);
	utility = accepted / (total_secs ? total_secs : 1) * 60;
	efficiency = getwork_requested ? accepted * 100.0 / getwork_requested : 0.0;

	sprintf(statusline, "[(%ds):%.1f (avg):%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
		opt_log_interval, rolling_local / local_secs, total_mhashes_done / total_secs,
		getwork_requested, accepted, rejected, hw_errors, efficiency, utility);

	print_status(thr_id);
	applog(LOG_INFO, "%s", statusline);

	local_mhashes_done = 0;
out_unlock:
	pthread_mutex_unlock(&hash_lock);
}
/* This is overkill, but at least we'll know accurately how much work is
 * queued to prevent ever being left without work */
static void inc_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued++;
	pthread_mutex_unlock(&qd_lock);
}

static void dec_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued--;
	pthread_mutex_unlock(&qd_lock);
	dec_staged(1);
}

static int requests_queued(void)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = total_queued;
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

/* All work is queued flagged as being for thread 0 and then the mining
 * thread flags it as its own */
static bool queue_request(void)
{
	struct thr_info *thr = &thr_info[0];
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in queue_request");
		return false;
	}

	wc->cmd = WC_GET_WORK;
	wc->thr = thr;

	/* send work request to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push in queue_request");
		workio_cmd_free(wc);
		return false;
	}
	inc_queued();
	return true;
}
static bool discard_request(void)
{
	struct thr_info *thr = &thr_info[0];
	struct work *work_heap;

	/* Just in case we fell in a hole and missed a queue filling */
	if (unlikely(!requests_queued())) {
		applog(LOG_WARNING, "Tried to discard_request with nil queued");
		return true;
	}

	work_heap = tq_pop(thr->q, NULL);
	if (unlikely(!work_heap)) {
		applog(LOG_ERR, "Failed to tq_pop in discard_request");
		return false;
	}
	free(work_heap);
	dec_queued();
	return true;
}

static void flush_requests(bool longpoll)
{
	int i, extra;

	extra = requests_queued();
	/* When flushing from longpoll, we don't know the new work yet. When
	 * not flushing from longpoll, the first work item is valid so do not
	 * discard it */
	if (longpoll)
		memcpy(current_block, blank, 36);
	else
		extra--;

	/* Temporarily increase the staged count so that get_work thinks there
	 * is work available instead of making threads reuse existing work */
	if (extra >= mining_threads)
		inc_staged(mining_threads, true);
	else
		inc_staged(extra, true);

	for (i = 0; i < extra; i++) {
		/* Queue a whole batch of new requests */
		if (unlikely(!queue_request())) {
			applog(LOG_ERR, "Failed to queue requests in flush_requests");
			kill_work();
			break;
		}
		/* Pop off the old requests. Cancelling the requests would be
		 * better but is tricky */
		if (unlikely(!discard_request())) {
			applog(LOG_ERR, "Failed to discard requests in flush_requests");
			kill_work();
			break;
		}
	}
}
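
/* Fetch the next work item for a mining thread. `queued` indicates the
 * caller already has a request in flight, so no extra request is queued
 * here. If nothing is staged, the current work is rolled locally rather
 * than leaving the miner idle. */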
static bool get_work(struct work *work, bool queued)
{
	struct timeval now;
	struct timespec abstime = {};
	struct thr_info *thr = &thr_info[0];
	struct work *work_heap;
	bool ret = false;
	int failures = 0;

	getwork_requested++;
retry:
	gettimeofday(&now, NULL);
	abstime.tv_sec = now.tv_sec + 60;

	if (unlikely(!queued && !queue_request())) {
		applog(LOG_WARNING, "Failed to queue_request in get_work");
		goto out;
	}

	if (!requests_staged()) {
		uint32_t *work_ntime;
		uint32_t ntime;

		/* Only print this message once each time we shift to localgen */
		if (!localgen)
			applog(LOG_WARNING, "Server not providing work fast enough, generating work locally");
		localgen = true;
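		/* "Roll" the work by bumping its ntime field: the big-endian
		 * timestamp sits at byte offset 68 of the block header (after
		 * the 4-byte version, 32-byte previous hash and 32-byte
		 * merkle root), and incrementing it yields a fresh, valid
		 * search space for the same work item. */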
		work_ntime = (uint32_t *)(work->data + 68);
		ntime = be32toh(*work_ntime);
		ntime++;
		*work_ntime = htobe32(ntime);
		ret = true;
		goto out;
	} else if (localgen) {
		localgen = false;
		applog(LOG_WARNING, "Resumed retrieving work from server");
	}

	/* Wait for 1st response, or get cached response. We really should
	 * never time out on the pop request but something might go amiss :/
	 */
	work_heap = tq_pop(thr->q, &abstime);
	if (unlikely(!work_heap)) {
		applog(LOG_INFO, "Failed to tq_pop in get_work");
		goto out;
	}
	dec_queued();

	memcpy(work, work_heap, sizeof(*work));
	ret = true;
	free(work_heap);

out:
	if (unlikely(ret == false)) {
		if ((opt_retries >= 0) && (++failures > opt_retries)) {
			applog(LOG_ERR, "Failed %d times to get_work", failures);
			return ret;
		}
		applog(LOG_DEBUG, "Retrying after %d seconds", opt_fail_pause);
		sleep(opt_fail_pause);
		goto retry;
	}

	return ret;
}
static bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
{
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in submit_work_sync");
		return false;
	}

	wc->u.work = malloc(sizeof(*work_in));
	if (unlikely(!wc->u.work)) {
		applog(LOG_ERR, "Failed to malloc work in submit_work_sync");
		goto err_out;
	}

	wc->cmd = WC_SUBMIT_WORK;
	wc->thr = thr;
	memcpy(wc->u.work, work_in, sizeof(*work_in));

	/* send solution to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in submit_work_sync");
		goto err_out;
	}
	return true;
err_out:
	workio_cmd_free(wc);
	return false;
}
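
/* Store the winning nonce in the last 4 bytes of the 80-byte block header
 * (offset 64 + 12 in work->data) and hand the share to the workio thread. */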
bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	work->data[64 + 12 + 0] = (nonce >> 0) & 0xff;
	work->data[64 + 12 + 1] = (nonce >> 8) & 0xff;
	work->data[64 + 12 + 2] = (nonce >> 16) & 0xff;
	work->data[64 + 12 + 3] = (nonce >> 24) & 0xff;
	return submit_work_sync(thr, work);
}

static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;
	bool needs_work = true;
	/* Try to cycle approximately 5 times before each log update */
	const unsigned long cycle = opt_log_interval / 5 ? : 1;
	/* Request the next work item at 2/3 of the scantime */
	const unsigned int request_interval = opt_scantime * 2 / 3 ? : 1;
	const unsigned long request_nonce = MAXTHREADS / 3 * 2;
	bool requested = true;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* CPU affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(thr_id - gpu_threads, cpu_from_thr_id(thr_id));

	while (1) {
		struct work work __attribute__((aligned(128)));
		unsigned long hashes_done;
		struct timeval tv_workstart, tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		if (needs_work) {
			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(&work, requested))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
				       "mining thread %d", thr_id);
				goto out;
			}
			mythr->cgpu->getworks++;
			work.thr_id = thr_id;
			needs_work = requested = false;
			work.blk.nonce = 0;
		}
		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done,
					work.blk.nonce);
			break;

#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done,
						 work.blk.nonce);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done,
						  work.blk.nonce);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done,
					  work.blk.nonce);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done,
					       work.blk.nonce);
			break;

#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done,
					    work.blk.nonce);
			break;
#endif
		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);

		hashes_done -= work.blk.nonce;
		hashmeter(thr_id, &diff, hashes_done);
		work.blk.nonce += hashes_done;

		/* adjust max_nonce to meet target cycle time */
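		/* Extrapolate from the measured hashrate so the next scan
		 * takes roughly `cycle` seconds, clamping just below the
		 * 32-bit nonce ceiling. */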
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec && diff.tv_sec != cycle) {
			max64 = work.blk.nonce +
				((uint64_t)hashes_done * cycle) / diff.tv_sec;
		} else
			max64 = work.blk.nonce + hashes_done;
		if (max64 > 0xfffffffaULL)
			max64 = 0xfffffffaULL;
		max_nonce = max64;

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			if (opt_debug)
				applog(LOG_DEBUG, "CPU %d found something?", cpu_from_thr_id(thr_id));
			if (unlikely(!submit_work_sync(mythr, &work))) {
				applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
				break;
			}
			work.blk.nonce += 4;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work.blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}

		if (diff.tv_sec > opt_scantime || work_restart[thr_id].restart ||
		    work.blk.nonce >= MAXTHREADS - hashes_done)
			needs_work = true;
	}

out:
	tq_freeze(mythr->q);

	return NULL;
}
enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};

#ifdef HAVE_OPENCL
static _clState *clStates[16];
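
/* Bind the precalculated block context as kernel arguments. Each
 * clSetKernelArg result is OR-ed into status so a single check at the end
 * catches any failure. The argument set depends on which kernel the device
 * was initialised with: phatk on hardware with the bitalign extension,
 * poclbm otherwise. */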
static inline cl_int queue_kernel_parameters(_clState *clState, dev_blk_ctx *blk)
{
	cl_kernel *kernel = &clState->kernel;
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);

	if (clState->hasBitAlign) {
		/* Parameters for phatk kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W16);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W17);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->PreVal4);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->T1);
	} else {
		/* Parameters for poclbm kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	}
	status |= clSetKernelArg(*kernel, num++, sizeof(clState->outputBuffer),
				 (void *)&clState->outputBuffer);

	return status;
}
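
/* An intensity of I gives 2^(15 + I) work items per kernel launch; each
 * work item scans `vectors` nonces, so one launch covers threads * vectors
 * hashes. */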
static void set_threads_hashes(unsigned int vectors, unsigned int *threads,
			       unsigned int *hashes, size_t *globalThreads)
{
	*globalThreads = *threads = 1 << (15 + scan_intensity);
	*hashes = *threads * vectors;
}
  1332. static void *gpuminer_thread(void *userdata)
  1333. {
  1334. const unsigned long cycle = opt_log_interval / 5 ? : 1;
  1335. struct timeval tv_start, tv_end, diff;
  1336. struct thr_info *mythr = userdata;
  1337. const int thr_id = mythr->id;
  1338. uint32_t *res, *blank_res;
  1339. double gpu_ms_average = 7;
  1340. size_t globalThreads[1];
  1341. size_t localThreads[1];
  1342. cl_int status;
  1343. _clState *clState = clStates[thr_id];
  1344. const cl_kernel *kernel = &clState->kernel;
  1345. struct work *work = malloc(sizeof(struct work));
  1346. unsigned int threads = 1 << (15 + scan_intensity);
  1347. unsigned const int vectors = clState->preferred_vwidth;
  1348. unsigned int hashes = threads * vectors;
  1349. unsigned int hashes_done = 0;
  1350. /* Request the next work item at 2/3 of the scantime */
  1351. unsigned const int request_interval = opt_scantime * 2 / 3 ? : 1;
  1352. unsigned const long request_nonce = MAXTHREADS / 3 * 2;
  1353. bool requested = true;
  1354. pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
  1355. res = calloc(BUFFERSIZE, 1);
  1356. blank_res = calloc(BUFFERSIZE, 1);
  1357. if (!res || !blank_res) {
  1358. applog(LOG_ERR, "Failed to calloc in gpuminer_thread");
  1359. goto out;
  1360. }
  1361. gettimeofday(&tv_start, NULL);
  1362. globalThreads[0] = threads;
  1363. localThreads[0] = clState->work_size;
  1364. diff.tv_sec = ~0UL;
  1365. gettimeofday(&tv_end, NULL);

	while (1) {
		struct timeval tv_workstart, tv_gpustart, tv_gpuend;
		suseconds_t gpu_us;

		gettimeofday(&tv_gpustart, NULL);
		/* This finish flushes the readbuffer set with CL_FALSE later */
		clFinish(clState->commandQueue);
		gettimeofday(&tv_gpuend, NULL);
		timeval_subtract(&diff, &tv_gpuend, &tv_gpustart);
		gpu_us = diff.tv_sec * 1000000 + diff.tv_usec;
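		/* Decaying average: the new sample is weighted 1/1.9 and the
		 * previous average 0.9/1.9, so a single slow pass is damped
		 * but the estimate still tracks the GPU within a few passes. */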
		gpu_ms_average = ((gpu_us / 1000) + gpu_ms_average * 0.9) / 1.9;
		if (opt_dynamic) {
			/* Keep the average GPU pass time between roughly 3ms
			 * and 7ms: back the intensity off when the GPU is out
			 * for too long, raise it when the system is idle. */
			if (gpu_ms_average > 7) {
				if (scan_intensity > 0)
					scan_intensity--;
				set_threads_hashes(vectors, &threads, &hashes, globalThreads);
			} else if (gpu_ms_average < 3) {
				if (scan_intensity < 14)
					scan_intensity++;
				set_threads_hashes(vectors, &threads, &hashes, globalThreads);
			}
		}

		if (diff.tv_sec > opt_scantime || work->blk.nonce >= MAXTHREADS - hashes || work_restart[thr_id].restart) {
			/* Ignore any reads since we're getting new work and queue a clean buffer */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS))
				{ applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
			memset(res, 0, BUFFERSIZE);

			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(work, requested))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
					"gpu mining thread %d", mythr->id);
				goto out;
			}
			mythr->cgpu->getworks++;
			work->thr_id = thr_id;
			requested = false;

			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
			work->blk.nonce = 0;
			work_restart[thr_id].restart = 0;

			if (opt_debug)
				applog(LOG_DEBUG, "getwork thread %d", thr_id);
			/* Flushes the writebuffer set with CL_FALSE above */
			clFinish(clState->commandQueue);
		}
		status = queue_kernel_parameters(clState, &work->blk);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: clSetKernelArg of all params failed."); goto out; }

		/* MAXBUFFERS entry is used as a flag to say nonces exist */
		if (res[MAXBUFFERS]) {
			/* Clear the buffer again */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS))
				{ applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
			if (opt_debug)
				applog(LOG_DEBUG, "GPU %d found something?", gpu_from_thr_id(thr_id));
			postcalc_hash_async(mythr, work, res);
			memset(res, 0, BUFFERSIZE);
			clFinish(clState->commandQueue);
		}
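
		/* Neither of the next two enqueues blocks: the clFinish at
		 * the top of the loop is what completes them, so the CPU-side
		 * bookkeeping below overlaps with GPU execution. */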
		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
				globalThreads, localThreads, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)"); goto out; }

		status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
				BUFFERSIZE, res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: clEnqueueReadBuffer failed."); goto out; }

		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashes_done += hashes;
		work->blk.nonce += hashes;
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec >= cycle) {
			hashmeter(thr_id, &diff, hashes_done);
			gettimeofday(&tv_start, NULL);
			hashes_done = 0;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work->blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in gpuminer_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}
	}
out:
	tq_freeze(mythr->q);

	return NULL;
}
#endif /* HAVE_OPENCL */

static void restart_threads(bool longpoll)
{
	int i;

	/* Discard old queued requests and get new ones */
	flush_requests(longpoll);

	for (i = 0; i < mining_threads; i++)
		work_restart[i].restart = 1;
}
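
/* Long polling: issue a JSON-RPC call that the server holds open until
 * a new block appears on the network, then flush the work queue so the
 * miners stop hashing on the stale block as soon as possible. */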
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		lp_url = hdr_path;
		hdr_path = NULL;
	}

	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (rpc_url[strlen(rpc_url) - 1] != '/')
			need_slash = true;

		lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		goto out;
	}

	while (1) {
		struct timeval start, end;
		json_t *val;

		gettimeofday(&start, NULL);
		val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			failures = 0;
			json_decref(val);

			/* Keep track of who ordered a restart_threads to make
			 * sure it's only done once per new block */
			if (likely(!strncmp(longpoll_block, blank, 36) ||
				   !strncmp(longpoll_block, current_block, 36))) {
				applog(LOG_WARNING, "LONGPOLL detected new block on network, flushing work queue");
				restart_threads(true);
			} else
				applog(LOG_WARNING, "LONGPOLL received after new block already detected");
		} else {
			/* Some pools regularly drop the longpoll request so
			 * only see this as longpoll failure if it happens
			 * immediately and just restart it the rest of the
			 * time. */
			gettimeofday(&end, NULL);
			if (end.tv_sec - start.tv_sec > 30)
				continue;
			if (failures++ < 10) {
				applog(LOG_WARNING,
				       "longpoll failed, sleeping for 30s");
				sleep(30);
			} else {
				applog(LOG_ERR,
				       "longpoll failed, ending thread");
				goto out;
			}
		}
		memcpy(longpoll_block, current_block, 36);
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}
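
/* Cancel a stalled CPU mining thread and start a replacement in its
 * slot.  Called from the watchdog when a thread has been idle for too
 * long. */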
static void reinit_cputhread(int thr_id)
{
	struct thr_info *thr = &thr_info[thr_id];

	tq_freeze(thr->q);
	if (unlikely(pthread_cancel(thr->pth))) {
		applog(LOG_ERR, "Failed to pthread_cancel in reinit_cputhread");
		goto failed_out;
	}
	if (unlikely(pthread_join(thr->pth, NULL))) {
		applog(LOG_ERR, "Failed to pthread_join in reinit_cputhread");
		goto failed_out;
	}

	applog(LOG_INFO, "Reinit CPU thread %d", thr_id);
	tq_thaw(thr->q);

	gettimeofday(&thr->last, NULL);

	if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
		applog(LOG_ERR, "thread %d create failed", thr_id);
		goto failed_out;
	}
	return;

failed_out:
	kill_work();
}

#ifdef HAVE_OPENCL
static void reinit_gputhread(int thr_id)
{
	int gpu = gpu_from_thr_id(thr_id);
	struct thr_info *thr = &thr_info[thr_id];
	char name[256];

	tq_freeze(thr->q);
	if (unlikely(pthread_cancel(thr->pth))) {
		applog(LOG_ERR, "Failed to pthread_cancel in reinit_gputhread");
		goto failed_out;
	}
	if (unlikely(pthread_join(thr->pth, NULL))) {
		applog(LOG_ERR, "Failed to pthread_join in reinit_gputhread");
		goto failed_out;
	}
	free(clStates[thr_id]);

	applog(LOG_INFO, "Reinit GPU thread %d", thr_id);
	tq_thaw(thr->q);

	clStates[thr_id] = initCl(gpu, name, sizeof(name));
	if (!clStates[thr_id]) {
		applog(LOG_ERR, "Failed to reinit GPU thread %d", thr_id);
		goto failed_out;
	}
	applog(LOG_INFO, "initCl() finished. Found %s", name);

	gettimeofday(&thr->last, NULL);

	if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
		applog(LOG_ERR, "thread %d create failed", thr_id);
		goto failed_out;
	}
	return;

failed_out:
	kill_work();
}
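
/* Thread ids below gpu_threads belong to GPU miners; everything else
 * is a CPU miner (see the thread startup loops in main). */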
static void reinit_thread(int thr_id)
{
	if (thr_id < gpu_threads)
		reinit_gputhread(thr_id);
	else
		reinit_cputhread(thr_id);
}
#else
static void reinit_thread(int thr_id)
{
	reinit_cputhread(thr_id);
}
#endif

/* Makes sure the hashmeter keeps going even if mining threads stall, updates
 * the screen at regular intervals, and restarts threads if they appear to have
 * died. */
static void *watchdog_thread(void *userdata)
{
	const unsigned int interval = opt_log_interval / 2 ? : 1;
	struct timeval zero_tv;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	memset(&zero_tv, 0, sizeof(struct timeval));

	while (1) {
		int x, y, logx, logy, i;
		struct timeval now;

		sleep(interval);
		if (requests_queued() < opt_queue)
			queue_request();

		hashmeter(-1, &zero_tv, 0);

		if (curses_active) {
			pthread_mutex_lock(&curses_lock);
			getmaxyx(mainwin, y, x);
			getmaxyx(logwin, logy, logx);
			y -= logcursor;
			/* Detect screen size change */
			if (x != logx || y != logy)
				wresize(logwin, y, x);
			for (i = 0; i < mining_threads; i++)
				__print_status(i);
			redrawwin(logwin);
			redrawwin(statuswin);
			pthread_mutex_unlock(&curses_lock);
		}

		if (unlikely(work_restart[stage_thr_id].restart)) {
			restart_threads(false);
			work_restart[stage_thr_id].restart = 0;
		}

		gettimeofday(&now, NULL);
		for (i = 0; i < mining_threads; i++) {
			struct thr_info *thr = &thr_info[i];

			/* Do not kill threads waiting on longpoll staged work */
			if (now.tv_sec - thr->last.tv_sec > 60 && !lp_staged) {
				applog(LOG_ERR, "Attempting to restart thread %d, idle for more than 60 seconds", i);
				/* Create one mandatory work item */
				inc_staged(1, true);
				if (unlikely(!queue_request())) {
					applog(LOG_ERR, "Failed to queue_request in watchdog_thread");
					kill_work();
					break;
				}
				reinit_thread(i);
				applog(LOG_WARNING, "Thread %d restarted", i);
			}
		}
	}

	return NULL;
}

int main(int argc, char *argv[])
{
	unsigned int i, j = 0, x, y;
	struct sigaction handler;
	struct thr_info *thr;
	char name[256];

	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&qd_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&stgd_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&curses_lock, NULL)))
		return 1;

	handler.sa_handler = &sighandler;
	sigaction(SIGTERM, &handler, 0);
	sigaction(SIGINT, &handler, 0);

	for (i = 0; i < 36; i++) {
		strcat(blank, "0");
		strcat(current_block, "0");
		strcat(longpoll_block, "0");
	}

#ifdef WIN32
	opt_n_threads = num_processors = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */

#ifdef HAVE_OPENCL
	for (i = 0; i < 16; i++)
		gpu_devices[i] = false;
	nDevs = clDevicesNum();
	if (nDevs < 0)
		return 1;
#endif
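
	/* If any GPUs were detected, default to GPU-only mining; CPU
	 * threads are restored further down if the GPUs end up disabled. */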
	if (nDevs)
		opt_n_threads = 0;

	rpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	opt_register_table(opt_config_table,
			   "Options for both config file and command line");
	opt_register_table(opt_cmdline_table,
			   "Options for command line only");
	opt_parse(&argc, argv, applog_and_exit);
	if (argc != 1) {
		applog(LOG_ERR, "Unexpected extra commandline arguments");
		return 1;
	}

	if (total_devices) {
		if (total_devices > nDevs) {
			applog(LOG_ERR, "More devices specified than exist");
			return 1;
		}
		for (i = 0; i < 16; i++)
			if (gpu_devices[i] && i + 1 > nDevs) {
				applog(LOG_ERR, "Command line options set a device that doesn't exist");
				return 1;
			}
		gpu_threads = total_devices * opt_g_threads;
	} else {
		gpu_threads = nDevs * opt_g_threads;
		for (i = 0; i < nDevs; i++)
			gpu_devices[i] = true;
	}

	if (!gpu_threads && !forced_n_threads) {
		/* Maybe they turned GPU off; restore default CPU threads. */
		opt_n_threads = num_processors;
	}
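
	/* Lay out the curses status area: rows 0-3 are the summary
	 * header, followed by one row per GPU, one row per CPU when CPU
	 * mining is enabled, and then the scrolling log window. */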
	logcursor = 4;
	mining_threads = opt_n_threads + gpu_threads;
	gpucursor = logcursor;
	cpucursor = gpucursor + nDevs;
	logstart = cpucursor + (opt_n_threads ? num_processors : 0) + 1;
	logcursor = logstart + 1;

	if (!rpc_userpass) {
		if (!rpc_user || !rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
		if (!rpc_userpass)
			return 1;
		sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
	}

	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif
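
	/* Thread slots 0 .. mining_threads-1 are the miners; four extra
	 * slots follow for the workio (+0), longpoll (+1), watchdog (+2)
	 * and stage (+3) threads, hence the "+ 4" in the allocations
	 * below. */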
	work_restart = calloc(mining_threads + 4, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	thr_info = calloc(mining_threads + 4, sizeof(*thr));
	if (!thr_info)
		return 1;

	/* init workio thread info */
	work_thr_id = mining_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = mining_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;

	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);

	if (opt_n_threads) {
		cpus = calloc(num_processors, sizeof(struct cgpu_info));
		if (unlikely(!cpus)) {
			applog(LOG_ERR, "Failed to calloc cpus");
			return 1;
		}
	}
	if (gpu_threads) {
		gpus = calloc(nDevs, sizeof(struct cgpu_info));
		if (unlikely(!gpus)) {
			applog(LOG_ERR, "Failed to calloc gpus");
			return 1;
		}
	}

	stage_thr_id = mining_threads + 3;
	thr = &thr_info[stage_thr_id];
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start stage thread */
	if (pthread_create(&thr->pth, NULL, stage_thread, thr)) {
		applog(LOG_ERR, "stage thread create failed");
		return 1;
	}

	/* Flag the work as already staged, forcing the mining threads to
	 * wait until we actually put something into the queue */
	inc_staged(mining_threads, true);

#ifdef HAVE_OPENCL
	i = 0;

	/* start GPU mining threads */
	for (j = 0; j < nDevs * opt_g_threads; j++) {
		int gpu = gpu_from_thr_id(j);

		/* Skip devices not set to work */
		if (!gpu_devices[gpu])
			continue;

		thr = &thr_info[i];
		thr->id = i;
		gpus[gpu].is_gpu = 1;
		gpus[gpu].cpu_gpu = gpu;
		thr->cgpu = &gpus[gpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting gpu mining threads");
			return 1;
		}

		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		gettimeofday(&thr->last, NULL);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		i++;
	}

	applog(LOG_INFO, "%d gpu miner threads started", gpu_threads);
#endif

	/* start CPU mining threads */
	for (i = gpu_threads; i < mining_threads; i++) {
		int cpu = cpu_from_thr_id(i);

		thr = &thr_info[i];
		thr->id = i;
		cpus[cpu].cpu_gpu = cpu;
		thr->cgpu = &cpus[cpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting cpu mining threads");
			return 1;
		}

		gettimeofday(&thr->last, NULL);

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
		"using SHA256 '%s' algorithm.",
		opt_n_threads,
		algo_names[opt_algo]);

	watchdog_thr_id = mining_threads + 2;
	thr = &thr_info[watchdog_thr_id];
	/* start wakeup thread */
	if (pthread_create(&thr->pth, NULL, watchdog_thread, NULL)) {
		applog(LOG_ERR, "wakeup thread create failed");
		return 1;
	}

	/* Reset the count, as it will be wrong until all threads have started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* Set up the ncurses interface */
	if (!opt_quiet) {
		mainwin = initscr();
		getmaxyx(mainwin, y, x);
		statuswin = newwin(logstart, x, 0, 0);
		logwin = newwin(y - logcursor, 0, logcursor, 0);
		idlok(logwin, true);
		scrollok(logwin, true);
		leaveok(logwin, true);
		leaveok(statuswin, true);
		curses_active = true;
		for (i = 0; i < mining_threads; i++)
			print_status(i);
	}

	/* Now that everything's ready, put enough work in the queue */
	for (i = 0; i < opt_queue + mining_threads; i++) {
		if (unlikely(!queue_request())) {
			applog(LOG_ERR, "Failed to queue_request in main");
			return 1;
		}
	}

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);
	applog(LOG_INFO, "workio thread dead, exiting.");

	shutdown_cleanup();

	return 0;
}