main.c

/*
 * Copyright 2011 Con Kolivas
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <curses.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <stdarg.h>
#include <assert.h>
#ifndef WIN32
#include <sys/resource.h>
#endif
#include <ccan/opt/opt.h>
#include <jansson.h>
#include <curl/curl.h>
#include "compat.h"
#include "miner.h"
#include "findnonce.h"
#include "ocl.h"

#define PROGRAM_NAME "cgminer"
#define DEF_RPC_URL "http://127.0.0.1:8332/"
#define DEF_RPC_USERNAME "rpcuser"
#define DEF_RPC_PASSWORD "rpcpass"
#define DEF_RPC_USERPASS DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD

#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>
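
/* Drop the calling thread to SCHED_IDLE if available, falling back to
 * SCHED_BATCH, so CPU mining yields to other system load. */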
static inline void drop_policy(void)
{
	struct sched_param param;

	param.sched_priority = 0;
#ifdef SCHED_BATCH
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif
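
/* Commands passed to the workio thread, which serialises all getwork
 * requests and share submissions through a single queue. */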
enum workio_commands {
	WC_GET_WORK,
	WC_SUBMIT_WORK,
	WC_DIE,
};

struct workio_cmd {
	enum workio_commands cmd;
	struct thr_info *thr;
	union {
		struct work *work;
	} u;
};

enum sha256_algos {
	ALGO_C,			/* plain C */
	ALGO_4WAY,		/* parallel SSE2 */
	ALGO_VIA,		/* VIA padlock */
	ALGO_CRYPTOPP,		/* Crypto++ (C) */
	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
	ALGO_SSE2_64,		/* SSE2 for x86_64 */
};

static const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
};

bool opt_debug = false;
bool opt_protocol = false;
bool want_longpoll = true;
bool have_longpoll = false;
bool use_syslog = false;
static bool opt_quiet = false;
static int opt_retries = -1;
static int opt_fail_pause = 5;
static int opt_log_interval = 5;
bool opt_log_output = false;
static int opt_queue = 1;
int opt_vectors;
int opt_worksize;
int opt_scantime = 60;
static const bool opt_time = true;
#ifdef WANT_X8664_SSE2
static enum sha256_algos opt_algo = ALGO_SSE2_64;
#else
static enum sha256_algos opt_algo = ALGO_C;
#endif
static int nDevs;
static int opt_g_threads = 2;
static int opt_device;
static int total_devices;
static bool gpu_devices[16];
static int gpu_threads;
static bool forced_n_threads;
static int opt_n_threads;
static int mining_threads;
static int num_processors;
static int scan_intensity = 4;
static char *rpc_url;
static char *rpc_userpass;
static char *rpc_user, *rpc_pass;
struct thr_info *thr_info;
static int work_thr_id;
int longpoll_thr_id;
static int stage_thr_id;
struct work_restart *work_restart = NULL;
static pthread_mutex_t hash_lock;
static pthread_mutex_t qd_lock;
static pthread_mutex_t stgd_lock;
static double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static int accepted, rejected;
int hw_errors;
static int total_queued, total_staged, lp_staged;
static bool localgen = false;
static unsigned int getwork_requested;
static char current_block[37];
static char longpoll_block[37];
static char blank[37];

static void applog_and_exit(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vapplog(LOG_ERR, fmt, ap);
	va_end(ap);
	exit(1);
}

/* FIXME: Use asprintf for better errors. */
static char *set_algo(const char *arg, enum sha256_algos *algo)
{
	enum sha256_algos i;

	for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
		if (algo_names[i] && !strcmp(arg, algo_names[i])) {
			*algo = i;
			return NULL;
		}
	}
	return "Unknown algorithm";
}

static void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
{
	strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
}

static char *set_int_range(const char *arg, int *i, int min, int max)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < min || *i > max)
		return "Value out of range";
	return NULL;
}

static char *set_int_0_to_9999(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_0_to_14(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 14);
}

static char *force_nthreads_int(const char *arg, int *i)
{
	forced_n_threads = true;
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_0_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 10);
}

static char *set_int_1_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 10);
}

static char *set_devices(const char *arg, int *i)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < 0 || *i > 15)
		return "Invalid GPU device number";
	total_devices++;
	gpu_devices[*i] = true;
	return NULL;
}

static char *set_url(const char *arg, char **p)
{
	opt_set_charp(arg, p);
	if (strncmp(arg, "http://", 7) &&
	    strncmp(arg, "https://", 8))
		return "URL must start with http:// or https://";
	return NULL;
}

static char *set_vector(const char *arg, int *i)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i != 1 && *i != 2 && *i != 4)
		return "Valid vectors are 1, 2 or 4";
	return NULL;
}

static char *enable_debug(bool *flag)
{
	*flag = true;
	/* Turn on verbose output, too. */
	opt_log_output = true;
	return NULL;
}

/* These options are available from config file or commandline */
static struct opt_table opt_config_table[] = {
	OPT_WITH_ARG("--algo|-a",
		set_algo, show_algo, &opt_algo,
		"Specify sha256 implementation:\n"
		"\tc\t\tLinux kernel sha256, implemented in C"
#ifdef WANT_SSE2_4WAY
		"\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
		"\n\tvia\t\tVIA padlock implementation"
#endif
		"\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
		"\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8664_SSE2
		"\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
#endif
		),
	OPT_WITH_ARG("--cpu-threads|-t",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		"Number of miner CPU threads"),
	OPT_WITHOUT_ARG("--debug|-D",
		enable_debug, &opt_debug,
		"Enable debug output"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--device|-d",
		set_devices, NULL, &opt_device,
		"Select device to use (repeat -d for multiple devices, default: all)"),
	OPT_WITH_ARG("--gpu-threads|-g",
		set_int_0_to_10, opt_show_intval, &opt_g_threads,
		"Number of threads per GPU (0 - 10)"),
	OPT_WITH_ARG("--intensity|-I",
		set_int_0_to_14, opt_show_intval, &scan_intensity,
		"Intensity of GPU scanning (0 - 14)"),
#endif
	OPT_WITH_ARG("--log|-l",
		set_int_0_to_9999, opt_show_intval, &opt_log_interval,
		"Interval in seconds between log output"),
	OPT_WITHOUT_ARG("--no-longpoll",
		opt_set_invbool, &want_longpoll,
		"Disable X-Long-Polling support"),
	OPT_WITH_ARG("--pass|-p",
		opt_set_charp, NULL, &rpc_pass,
		"Password for bitcoin JSON-RPC server"),
	OPT_WITHOUT_ARG("--protocol-dump|-P",
		opt_set_bool, &opt_protocol,
		"Verbose dump of protocol-level activities"),
	OPT_WITH_ARG("--queue|-Q",
		set_int_1_to_10, opt_show_intval, &opt_queue,
		"Number of extra work items to queue (1 - 10)"),
	OPT_WITHOUT_ARG("--quiet|-q",
		opt_set_bool, &opt_quiet,
		"Disable per-thread hashmeter output"),
	OPT_WITH_ARG("--retries|-r",
		opt_set_intval, opt_show_intval, &opt_retries,
		"Number of times to retry before giving up, if JSON-RPC call fails (-1 means never)"),
	OPT_WITH_ARG("--retry-pause|-R",
		set_int_0_to_9999, opt_show_intval, &opt_fail_pause,
		"Number of seconds to pause, between retries"),
	OPT_WITH_ARG("--scan-time|-s",
		set_int_0_to_9999, opt_show_intval, &opt_scantime,
		"Upper bound on time spent scanning current work, in seconds"),
#ifdef HAVE_SYSLOG_H
	OPT_WITHOUT_ARG("--syslog",
		opt_set_bool, &use_syslog,
		"Use system log for output messages (default: standard error)"),
#endif
	OPT_WITH_ARG("--url|-o",
		set_url, opt_show_charp, &rpc_url,
		"URL for bitcoin JSON-RPC server"),
	OPT_WITH_ARG("--user|-u",
		opt_set_charp, NULL, &rpc_user,
		"Username for bitcoin JSON-RPC server"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--vectors|-v",
		set_vector, NULL, &opt_vectors,
		"Override detected optimal vector width (1, 2 or 4)"),
#endif
	OPT_WITHOUT_ARG("--verbose",
		opt_set_bool, &opt_log_output,
		"Log verbose output to stderr as well as status output"),
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--worksize|-w",
		set_int_0_to_9999, opt_show_intval, &opt_worksize,
		"Override detected optimal worksize"),
#endif
	OPT_WITH_ARG("--userpass|-O",
		opt_set_charp, NULL, &rpc_userpass,
		"Username:Password pair for bitcoin JSON-RPC server"),
	OPT_ENDTABLE
};
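
/* Walk the config option table and apply any matching keys found in the
 * JSON config file, using the same callbacks as the command line. */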
static char *parse_config(json_t *config)
{
	static char err_buf[200];
	json_t *val;
	struct opt_table *opt;

	for (opt = opt_config_table; opt->type != OPT_END; opt++) {
		char *p, *name;

		/* We don't handle subtables. */
		assert(!(opt->type & OPT_SUBTABLE));

		/* Pull apart the option name(s). */
		name = strdup(opt->names);
		for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
			char *err;

			/* Ignore short options. */
			if (p[1] != '-')
				continue;

			val = json_object_get(config, p+2);
			if (!val)
				continue;

			if ((opt->type & OPT_HASARG) && json_is_string(val)) {
				err = opt->cb_arg(json_string_value(val),
					opt->u.arg);
			} else if ((opt->type&OPT_NOARG) && json_is_true(val)) {
				err = opt->cb(opt->u.arg);
			} else {
				err = "Invalid value";
			}
			if (err) {
				sprintf(err_buf, "Parsing JSON option %s: %s",
					p, err);
				return err_buf;
			}
		}
		free(name);
	}
	return NULL;
}

static char *load_config(const char *arg, void *unused)
{
	json_error_t err;
	json_t *config;

	config = json_load_file(arg, &err);
	if (!json_is_object(config))
		return "JSON decode of file failed";

	/* Parse the config now, so we can override it. That can keep pointers
	 * so don't free config object. */
	return parse_config(config);
}

static char *print_ndevs_and_exit(int *ndevs)
{
	printf("%i", *ndevs);
	exit(*ndevs);
}

/* These options are available from commandline only */
static struct opt_table opt_cmdline_table[] = {
	OPT_WITH_ARG("--config|-c",
		load_config, NULL, NULL,
		"Load a JSON-format configuration file\n"
		"See example-cfg.json for an example configuration."),
	OPT_WITHOUT_ARG("--help|-h",
		opt_usage_and_exit,
#ifdef HAVE_OPENCL
		"\nBuilt with CPU and GPU mining support.\n\n",
#else
		"\nBuilt with CPU mining support only.\n\n",
#endif
		"Print this message"),
	OPT_WITHOUT_ARG("--ndevs|-n",
		print_ndevs_and_exit, &nDevs,
		"Display number of detected GPUs and exit"),
	OPT_ENDTABLE
};

static bool jobj_binary(const json_t *obj, const char *key,
			void *buf, size_t buflen)
{
	const char *hexstr;
	json_t *tmp;

	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;

	return true;
}
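
/* Decode the hex-encoded midstate, data, hash1 and target fields of a
 * getwork response into the work structure. */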
static bool work_decode(const json_t *val, struct work *work)
{
	if (unlikely(!jobj_binary(val, "midstate",
				  work->midstate, sizeof(work->midstate)))) {
		applog(LOG_ERR, "JSON inval midstate");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
		applog(LOG_ERR, "JSON inval data");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
		applog(LOG_ERR, "JSON inval hash1");
		goto err_out;
	}

	if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
		applog(LOG_ERR, "JSON inval target");
		goto err_out;
	}

	memset(work->hash, 0, sizeof(work->hash));

	return true;

err_out:
	return false;
}

static inline int gpu_from_thr_id(int thr_id)
{
	return thr_id % nDevs;
}

static inline int cpu_from_thr_id(int thr_id)
{
	return (thr_id - gpu_threads) % num_processors;
}

static WINDOW *mainwin, *statuswin, *logwin;
static double total_secs = 0.1;
static char statusline[256];
static int cpucursor, gpucursor, logstart, logcursor;
static bool curses_active = false;
static struct cgpu_info *gpus, *cpus;

static inline void print_status(int thr_id)
{
	int x;

	if (unlikely(!curses_active))
		return;

	wmove(statuswin, 0, 0);
	wattron(statuswin, A_BOLD);
	wprintw(statuswin, PROGRAM_NAME " version " VERSION);
	wattroff(statuswin, A_BOLD);
	wmove(statuswin, 1, 0);
	whline(statuswin, '-', 80);
	wmove(statuswin, 2, 0);
	wprintw(statuswin, "Totals: %s", statusline);
	wclrtoeol(statuswin);
	wmove(statuswin, 3, 0);
	whline(statuswin, '-', 80);
	wmove(statuswin, logstart - 1, 0);
	whline(statuswin, '-', 80);

	if (thr_id >= 0 && thr_id < gpu_threads) {
		int gpu = gpu_from_thr_id(thr_id);
		struct cgpu_info *cgpu = &gpus[gpu];

		wmove(statuswin, gpucursor + gpu, 0);
		wprintw(statuswin, "GPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
			gpu, cgpu->total_mhashes / total_secs,
			cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
			cgpu->efficiency, cgpu->utility);
		wclrtoeol(statuswin);
	} else if (thr_id >= gpu_threads) {
		int cpu = cpu_from_thr_id(thr_id);
		struct cgpu_info *cgpu = &cpus[cpu];

		wmove(statuswin, cpucursor + cpu, 0);
		wprintw(statuswin, "CPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
			cpu, cgpu->total_mhashes / total_secs,
			cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
			cgpu->efficiency, cgpu->utility);
		wclrtoeol(statuswin);
	}

	wrefresh(statuswin);
}

void log_curses(const char *f, va_list ap)
{
	int x, y, logx, logy;

	if (unlikely(!curses_active))
		return;

	getmaxyx(mainwin, y, x);
	getmaxyx(logwin, logy, logx);
	y -= logcursor;
	/* Detect screen size change */
	if (x != logx || y != logy) {
		wresize(logwin, y, x);
		redrawwin(logwin);
		redrawwin(statuswin);
	}
	vw_printw(logwin, f, ap);
	wrefresh(logwin);
}

static bool submit_fail = false;
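
/* Send a solved work item back to the server as a getwork call with the
 * data parameter filled in, and update the accepted/rejected counters
 * according to the result. */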
static bool submit_upstream_work(const struct work *work)
{
	char *hexstr = NULL;
	json_t *val, *res;
	char s[345];
	bool rc = false;
	struct cgpu_info *cgpu = thr_info[work->thr_id].cgpu;
	CURL *curl = curl_easy_init();

	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return rc;
	}

	/* build hex string */
	hexstr = bin2hex(work->data, sizeof(work->data));
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_upstream_work OOM");
		goto out_nofree;
	}

	/* build JSON-RPC request */
	sprintf(s,
		"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
		hexstr);

	if (opt_debug)
		applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);

	/* issue JSON-RPC request */
	val = json_rpc_call(curl, rpc_url, rpc_userpass, s, false, false);
	if (unlikely(!val)) {
		applog(LOG_INFO, "submit_upstream_work json_rpc_call failed");
		if (!submit_fail) {
			submit_fail = true;
			applog(LOG_WARNING, "Upstream communication failure, caching submissions");
		}
		goto out;
	} else if (submit_fail) {
		submit_fail = false;
		applog(LOG_WARNING, "Upstream communication resumed, submitting work");
	}

	res = json_object_get(val, "result");

	/* Theoretically threads could race when modifying accepted and
	 * rejected values but the chance of two submits completing at the
	 * same time is zero so there is no point adding extra locking */
	if (json_is_true(res)) {
		cgpu->accepted++;
		accepted++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
		if (!opt_quiet)
			applog(LOG_WARNING, "Share accepted from %sPU %d",
				cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu);
	} else {
		cgpu->rejected++;
		rejected++;
		if (opt_debug)
			applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
		if (!opt_quiet)
			applog(LOG_WARNING, "Share rejected from %sPU %d",
				cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu);
	}

	cgpu->utility = cgpu->accepted / ( total_secs ? total_secs : 1 ) * 60;
	cgpu->efficiency = cgpu->getworks ? cgpu->accepted * 100.0 / cgpu->getworks : 0.0;

	if (!opt_quiet)
		print_status(work->thr_id);
	applog(LOG_INFO, "%sPU %d Requested:%d Accepted:%d Rejected:%d HW errors:%d Efficiency:%.0f%% Utility:%.2f/m",
		cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu, cgpu->getworks, cgpu->accepted,
		cgpu->rejected, cgpu->hw_errors, cgpu->efficiency, cgpu->utility);

	json_decref(val);

	rc = true;
out:
	free(hexstr);
out_nofree:
	curl_easy_cleanup(curl);

	return rc;
}

static const char *rpc_req =
	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

static bool get_upstream_work(struct work *work)
{
	json_t *val;
	bool rc = false;
	CURL *curl = curl_easy_init();

	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return rc;
	}

	val = json_rpc_call(curl, rpc_url, rpc_userpass, rpc_req,
		want_longpoll, false);
	if (unlikely(!val)) {
		applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
		goto out;
	}

	rc = work_decode(json_object_get(val, "result"), work);

	json_decref(val);
out:
	curl_easy_cleanup(curl);

	return rc;
}

static void workio_cmd_free(struct workio_cmd *wc)
{
	if (!wc)
		return;

	switch (wc->cmd) {
	case WC_SUBMIT_WORK:
		free(wc->u.work);
		break;
	default: /* do nothing */
		break;
	}

	memset(wc, 0, sizeof(*wc)); /* poison */
	free(wc);
}

static void kill_work(void)
{
	struct workio_cmd *wc;

	applog(LOG_INFO, "Received kill message");

	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in kill_work");
		/* We're just trying to die anyway, so forget graceful */
		exit (1);
	}

	wc->cmd = WC_DIE;
	wc->thr = 0;

	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in kill_work");
		exit (1);
	}
}

static void *get_work_thread(void *userdata)
{
	struct workio_cmd *wc = (struct workio_cmd *)userdata;
	struct work *ret_work;
	int failures = 0;

	pthread_detach(pthread_self());
	ret_work = calloc(1, sizeof(*ret_work));
	if (unlikely(!ret_work)) {
		applog(LOG_ERR, "Failed to calloc ret_work in workio_get_work");
		kill_work();
		goto out;
	}

	/* obtain new work from bitcoin via JSON-RPC */
	while (!get_upstream_work(ret_work)) {
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
			free(ret_work);
			kill_work();
			goto out;
		}

		/* pause, then restart work-request loop */
		applog(LOG_DEBUG, "json_rpc_call failed on get work, retry after %d seconds",
			opt_fail_pause);
		sleep(opt_fail_pause);
	}

	/* send work to requesting thread */
	if (unlikely(!tq_push(thr_info[stage_thr_id].q, ret_work))) {
		applog(LOG_ERR, "Failed to tq_push work in workio_get_work");
		kill_work();
		free(ret_work);
	}

out:
	workio_cmd_free(wc);
	return NULL;
}

static bool workio_get_work(struct workio_cmd *wc)
{
	pthread_t get_thread;

	if (unlikely(pthread_create(&get_thread, NULL, get_work_thread, (void *)wc))) {
		applog(LOG_ERR, "Failed to create get_work_thread");
		return false;
	}
	return true;
}

static void *submit_work_thread(void *userdata)
{
	struct workio_cmd *wc = (struct workio_cmd *)userdata;
	int failures = 0;
	char *hexstr;

	pthread_detach(pthread_self());

	hexstr = bin2hex(wc->u.work->data, 36);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "submit_work_thread OOM");
		goto out;
	}
	if (unlikely(strncmp(hexstr, current_block, 36))) {
		applog(LOG_WARNING, "Stale work detected, discarding");
		goto out_free;
	}

	/* submit solution to bitcoin via JSON-RPC */
	while (!submit_upstream_work(wc->u.work)) {
		if (unlikely(strncmp(hexstr, current_block, 36))) {
			applog(LOG_WARNING, "Stale work detected, discarding");
			goto out_free;
		}
		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
			applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
			kill_work();
			goto out_free;
		}

		/* pause, then restart work-request loop */
		applog(LOG_INFO, "json_rpc_call failed on submit_work, retry after %d seconds",
			opt_fail_pause);
		sleep(opt_fail_pause);
	}

out_free:
	free(hexstr);
out:
	workio_cmd_free(wc);
	return NULL;
}

static bool workio_submit_work(struct workio_cmd *wc)
{
	pthread_t submit_thread;

	if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, (void *)wc))) {
		applog(LOG_ERR, "Failed to create submit_work_thread");
		return false;
	}
	return true;
}
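
/* Staged work accounting. lp_staged tracks the temporary bump added when
 * the queue is flushed on a new block; it is drained before normal staging
 * resumes. */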
static void inc_staged(int inc, bool lp)
{
	pthread_mutex_lock(&stgd_lock);
	if (lp) {
		lp_staged += inc;
		total_staged += inc;
	} else if (lp_staged)
		lp_staged--;
	else
		total_staged += inc;
	pthread_mutex_unlock(&stgd_lock);
}

static void dec_staged(int inc)
{
	pthread_mutex_lock(&stgd_lock);
	if (lp_staged)
		lp_staged -= inc;
	total_staged -= inc;
	pthread_mutex_unlock(&stgd_lock);
}

static int requests_staged(void)
{
	int ret;

	pthread_mutex_lock(&stgd_lock);
	ret = total_staged;
	pthread_mutex_unlock(&stgd_lock);
	return ret;
}
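
/* The stage thread takes fresh work from the getwork threads, watches the
 * block prefix for changes to catch missed longpolls, and passes the work
 * on to the queue the mining threads pull from. */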
static void *stage_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;
	unsigned int i;

	for (i = 0; i < 36; i++) {
		strcat(current_block, "0");
		strcat(blank, "0");
	}

	while (ok) {
		struct work *work = NULL;
		char *hexstr;

		work = tq_pop(mythr->q, NULL);
		if (unlikely(!work)) {
			applog(LOG_ERR, "Failed to tq_pop in stage_thread");
			ok = false;
			break;
		}
		hexstr = bin2hex(work->data, 36);
		if (unlikely(!hexstr)) {
			applog(LOG_ERR, "stage_thread OOM");
			break;
		}

		/* current_block is blanked out on successful longpoll */
		if (likely(strncmp(current_block, blank, 36))) {
			if (unlikely(strncmp(hexstr, current_block, 36))) {
				if (want_longpoll)
					applog(LOG_WARNING, "New block detected, possible missed longpoll, flushing work queue");
				else
					applog(LOG_WARNING, "New block detected, flushing work queue");
				/* As we can't flush the work from here, signal
				 * the wakeup thread to restart all the
				 * threads */
				work_restart[stage_thr_id].restart = 1;
			}
		} else
			memcpy(longpoll_block, hexstr, 36);
		memcpy(current_block, hexstr, 36);
		free(hexstr);

		if (unlikely(!tq_push(thr_info[0].q, work))) {
			applog(LOG_ERR, "Failed to tq_push work in stage_thread");
			ok = false;
			break;
		}
		inc_staged(1, false);
	}

	tq_freeze(mythr->q);
	return NULL;
}

static void *workio_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	bool ok = true;

	while (ok) {
		struct workio_cmd *wc;

		/* wait for workio_cmd sent to us, on our queue */
		wc = tq_pop(mythr->q, NULL);
		if (unlikely(!wc)) {
			applog(LOG_ERR, "Failed to tq_pop in workio_thread");
			ok = false;
			break;
		}

		/* process workio_cmd */
		switch (wc->cmd) {
		case WC_GET_WORK:
			ok = workio_get_work(wc);
			break;
		case WC_SUBMIT_WORK:
			ok = workio_submit_work(wc);
			break;
		case WC_DIE:
		default:
			ok = false;
			break;
		}
	}

	tq_freeze(mythr->q);
	return NULL;
}
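
/* Update per-thread and global hashrate statistics and emit the status
 * line once per log interval. */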
static void hashmeter(int thr_id, struct timeval *diff,
		      unsigned long hashes_done)
{
	struct timeval temp_tv_end, total_diff;
	double khashes, secs;
	double local_secs;
	double utility, efficiency = 0.0;
	static double local_mhashes_done = 0;
	static double rolling_local = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;

	/* Don't bother calculating anything if we're not displaying it */
	if (opt_quiet || !opt_log_interval)
		return;

	khashes = hashes_done / 1000.0;
	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);

	if (thr_id >= 0 && secs) {
		/* So we can call hashmeter from a non worker thread */
		struct cgpu_info *cgpu = thr_info[thr_id].cgpu;

		if (opt_debug)
			applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
				thr_id, hashes_done, hashes_done / secs);
		cgpu->local_mhashes += local_mhashes;
		cgpu->total_mhashes += local_mhashes;
	}

	/* Totals are updated by all threads so can race without locking */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&temp_tv_end, NULL);
	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	total_mhashes_done += local_mhashes;
	local_mhashes_done += local_mhashes;
	if (total_diff.tv_sec < opt_log_interval)
		/* Only update the total every opt_log_interval seconds */
		goto out_unlock;
	gettimeofday(&total_tv_end, NULL);

	/* Use a rolling average by faking an exponential decay over 5 * log */
	rolling_local = ((rolling_local * 0.9) + local_mhashes_done) / 1.9;

	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
	total_secs = (double)total_diff.tv_sec +
		((double)total_diff.tv_usec / 1000000.0);

	utility = accepted / ( total_secs ? total_secs : 1 ) * 60;
	efficiency = getwork_requested ? accepted * 100.0 / getwork_requested : 0.0;

	sprintf(statusline, "[(%ds):%.1f (avg):%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m] ",
		opt_log_interval, rolling_local / local_secs, total_mhashes_done / total_secs,
		getwork_requested, accepted, rejected, hw_errors, efficiency, utility);
	print_status(thr_id);
	applog(LOG_INFO, "[Rate (%ds):%.1f (avg):%.2f Mhash/s] [Requested:%d Accepted:%d Rejected:%d HW errors:%d Efficiency:%.0f%% Utility:%.2f/m]",
		opt_log_interval, rolling_local / local_secs, total_mhashes_done / total_secs,
		getwork_requested, accepted, rejected, hw_errors, efficiency, utility);
	local_mhashes_done = 0;
out_unlock:
	pthread_mutex_unlock(&hash_lock);
}

/* This is overkill, but at least we'll know accurately how much work is
 * queued to prevent ever being left without work */
static void inc_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued++;
	pthread_mutex_unlock(&qd_lock);
}

static void dec_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued--;
	pthread_mutex_unlock(&qd_lock);
	dec_staged(1);
}

static int requests_queued(void)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = total_queued;
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

/* All work is queued flagged as being for thread 0 and then the mining thread
 * flags it as its own */
static bool queue_request(void)
{
	struct thr_info *thr = &thr_info[0];
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in queue_request");
		return false;
	}

	wc->cmd = WC_GET_WORK;
	wc->thr = thr;

	/* send work request to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push in queue_request");
		workio_cmd_free(wc);
		return false;
	}
	inc_queued();
	return true;
}

static bool discard_request(void)
{
	struct thr_info *thr = &thr_info[0];
	struct work *work_heap;

	/* Just in case we fell in a hole and missed a queue filling */
	if (unlikely(!requests_queued())) {
		applog(LOG_WARNING, "Tried to discard_request with nil queued");
		return true;
	}

	work_heap = tq_pop(thr->q, NULL);
	if (unlikely(!work_heap)) {
		applog(LOG_ERR, "Failed to tq_pop in discard_request");
		return false;
	}

	free(work_heap);
	dec_queued();
	return true;
}
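
/* On a block change, queue a batch of fresh requests and discard the stale
 * queued work items they replace. */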
static void flush_requests(bool longpoll)
{
	int i, extra;

	extra = requests_queued();
	/* When flushing from longpoll, we don't know the new work yet. When
	 * not flushing from longpoll, the first work item is valid so do not
	 * discard it */
	if (longpoll)
		memcpy(current_block, blank, 36);
	else
		extra--;

	/* Temporarily increase the staged count so that get_work thinks there
	 * is work available instead of making threads reuse existing work */
	if (extra >= mining_threads)
		inc_staged(mining_threads, true);
	else
		inc_staged(extra, true);

	for (i = 0; i < extra; i++) {
		/* Queue a whole batch of new requests */
		if (unlikely(!queue_request())) {
			applog(LOG_ERR, "Failed to queue requests in flush_requests");
			kill_work();
			break;
		}
		/* Pop off the old requests. Cancelling the requests would be better
		 * but is tricky */
		if (unlikely(!discard_request())) {
			applog(LOG_ERR, "Failed to discard requests in flush_requests");
			kill_work();
			break;
		}
	}
}
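
/* Fetch work for a mining thread, falling back to rolling the ntime field
 * of the previous work (localgen) when the server cannot keep up. */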
static bool get_work(struct work *work, bool queued)
{
	struct thr_info *thr = &thr_info[0];
	struct work *work_heap;
	bool ret = false;
	int failures = 0;

	getwork_requested++;
retry:
	if (unlikely(!queued && !queue_request())) {
		applog(LOG_WARNING, "Failed to queue_request in get_work");
		goto out;
	}

	if (!requests_staged()) {
		uint32_t *work_ntime;
		uint32_t ntime;

		/* Only print this message once each time we shift to localgen */
		if (!localgen)
			applog(LOG_WARNING, "Server not providing work fast enough, generating work locally");
		localgen = true;
		work_ntime = (uint32_t *)(work->data + 68);
		ntime = be32toh(*work_ntime);
		ntime++;
		*work_ntime = htobe32(ntime);
		ret = true;
		goto out;
	} else if (localgen) {
		localgen = false;
		applog(LOG_WARNING, "Resumed retrieving work from server");
	}

	/* wait for 1st response, or get cached response */
	work_heap = tq_pop(thr->q, NULL);
	if (unlikely(!work_heap)) {
		applog(LOG_WARNING, "Failed to tq_pop in get_work");
		goto out;
	}
	dec_queued();

	memcpy(work, work_heap, sizeof(*work));

	ret = true;
	free(work_heap);

out:
	if (unlikely(ret == false)) {
		if ((opt_retries >= 0) && (++failures > opt_retries)) {
			applog(LOG_ERR, "Failed %d times to get_work", failures);
			return ret;
		}
		applog(LOG_DEBUG, "Retrying after %d seconds", opt_fail_pause);
		sleep(opt_fail_pause);
		goto retry;
	}
	return ret;
}

static bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
{
	struct workio_cmd *wc;

	/* fill out work request message */
	wc = calloc(1, sizeof(*wc));
	if (unlikely(!wc)) {
		applog(LOG_ERR, "Failed to calloc wc in submit_work_sync");
		return false;
	}

	wc->u.work = malloc(sizeof(*work_in));
	if (unlikely(!wc->u.work)) {
		applog(LOG_ERR, "Failed to calloc work in submit_work_sync");
		goto err_out;
	}

	wc->cmd = WC_SUBMIT_WORK;
	wc->thr = thr;
	memcpy(wc->u.work, work_in, sizeof(*work_in));

	/* send solution to workio thread */
	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
		applog(LOG_ERR, "Failed to tq_push work in submit_work_sync");
		goto err_out;
	}

	return true;
err_out:
	workio_cmd_free(wc);
	return false;
}

bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	work->data[64+12+0] = (nonce>>0) & 0xff;
	work->data[64+12+1] = (nonce>>8) & 0xff;
	work->data[64+12+2] = (nonce>>16) & 0xff;
	work->data[64+12+3] = (nonce>>24) & 0xff;
	return submit_work_sync(thr, work);
}
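
/* CPU mining thread: repeatedly scan a nonce range with the selected
 * sha256 implementation, adjusting max_nonce so each pass takes roughly
 * one log cycle, and submit any result found. */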
static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;
	bool needs_work = true;
	/* Try to cycle approximately 5 times before each log update */
	const unsigned long cycle = opt_log_interval / 5 ? : 1;
	/* Request the next work item at 2/3 of the scantime */
	unsigned const int request_interval = opt_scantime * 2 / 3 ? : 1;
	unsigned const long request_nonce = MAXTHREADS / 3 * 2;
	bool requested = true;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(thr_id - gpu_threads, cpu_from_thr_id(thr_id));

	while (1) {
		struct work work __attribute__((aligned(128)));
		unsigned long hashes_done;
		struct timeval tv_workstart, tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		if (needs_work) {
			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(&work, requested))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
					"mining thread %d", thr_id);
				goto out;
			}
			mythr->cgpu->getworks++;
			work.thr_id = thr_id;
			needs_work = requested = false;
			work.blk.nonce = 0;
		}
		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done,
					work.blk.nonce);
			break;

#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done,
						 work.blk.nonce);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done,
						  work.blk.nonce);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done,
					  work.blk.nonce);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done,
					       work.blk.nonce);
			break;

#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done,
					    work.blk.nonce);
			break;
#endif

		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);

		hashes_done -= work.blk.nonce;
		hashmeter(thr_id, &diff, hashes_done);
		work.blk.nonce += hashes_done;

		/* adjust max_nonce to meet target cycle time */
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec && diff.tv_sec != cycle) {
			max64 = work.blk.nonce +
				((uint64_t)hashes_done * cycle) / diff.tv_sec;
		} else
			max64 = work.blk.nonce + hashes_done;
		if (max64 > 0xfffffffaULL)
			max64 = 0xfffffffaULL;
		max_nonce = max64;

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			if (opt_debug)
				applog(LOG_DEBUG, "CPU %d found something?", cpu_from_thr_id(thr_id));
			if (unlikely(!submit_work_sync(mythr, &work))) {
				applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
				break;
			}
			work.blk.nonce += 4;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work.blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}

		if (diff.tv_sec > opt_scantime || work_restart[thr_id].restart ||
		    work.blk.nonce >= MAXTHREADS - hashes_done)
			needs_work = true;
	}

out:
	tq_freeze(mythr->q);

	return NULL;
}

enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};

#ifdef HAVE_OPENCL
static _clState *clStates[16];
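
/* Bind the precalculated block context to the OpenCL kernel arguments;
 * the argument list differs between the phatk and poclbm kernels,
 * selected by hasBitAlign. */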
static inline cl_int queue_kernel_parameters(_clState *clState, dev_blk_ctx *blk)
{
	cl_kernel *kernel = &clState->kernel;
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);

	if (clState->hasBitAlign == true) {
		/* Parameters for phatk kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W16);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W17);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->PreVal4);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->T1);
	} else {
		/* Parameters for poclbm kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	}
	status |= clSetKernelArg(*kernel, num++, sizeof(clState->outputBuffer),
				 (void *)&clState->outputBuffer);

	return status;
}
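
/* GPU mining thread: keep the OpenCL command queue fed with kernel
 * launches, read results back asynchronously, and hand any candidate
 * nonces to postcalc_hash_async. */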
  1273. static void *gpuminer_thread(void *userdata)
  1274. {
  1275. const unsigned long cycle = opt_log_interval / 5 ? : 1;
  1276. struct timeval tv_start, tv_end, diff;
  1277. struct thr_info *mythr = userdata;
  1278. const int thr_id = mythr->id;
  1279. uint32_t *res, *blank_res;
  1280. size_t globalThreads[1];
  1281. size_t localThreads[1];
  1282. cl_int status;
  1283. _clState *clState = clStates[thr_id];
  1284. const cl_kernel *kernel = &clState->kernel;
  1285. struct work *work = malloc(sizeof(struct work));
  1286. unsigned const int threads = 1 << (15 + scan_intensity);
  1287. unsigned const int vectors = clState->preferred_vwidth;
  1288. unsigned const int hashes = threads * vectors;
  1289. unsigned int hashes_done = 0;
  1290. /* Request the next work item at 2/3 of the scantime */
  1291. unsigned const int request_interval = opt_scantime * 2 / 3 ? : 1;
  1292. unsigned const long request_nonce = MAXTHREADS / 3 * 2;
  1293. bool requested = true;
  1294. res = calloc(BUFFERSIZE, 1);
  1295. blank_res = calloc(BUFFERSIZE, 1);
  1296. if (!res || !blank_res) {
  1297. applog(LOG_ERR, "Failed to calloc in gpuminer_thread");
  1298. goto out;
  1299. }
  1300. gettimeofday(&tv_start, NULL);
  1301. globalThreads[0] = threads;
  1302. localThreads[0] = clState->work_size;
  1303. diff.tv_sec = ~0UL;
  1304. gettimeofday(&tv_end, NULL);
  1305. while (1) {
  1306. struct timeval tv_workstart;
  1307. /* This finish flushes the readbuffer set with CL_FALSE later */
  1308. clFinish(clState->commandQueue);
  1309. if (diff.tv_sec > opt_scantime || work->blk.nonce >= MAXTHREADS - hashes || work_restart[thr_id].restart) {
  1310. /* Ignore any reads since we're getting new work and queue a clean buffer */
  1311. status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
  1312. BUFFERSIZE, blank_res, 0, NULL, NULL);
  1313. if (unlikely(status != CL_SUCCESS))
  1314. { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
  1315. memset(res, 0, BUFFERSIZE);
  1316. gettimeofday(&tv_workstart, NULL);
  1317. /* obtain new work from internal workio thread */
  1318. if (unlikely(!get_work(work, requested))) {
  1319. applog(LOG_ERR, "work retrieval failed, exiting "
  1320. "gpu mining thread %d", mythr->id);
  1321. goto out;
  1322. }
  1323. mythr->cgpu->getworks++;
  1324. work->thr_id = thr_id;
  1325. requested = false;
  1326. precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
  1327. work->blk.nonce = 0;
  1328. work_restart[thr_id].restart = 0;
  1329. if (opt_debug)
  1330. applog(LOG_DEBUG, "getwork thread %d", thr_id);
  1331. /* Flushes the writebuffer set with CL_FALSE above */
  1332. clFinish(clState->commandQueue);
  1333. }
  1334. status = queue_kernel_parameters(clState, &work->blk);
  1335. if (unlikely(status != CL_SUCCESS))
  1336. { applog(LOG_ERR, "Error: clSetKernelArg of all params failed."); goto out; }
  1337. /* MAXBUFFERS entry is used as a flag to say nonces exist */
  1338. if (res[MAXBUFFERS]) {
  1339. /* Clear the buffer again */
  1340. status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
  1341. BUFFERSIZE, blank_res, 0, NULL, NULL);
  1342. if (unlikely(status != CL_SUCCESS))
  1343. { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
  1344. if (opt_debug)
  1345. applog(LOG_DEBUG, "GPU %d found something?", gpu_from_thr_id(thr_id));
  1346. postcalc_hash_async(mythr, work, res);
  1347. memset(res, 0, BUFFERSIZE);
  1348. clFinish(clState->commandQueue);
  1349. }
		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
				globalThreads, localThreads, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)"); goto out; }

		status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
				BUFFERSIZE, res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)"); goto out; }

		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashes_done += hashes;
		work->blk.nonce += hashes;
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec >= cycle) {
			hashmeter(thr_id, &diff, hashes_done);
			gettimeofday(&tv_start, NULL);
			hashes_done = 0;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work->blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in gpuminer_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}
	}
out:
	tq_freeze(mythr->q);

	return NULL;
}
#endif /* HAVE_OPENCL */
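/* Tell every mining thread to abandon its current work; called when a new
 * block is detected so stale work is neither hashed nor submitted. */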
static void restart_threads(bool longpoll)
{
	int i;

	/* Discard old queued requests and get new ones */
	flush_requests(longpoll);

	for (i = 0; i < mining_threads; i++)
		work_restart[i].restart = 1;
}
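/* Long-polling thread: holds an HTTP request open against the pool's
 * longpoll URL and flushes the work queue whenever a reply arrives
 * indicating a new block. */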
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;
	unsigned int i;

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		lp_url = hdr_path;
		hdr_path = NULL;
	}
	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (rpc_url[strlen(rpc_url) - 1] != '/')
			need_slash = true;

		lp_url = malloc(strlen(rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		goto out;
	}
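	/* Prime longpoll_block with zeroes before the first reply is compared
	 * against blank and current_block below. */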
	for (i = 0; i < 36; i++)
		strcat(longpoll_block, "0");

	while (1) {
		json_t *val;

		val = json_rpc_call(curl, lp_url, rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			failures = 0;
			json_decref(val);

			/* Keep track of who ordered a restart_threads to make
			 * sure it's only done once per new block */
			if (likely(!strncmp(longpoll_block, blank, 36) ||
				   !strncmp(longpoll_block, current_block, 36))) {
				applog(LOG_WARNING, "LONGPOLL detected new block, flushing work queue");
				restart_threads(true);
			} else
				applog(LOG_WARNING, "LONGPOLL received - new block detected and work flushed already");
		} else {
			if (failures++ < 10) {
				/* Warn before sleeping so the delay is visible
				 * as it happens */
				applog(LOG_WARNING,
				       "longpoll failed, sleeping for 30s");
				sleep(30);
			} else {
				applog(LOG_ERR,
				       "longpoll failed, ending thread");
				goto out;
			}
		}
		memcpy(longpoll_block, current_block, 36);
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}
/* Makes sure the hashmeter keeps going even if mining threads stall */
static void *wakeup_thread(void *userdata)
{
	const unsigned int interval = opt_log_interval / 2 ? : 1;
	struct timeval zero_tv;

	memset(&zero_tv, 0, sizeof(struct timeval));

	while (1) {
		sleep(interval);
		if (requests_queued() < opt_queue)
			queue_request();
		hashmeter(-1, &zero_tv, 0);
		if (unlikely(work_restart[stage_thr_id].restart)) {
			restart_threads(false);
			work_restart[stage_thr_id].restart = 0;
		}
	}

	return NULL;
}
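/* Program entry point: parse the configuration, size the thread table, start
 * the service threads (workio, longpoll, stage, wakeup) and the GPU and CPU
 * miners, then hand the terminal over to curses and wait for the workio
 * thread to exit. */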
int main (int argc, char *argv[])
{
	struct thr_info *thr;
	unsigned int i, j = 0, x, y;
	char name[32];

	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&qd_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&stgd_lock, NULL)))
		return 1;

#ifdef WIN32
	opt_n_threads = num_processors = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */
#ifdef HAVE_OPENCL
	/* Mark all potential GPU slots as unused until the command line
	 * enables them */
	for (i = 0; i < 16; i++)
		gpu_devices[i] = false;
	nDevs = clDevicesNum();
	if (nDevs < 0)
		return 1;
#endif
	if (nDevs)
		opt_n_threads = 0;

	rpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	opt_register_table(opt_config_table,
			   "Options for both config file and command line");
	opt_register_table(opt_cmdline_table,
			   "Options for command line only");

	opt_parse(&argc, argv, applog_and_exit);
	if (argc != 1) {
		applog(LOG_ERR, "Unexpected extra commandline arguments");
		return 1;
	}

	if (total_devices) {
		if (total_devices > nDevs) {
			applog(LOG_ERR, "More devices specified than exist");
			return 1;
		}
		for (i = 0; i < 16; i++)
			if (gpu_devices[i] && i + 1 > nDevs) {
				applog(LOG_ERR, "Command line options set a device that doesn't exist");
				return 1;
			}
		gpu_threads = total_devices * opt_g_threads;
	} else {
		gpu_threads = nDevs * opt_g_threads;
		for (i = 0; i < nDevs; i++)
			gpu_devices[i] = true;
	}

	if (!gpu_threads && !forced_n_threads) {
		/* Maybe they turned GPU off; restore default CPU threads. */
		opt_n_threads = num_processors;
	}
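	/* Curses layout: four header rows, a status row per GPU device, a row
	 * per CPU when CPU mining is enabled, then the scrolling log window. */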
	logcursor = 4;
	mining_threads = opt_n_threads + gpu_threads;
	gpucursor = logcursor;
	cpucursor = gpucursor + nDevs;
	logstart = cpucursor + (opt_n_threads ? num_processors : 0) + 1;
	logcursor = logstart + 1;

	if (!rpc_userpass) {
		if (!rpc_user || !rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		rpc_userpass = malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
		if (!rpc_userpass)
			return 1;
		sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
	}

	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif
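	/* Thread table layout: slots [0, mining_threads) hold the miners,
	 * then mining_threads is the workio thread, +1 the longpoll thread,
	 * +2 the wakeup thread and +3 the stage thread, hence the four extra
	 * slots. */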
	work_restart = calloc(mining_threads + 4, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	thr_info = calloc(mining_threads + 4, sizeof(*thr));
	if (!thr_info)
		return 1;

	/* init workio thread info */
	work_thr_id = mining_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = mining_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);

	if (opt_n_threads) {
		cpus = calloc(num_processors, sizeof(struct cgpu_info));
		if (unlikely(!cpus)) {
			applog(LOG_ERR, "Failed to calloc cpus");
			return 1;
		}
	}
	if (gpu_threads) {
		gpus = calloc(nDevs, sizeof(struct cgpu_info));
		if (unlikely(!gpus)) {
			applog(LOG_ERR, "Failed to calloc gpus");
			return 1;
		}
	}

	stage_thr_id = mining_threads + 3;
	thr = &thr_info[stage_thr_id];
	thr->q = tq_new();
	if (!thr->q)
		return 1;
	/* start stage thread */
	if (pthread_create(&thr->pth, NULL, stage_thread, thr)) {
		applog(LOG_ERR, "stage thread create failed");
		return 1;
	}

	/* Flag the work as ready forcing the mining threads to wait till we
	 * actually put something into the queue */
	inc_staged(mining_threads, true);
#ifdef HAVE_OPENCL
	i = 0;

	/* start GPU mining threads */
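	/* j walks every potential (device, thread) pair; i advances only for
	 * devices that are actually enabled, so thr_info stays densely packed. */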
	for (j = 0; j < nDevs * opt_g_threads; j++) {
		int gpu = gpu_from_thr_id(j);

		/* Skip devices not set to work */
		if (!gpu_devices[gpu])
			continue;

		thr = &thr_info[i];
		thr->id = i;
		gpus[gpu].is_gpu = 1;
		gpus[gpu].cpu_gpu = gpu;
		thr->cgpu = &gpus[gpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting gpu mining threads");
			return 1;
		}

		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
		i++;
	}

	applog(LOG_INFO, "%d gpu miner threads started", gpu_threads);
#endif
	/* start CPU mining threads */
	for (i = gpu_threads; i < mining_threads; i++) {
		int cpu = cpu_from_thr_id(i);

		thr = &thr_info[i];
		thr->id = i;
		cpus[cpu].cpu_gpu = cpu;
		thr->cgpu = &cpus[cpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting cpu mining threads");
			return 1;
		}

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		pthread_detach(thr->pth);
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
	       "using SHA256 '%s' algorithm.",
	       opt_n_threads,
	       algo_names[opt_algo]);

	thr = &thr_info[mining_threads + 2];
	/* start wakeup thread */
	if (pthread_create(&thr->pth, NULL, wakeup_thread, NULL)) {
		applog(LOG_ERR, "wakeup thread create failed");
		return 1;
	}
	/* Restart count as it will be wrong till all threads are started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* Set up the ncurses interface */
	mainwin = initscr();
	statuswin = newwin(logstart, 80, 0, 0);
	getmaxyx(mainwin, y, x);
	logwin = newwin(y - logcursor, 0, logcursor, 0);
	idlok(logwin, true);
	scrollok(logwin, true);
	leaveok(logwin, true);
	leaveok(statuswin, true);
	curses_active = true;
	for (i = 0; i < mining_threads; i++)
		print_status(i);

	/* Now that everything's ready put enough work in the queue */
	for (i = 0; i < opt_queue + mining_threads; i++) {
		if (unlikely(!queue_request())) {
			applog(LOG_ERR, "Failed to queue_request in main");
			return 1;
		}
	}

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);

	curl_global_cleanup();
	if (gpu_threads)
		free(gpus);
	if (opt_n_threads)
		free(cpus);

	applog(LOG_INFO, "workio thread dead, exiting.");

	delwin(logwin);
	delwin(statuswin);
	endwin();
	refresh();

	return 0;
}