main.c 67 KB

  1. /*
  2. * Copyright 2011 Con Kolivas
  3. * Copyright 2010 Jeff Garzik
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the Free
  7. * Software Foundation; either version 2 of the License, or (at your option)
  8. * any later version. See COPYING for more details.
  9. */
  10. #include "config.h"
  11. #include <curses.h>
  12. #include <stdio.h>
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include <stdbool.h>
  16. #include <stdint.h>
  17. #include <unistd.h>
  18. #include <sys/time.h>
  19. #include <time.h>
  20. #include <math.h>
  21. #include <stdarg.h>
  22. #include <assert.h>
  23. #include <signal.h>
  24. #ifndef WIN32
  25. #include <sys/resource.h>
  26. #endif
  27. #include <ccan/opt/opt.h>
  28. #include <jansson.h>
  29. #include <curl/curl.h>
  30. #include "compat.h"
  31. #include "miner.h"
  32. #include "findnonce.h"
  33. #include "ocl.h"
  34. #define PROGRAM_NAME "cgminer"
  35. #define DEF_RPC_URL "http://127.0.0.1:8332/"
  36. #define DEF_RPC_USERNAME "rpcuser"
  37. #define DEF_RPC_PASSWORD "rpcpass"
  38. #define DEF_RPC_USERPASS DEF_RPC_USERNAME ":" DEF_RPC_PASSWORD
  39. #ifdef __linux /* Linux specific policy and affinity management */
  40. #include <sched.h>
  41. static inline void drop_policy(void)
  42. {
  43. struct sched_param param = { 0 };	/* sched_priority must be 0 for SCHED_IDLE/SCHED_BATCH */
  44. #ifdef SCHED_BATCH
  45. #ifdef SCHED_IDLE
  46. if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
  47. #endif
  48. sched_setscheduler(0, SCHED_BATCH, &param);
  49. #endif
  50. }
  51. static inline void affine_to_cpu(int id, int cpu)
  52. {
  53. cpu_set_t set;
  54. CPU_ZERO(&set);
  55. CPU_SET(cpu, &set);
  56. sched_setaffinity(0, sizeof(set), &set);
  57. applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
  58. }
  59. #else
  60. static inline void drop_policy(void)
  61. {
  62. }
  63. static inline void affine_to_cpu(int id, int cpu)
  64. {
  65. }
  66. #endif
  67. enum workio_commands {
  68. WC_GET_WORK,
  69. WC_SUBMIT_WORK,
  70. WC_DIE,
  71. };
  72. struct workio_cmd {
  73. enum workio_commands cmd;
  74. struct thr_info *thr;
  75. union {
  76. struct work *work;
  77. } u;
  78. };
  79. enum sha256_algos {
  80. ALGO_C, /* plain C */
  81. ALGO_4WAY, /* parallel SSE2 */
  82. ALGO_VIA, /* VIA padlock */
  83. ALGO_CRYPTOPP, /* Crypto++ (C) */
  84. ALGO_CRYPTOPP_ASM32, /* Crypto++ 32-bit assembly */
  85. ALGO_SSE2_64, /* SSE2 for x86_64 */
  86. ALGO_SSE4_64, /* SSE4 for x86_64 */
  87. };
  88. static const char *algo_names[] = {
  89. [ALGO_C] = "c",
  90. #ifdef WANT_SSE2_4WAY
  91. [ALGO_4WAY] = "4way",
  92. #endif
  93. #ifdef WANT_VIA_PADLOCK
  94. [ALGO_VIA] = "via",
  95. #endif
  96. [ALGO_CRYPTOPP] = "cryptopp",
  97. #ifdef WANT_CRYPTOPP_ASM32
  98. [ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
  99. #endif
  100. #ifdef WANT_X8664_SSE2
  101. [ALGO_SSE2_64] = "sse2_64",
  102. #endif
  103. #ifdef WANT_X8664_SSE4
  104. [ALGO_SSE4_64] = "sse4_64",
  105. #endif
  106. };
  107. bool opt_debug = false;
  108. bool opt_protocol = false;
  109. bool want_longpoll = true;
  110. bool have_longpoll = false;
  111. bool use_syslog = false;
  112. static bool opt_quiet = false;
  113. static int opt_retries = -1;
  114. static int opt_fail_pause = 5;
  115. static int opt_log_interval = 5;
  116. bool opt_log_output = false;
  117. static bool opt_dynamic = true;
  118. static int opt_queue;
  119. int opt_vectors;
  120. int opt_worksize;
  121. int opt_scantime = 60;
  122. static const bool opt_time = true;
  123. #ifdef WANT_X8664_SSE4
  124. static enum sha256_algos opt_algo = ALGO_SSE4_64;
  125. #elif defined(WANT_X8664_SSE2)
  126. static enum sha256_algos opt_algo = ALGO_SSE2_64;
  127. #else
  128. static enum sha256_algos opt_algo = ALGO_C;
  129. #endif
  130. static int nDevs;
  131. static int opt_g_threads = 2;
  132. static int opt_device;
  133. static int total_devices;
  134. static bool gpu_devices[16];
  135. static int gpu_threads;
  136. static bool forced_n_threads;
  137. static int opt_n_threads;
  138. static int mining_threads;
  139. static int num_processors;
  140. static int scan_intensity;
  141. static bool use_curses = true;
  142. struct thr_info *thr_info;
  143. static int work_thr_id;
  144. int longpoll_thr_id;
  145. static int stage_thr_id;
  146. static int watchdog_thr_id;
  147. struct work_restart *work_restart = NULL;
  148. static pthread_mutex_t hash_lock;
  149. static pthread_mutex_t qd_lock;
  150. static pthread_mutex_t stgd_lock;
  151. static pthread_mutex_t curses_lock;
  152. static double total_mhashes_done;
  153. static struct timeval total_tv_start, total_tv_end;
  154. pthread_mutex_t control_lock;
  155. int hw_errors;
  156. static int total_accepted, total_rejected;
  157. static int total_getworks, total_stale, total_discarded;
  158. static int total_queued, total_staged, lp_staged;
  159. static unsigned int new_blocks;
  160. static unsigned int local_work;
  161. static unsigned int total_lo, total_ro;
  162. static struct pool *pools = NULL;
  163. static struct pool *cp; /* Current pool */
  164. static int total_pools;
  165. static bool curses_active = false;
  166. static char current_block[37];
  167. static char longpoll_block[37];
  168. static char blank[37];
  169. static char datestamp[40];
  170. static char blockdate[40];
  171. struct sigaction termhandler, inthandler;
  172. struct thread_q *getq;
  173. static void applog_and_exit(const char *fmt, ...)
  174. {
  175. va_list ap;
  176. va_start(ap, fmt);
  177. vapplog(LOG_ERR, fmt, ap);
  178. va_end(ap);
  179. exit(1);
  180. }
  181. static void add_pool(void)
  182. {
  183. struct pool *pool;
  184. total_pools++;
  185. pools = realloc(pools, sizeof(struct pool) * total_pools);
  186. if (!pools) {
  187. applog(LOG_ERR, "Failed to realloc pools in add_pool");
  188. exit (1);
  189. }
  190. pool = &pools[total_pools - 1];
  191. memset(pool, 0, sizeof(struct pool));
  192. if (unlikely(pthread_mutex_init(&pool->pool_lock, NULL))) {
  193. applog(LOG_ERR, "Failed to pthread_mutex_init in add_pool");
  194. exit (1);
  195. }
  196. }
  197. /* Pool variant of test and set */
  198. static bool pool_tset(struct pool *pool, bool *var)
  199. {
  200. bool ret;
  201. pthread_mutex_lock(&pool->pool_lock);
  202. ret = *var;
  203. *var = true;
  204. pthread_mutex_unlock(&pool->pool_lock);
  205. return ret;
  206. }
  207. static bool pool_tclear(struct pool *pool, bool *var)
  208. {
  209. bool ret;
  210. pthread_mutex_lock(&pool->pool_lock);
  211. ret = *var;
  212. *var = false;
  213. pthread_mutex_unlock(&pool->pool_lock);
  214. return ret;
  215. }
  216. /* FIXME: Use asprintf for better errors. */
  217. static char *set_algo(const char *arg, enum sha256_algos *algo)
  218. {
  219. enum sha256_algos i;
  220. for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
  221. if (algo_names[i] && !strcmp(arg, algo_names[i])) {
  222. *algo = i;
  223. return NULL;
  224. }
  225. }
  226. return "Unknown algorithm";
  227. }
  228. static void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
  229. {
  230. strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
  231. }
  232. static char *set_int_range(const char *arg, int *i, int min, int max)
  233. {
  234. char *err = opt_set_intval(arg, i);
  235. if (err)
  236. return err;
  237. if (*i < min || *i > max)
  238. return "Value out of range";
  239. return NULL;
  240. }
  241. static char *set_int_0_to_9999(const char *arg, int *i)
  242. {
  243. return set_int_range(arg, i, 0, 9999);
  244. }
  245. static char *forced_int_0_to_14(const char *arg, int *i)
  246. {
  247. opt_dynamic = false;
  248. return set_int_range(arg, i, 0, 14);
  249. }
  250. static char *force_nthreads_int(const char *arg, int *i)
  251. {
  252. forced_n_threads = true;
  253. return set_int_range(arg, i, 0, 9999);
  254. }
  255. static char *set_int_0_to_10(const char *arg, int *i)
  256. {
  257. return set_int_range(arg, i, 0, 10);
  258. }
  259. static char *set_devices(const char *arg, int *i)
  260. {
  261. char *err = opt_set_intval(arg, i);
  262. if (err)
  263. return err;
  264. if (*i < 0 || *i > 15)
  265. return "Invalid GPU device number";
  266. total_devices++;
  267. gpu_devices[*i] = true;
  268. return NULL;
  269. }
  270. static char *set_url(const char *arg, char **p)
  271. {
  272. struct pool *pool;
  273. add_pool();
  274. pool = &pools[total_pools - 1];
  275. opt_set_charp(arg, &pool->rpc_url);
  276. if (strncmp(arg, "http://", 7) &&
  277. strncmp(arg, "https://", 8))
  278. return "URL must start with http:// or https://";
  279. return NULL;
  280. }
  281. static char *set_user(const char *arg, char **p)
  282. {
  283. struct pool *pool;
  284. if (!total_pools)
  285. return "No URL set for user";
  286. pool = &pools[total_pools - 1];
  287. opt_set_charp(arg, &pool->rpc_user);
  288. return NULL;
  289. }
  290. static char *set_pass(const char *arg, char **p)
  291. {
  292. struct pool *pool;
  293. if (!total_pools)
  294. return "No URL set for pass";
  295. pool = &pools[total_pools - 1];
  296. opt_set_charp(arg, &pool->rpc_pass);
  297. return NULL;
  298. }
  299. static char *set_userpass(const char *arg, char **p)
  300. {
  301. struct pool *pool;
  302. if (!total_pools)
  303. return "No URL set for userpass";
  304. pool = &pools[total_pools - 1];
  305. opt_set_charp(arg, &pool->rpc_userpass);
  306. return NULL;
  307. }
  308. static char *set_vector(const char *arg, int *i)
  309. {
  310. char *err = opt_set_intval(arg, i);
  311. if (err)
  312. return err;
  313. if (*i != 1 && *i != 2 && *i != 4)
  314. return "Valid vectors are 1, 2 or 4";
  315. return NULL;
  316. }
  317. static char *enable_debug(bool *flag)
  318. {
  319. *flag = true;
  320. /* Turn on verbose output, too. */
  321. opt_log_output = true;
  322. return NULL;
  323. }
  324. static char *trpc_url;
  325. static char *trpc_userpass;
  326. static char *trpc_user, *trpc_pass;
  327. /* These options are available from config file or commandline */
  328. static struct opt_table opt_config_table[] = {
  329. OPT_WITH_ARG("--algo|-a",
  330. set_algo, show_algo, &opt_algo,
  331. "Specify sha256 implementation for CPU mining:\n"
  332. "\tc\t\tLinux kernel sha256, implemented in C"
  333. #ifdef WANT_SSE2_4WAY
  334. "\n\t4way\t\ttcatm's 4-way SSE2 implementation"
  335. #endif
  336. #ifdef WANT_VIA_PADLOCK
  337. "\n\tvia\t\tVIA padlock implementation"
  338. #endif
  339. "\n\tcryptopp\tCrypto++ C/C++ implementation"
  340. #ifdef WANT_CRYPTOPP_ASM32
  341. "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
  342. #endif
  343. #ifdef WANT_X8664_SSE2
  344. "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
  345. #endif
  346. #ifdef WANT_X8664_SSE4
  347. "\n\tsse4_64\t\tSSE4 implementation for x86_64 machines"
  348. #endif
  349. ),
  350. OPT_WITH_ARG("--cpu-threads|-t",
  351. force_nthreads_int, opt_show_intval, &opt_n_threads,
  352. "Number of miner CPU threads"),
  353. OPT_WITHOUT_ARG("--debug|-D",
  354. enable_debug, &opt_debug,
  355. "Enable debug output"),
  356. #ifdef HAVE_OPENCL
  357. OPT_WITH_ARG("--device|-d",
  358. set_devices, NULL, &opt_device,
  359. "Select device to use, (Use repeat -d for multiple devices, default: all)"),
  360. OPT_WITH_ARG("--gpu-threads|-g",
  361. set_int_0_to_10, opt_show_intval, &opt_g_threads,
  362. "Number of threads per GPU (0 - 10)"),
  363. OPT_WITH_ARG("--intensity|-I",
  364. forced_int_0_to_14, opt_show_intval, &scan_intensity,
  365. "Intensity of GPU scanning (0 - 14, default: dynamic to maintain desktop interactivity)"),
  366. #endif
  367. OPT_WITH_ARG("--log|-l",
  368. set_int_0_to_9999, opt_show_intval, &opt_log_interval,
  369. "Interval in seconds between log output"),
  370. OPT_WITHOUT_ARG("--no-longpoll",
  371. opt_set_invbool, &want_longpoll,
  372. "Disable X-Long-Polling support"),
  373. OPT_WITH_ARG("--pass|-p",
  374. set_pass, NULL, &trpc_pass,
  375. "Password for bitcoin JSON-RPC server"),
  376. OPT_WITHOUT_ARG("--protocol-dump|-P",
  377. opt_set_bool, &opt_protocol,
  378. "Verbose dump of protocol-level activities"),
  379. OPT_WITH_ARG("--queue|-Q",
  380. set_int_0_to_10, opt_show_intval, &opt_queue,
  381. "Number of extra work items to queue (0 - 10)"),
  382. OPT_WITHOUT_ARG("--quiet|-q",
  383. opt_set_bool, &opt_quiet,
  384. "Disable per-thread hashmeter output"),
  385. OPT_WITH_ARG("--retries|-r",
  386. opt_set_intval, opt_show_intval, &opt_retries,
  387. "Number of times to retry before giving up, if JSON-RPC call fails (-1 means never)"),
  388. OPT_WITH_ARG("--retry-pause|-R",
  389. set_int_0_to_9999, opt_show_intval, &opt_fail_pause,
  390. "Number of seconds to pause, between retries"),
  391. OPT_WITH_ARG("--scan-time|-s",
  392. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  393. "Upper bound on time spent scanning current work, in seconds"),
  394. #ifdef HAVE_SYSLOG_H
  395. OPT_WITHOUT_ARG("--syslog",
  396. opt_set_bool, &use_syslog,
  397. "Use system log for output messages (default: standard error)"),
  398. #endif
  399. OPT_WITHOUT_ARG("--text-only|-T",
  400. opt_set_invbool, &use_curses,
  401. "Disable ncurses formatted screen output"),
  402. OPT_WITH_ARG("--url|-o",
  403. set_url, opt_show_charp, &trpc_url,
  404. "URL for bitcoin JSON-RPC server"),
  405. OPT_WITH_ARG("--user|-u",
  406. set_user, NULL, &trpc_user,
  407. "Username for bitcoin JSON-RPC server"),
  408. #ifdef HAVE_OPENCL
  409. OPT_WITH_ARG("--vectors|-v",
  410. set_vector, NULL, &opt_vectors,
  411. "Override detected optimal vector width (1, 2 or 4)"),
  412. #endif
  413. OPT_WITHOUT_ARG("--verbose",
  414. opt_set_bool, &opt_log_output,
  415. "Log verbose output to stderr as well as status output"),
  416. #ifdef HAVE_OPENCL
  417. OPT_WITH_ARG("--worksize|-w",
  418. set_int_0_to_9999, opt_show_intval, &opt_worksize,
  419. "Override detected optimal worksize"),
  420. #endif
  421. OPT_WITH_ARG("--userpass|-O",
  422. set_userpass, NULL, &trpc_userpass,
  423. "Username:Password pair for bitcoin JSON-RPC server"),
  424. OPT_ENDTABLE
  425. };
  426. static char *parse_config(json_t *config)
  427. {
  428. static char err_buf[200];
  429. json_t *val;
  430. struct opt_table *opt;
  431. for (opt = opt_config_table; opt->type != OPT_END; opt++) {
  432. char *p, *name;
  433. /* We don't handle subtables. */
  434. assert(!(opt->type & OPT_SUBTABLE));
  435. /* Pull apart the option name(s). */
  436. name = strdup(opt->names);
  437. for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
  438. char *err;
  439. /* Ignore short options. */
  440. if (p[1] != '-')
  441. continue;
  442. val = json_object_get(config, p+2);
  443. if (!val)
  444. continue;
  445. if ((opt->type & OPT_HASARG) && json_is_string(val)) {
  446. err = opt->cb_arg(json_string_value(val),
  447. opt->u.arg);
  448. } else if ((opt->type&OPT_NOARG) && json_is_true(val)) {
  449. err = opt->cb(opt->u.arg);
  450. } else {
  451. err = "Invalid value";
  452. }
  453. if (err) {
  454. snprintf(err_buf, sizeof(err_buf), "Parsing JSON option %s: %s",
  455. p, err);
  456. return err_buf;
  457. }
  458. }
  459. free(name);
  460. }
  461. return NULL;
  462. }
  463. static char *load_config(const char *arg, void *unused)
  464. {
  465. json_error_t err;
  466. json_t *config;
  467. config = json_load_file(arg, 0, &err);
  468. if (!json_is_object(config))
  469. return "JSON decode of file failed";
  470. /* Parse the config now, so we can override it. The parser may keep
  471. * pointers into the config object, so don't free it. */
  472. return parse_config(config);
  473. }
  474. static char *print_ndevs_and_exit(int *ndevs)
  475. {
  476. printf("%i GPU devices detected\n", *ndevs);
  477. exit(*ndevs);
  478. }
  479. /* These options are available from commandline only */
  480. static struct opt_table opt_cmdline_table[] = {
  481. OPT_WITH_ARG("--config|-c",
  482. load_config, NULL, NULL,
  483. "Load a JSON-format configuration file\n"
  484. "See example-cfg.json for an example configuration."),
  485. OPT_WITHOUT_ARG("--help|-h",
  486. opt_usage_and_exit,
  487. #ifdef HAVE_OPENCL
  488. "\nBuilt with CPU and GPU mining support.\n\n",
  489. #else
  490. "\nBuilt with CPU mining support only.\n\n",
  491. #endif
  492. "Print this message"),
  493. OPT_WITHOUT_ARG("--ndevs|-n",
  494. print_ndevs_and_exit, &nDevs,
  495. "Enumerate number of detected GPUs and exit"),
  496. OPT_ENDTABLE
  497. };
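  /* Look up a hex string stored under 'key' in a JSON object and decode it
   * into buf; fails if the key is missing, not a string, or cannot be
   * decoded into buflen bytes of binary data. */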
  498. static bool jobj_binary(const json_t *obj, const char *key,
  499. void *buf, size_t buflen)
  500. {
  501. const char *hexstr;
  502. json_t *tmp;
  503. tmp = json_object_get(obj, key);
  504. if (unlikely(!tmp)) {
  505. applog(LOG_ERR, "JSON key '%s' not found", key);
  506. return false;
  507. }
  508. hexstr = json_string_value(tmp);
  509. if (unlikely(!hexstr)) {
  510. applog(LOG_ERR, "JSON key '%s' is not a string", key);
  511. return false;
  512. }
  513. if (!hex2bin(buf, hexstr, buflen))
  514. return false;
  515. return true;
  516. }
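  /* Convert the "result" object of a getwork response into a struct work,
   * decoding the midstate, data, hash1 and target hex fields. */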
  517. static bool work_decode(const json_t *val, struct work *work)
  518. {
  519. if (unlikely(!jobj_binary(val, "midstate",
  520. work->midstate, sizeof(work->midstate)))) {
  521. applog(LOG_ERR, "JSON inval midstate");
  522. goto err_out;
  523. }
  524. if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data)))) {
  525. applog(LOG_ERR, "JSON inval data");
  526. goto err_out;
  527. }
  528. if (unlikely(!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1)))) {
  529. applog(LOG_ERR, "JSON inval hash1");
  530. goto err_out;
  531. }
  532. if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target)))) {
  533. applog(LOG_ERR, "JSON inval target");
  534. goto err_out;
  535. }
  536. memset(work->hash, 0, sizeof(work->hash));
  537. return true;
  538. err_out:
  539. return false;
  540. }
  541. static inline int dev_from_id(int thr_id)
  542. {
  543. return thr_info[thr_id].cgpu->cpu_gpu;
  544. }
  545. static WINDOW *mainwin, *statuswin, *logwin;
  546. static double total_secs = 0.1;
  547. static char statusline[256];
  548. static int cpucursor, gpucursor, logstart, logcursor;
  549. static struct cgpu_info *gpus, *cpus;
  550. static void text_print_status(int thr_id)
  551. {
  552. struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
  553. printf(" %sPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]\n",
  554. cgpu->is_gpu ? "G" : "C", cgpu->cpu_gpu, cgpu->total_mhashes / total_secs,
  555. cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
  556. cgpu->efficiency, cgpu->utility);
  557. }
  558. /* Must be called with curses mutex lock held and curses_active */
  559. static void curses_print_status(int thr_id)
  560. {
  561. struct pool *pool = cp;
  562. wmove(statuswin, 0, 0);
  563. wattron(statuswin, A_BOLD);
  564. wprintw(statuswin, " " PROGRAM_NAME " version " VERSION " - Started: %s", datestamp);
  565. wattroff(statuswin, A_BOLD);
  566. wmove(statuswin, 1, 0);
  567. whline(statuswin, '-', 80);
  568. wmove(statuswin, 2,0);
  569. wprintw(statuswin, " %s", statusline);
  570. wclrtoeol(statuswin);
  571. wmove(statuswin, 3,0);
  572. wprintw(statuswin, " TQ: %d ST: %d LS: %d SS: %d DW: %d NB: %d LW: %d LO: %d RF: %d I: %d",
  573. total_queued, total_staged, lp_staged, total_stale, total_discarded, new_blocks,
  574. local_work, total_lo, total_ro, scan_intensity);
  575. wclrtoeol(statuswin);
  576. wmove(statuswin, 4, 0);
  577. wprintw(statuswin, " Connected to %s as user %s", pool->rpc_url, pool->rpc_user);
  578. wmove(statuswin, 5, 0);
  579. wprintw(statuswin, " Block %s started: %s", current_block + 4, blockdate);
  580. wmove(statuswin, 6, 0);
  581. whline(statuswin, '-', 80);
  582. wmove(statuswin, logstart - 1, 0);
  583. whline(statuswin, '-', 80);
  584. if (thr_id >= 0 && thr_id < gpu_threads) {
  585. int gpu = dev_from_id(thr_id);
  586. struct cgpu_info *cgpu = &gpus[gpu];
  587. wmove(statuswin, gpucursor + gpu, 0);
  588. wprintw(statuswin, " GPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
  589. gpu, cgpu->total_mhashes / total_secs,
  590. cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
  591. cgpu->efficiency, cgpu->utility);
  592. wclrtoeol(statuswin);
  593. } else if (thr_id >= gpu_threads) {
  594. int cpu = dev_from_id(thr_id);
  595. struct cgpu_info *cgpu = &cpus[cpu];
  596. wmove(statuswin, cpucursor + cpu, 0);
  597. wprintw(statuswin, " CPU %d: [%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
  598. cpu, cgpu->total_mhashes / total_secs,
  599. cgpu->getworks, cgpu->accepted, cgpu->rejected, cgpu->hw_errors,
  600. cgpu->efficiency, cgpu->utility);
  601. wclrtoeol(statuswin);
  602. }
  603. wrefresh(statuswin);
  604. }
  605. static void print_status(int thr_id)
  606. {
  607. if (!curses_active)
  608. text_print_status(thr_id);
  609. else {
  610. pthread_mutex_lock(&curses_lock);
  611. curses_print_status(thr_id);
  612. wrefresh(statuswin);
  613. pthread_mutex_unlock(&curses_lock);
  614. }
  615. }
  616. void log_curses(const char *f, va_list ap)
  617. {
  618. if (curses_active) {
  619. pthread_mutex_lock(&curses_lock);
  620. vw_printw(logwin, f, ap);
  621. wrefresh(logwin);
  622. pthread_mutex_unlock(&curses_lock);
  623. } else
  624. vprintf(f, ap);
  625. }
  626. static bool submit_upstream_work(const struct work *work)
  627. {
  628. char *hexstr = NULL;
  629. json_t *val, *res;
  630. char s[345];
  631. bool rc = false;
  632. int thr_id = work->thr_id;
  633. struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
  634. CURL *curl = curl_easy_init();
  635. struct pool *pool = cp;
  636. if (unlikely(!curl)) {
  637. applog(LOG_ERR, "CURL initialisation failed");
  638. return rc;
  639. }
  640. /* build hex string */
  641. hexstr = bin2hex(work->data, sizeof(work->data));
  642. if (unlikely(!hexstr)) {
  643. applog(LOG_ERR, "submit_upstream_work OOM");
  644. goto out_nofree;
  645. }
  646. /* build JSON-RPC request */
  647. sprintf(s,
  648. "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
  649. hexstr);
  650. if (opt_debug)
  651. applog(LOG_DEBUG, "DBG: sending RPC call: %s", s);
  652. /* issue JSON-RPC request */
  653. val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, s, false, false);
  654. if (unlikely(!val)) {
  655. applog(LOG_INFO, "submit_upstream_work json_rpc_call failed");
  656. if (!pool_tset(pool, &pool->submit_fail)) {
  657. total_ro++;
  658. pool->remotefail_occasions++;
  659. applog(LOG_WARNING, "Upstream communication failure, caching submissions");
  660. }
  661. goto out;
  662. } else if (pool_tclear(pool, &pool->submit_fail))
  663. applog(LOG_WARNING, "Upstream communication resumed, submitting work");
  664. res = json_object_get(val, "result");
  665. /* Theoretically threads could race when modifying accepted and
  666. * rejected values but the chance of two submits completing at the
  667. * same time is zero so there is no point adding extra locking */
  668. if (json_is_true(res)) {
  669. cgpu->accepted++;
  670. total_accepted++;
  671. pool->accepted++;
  672. if (opt_debug)
  673. applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
  674. if (!opt_quiet)
  675. applog(LOG_WARNING, "Share %.8s accepted from %sPU %d thread %d",
  676. hexstr + 152, cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu, thr_id);
  677. } else {
  678. cgpu->rejected++;
  679. total_rejected++;
  680. pool->rejected++;
  681. if (opt_debug)
  682. applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
  683. if (!opt_quiet)
  684. applog(LOG_WARNING, "Share %.8s rejected from %sPU %d thread %d",
  685. hexstr + 152, cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu, thr_id);
  686. }
  687. cgpu->utility = cgpu->accepted / ( total_secs ? total_secs : 1 ) * 60;
  688. cgpu->efficiency = cgpu->getworks ? cgpu->accepted * 100.0 / cgpu->getworks : 0.0;
  689. if (!opt_quiet)
  690. print_status(thr_id);
  691. applog(LOG_INFO, "%sPU %d Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m",
  692. cgpu->is_gpu? "G" : "C", cgpu->cpu_gpu, cgpu->getworks, cgpu->accepted,
  693. cgpu->rejected, cgpu->hw_errors, cgpu->efficiency, cgpu->utility);
  694. json_decref(val);
  695. rc = true;
  696. out:
  697. free(hexstr);
  698. out_nofree:
  699. curl_easy_cleanup(curl);
  700. return rc;
  701. }
  702. static const char *rpc_req =
  703. "{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";
  704. static bool get_upstream_work(struct work *work)
  705. {
  706. struct pool *pool = cp;
  707. json_t *val;
  708. bool rc = false;
  709. CURL *curl = curl_easy_init();
  710. if (unlikely(!curl)) {
  711. applog(LOG_ERR, "CURL initialisation failed");
  712. return rc;
  713. }
  714. val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req,
  715. want_longpoll, false);
  716. if (unlikely(!val)) {
  717. applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
  718. goto out;
  719. }
  720. rc = work_decode(json_object_get(val, "result"), work);
  721. json_decref(val);
  722. out:
  723. curl_easy_cleanup(curl);
  724. return rc;
  725. }
  726. static void workio_cmd_free(struct workio_cmd *wc)
  727. {
  728. if (!wc)
  729. return;
  730. switch (wc->cmd) {
  731. case WC_SUBMIT_WORK:
  732. free(wc->u.work);
  733. break;
  734. default: /* do nothing */
  735. break;
  736. }
  737. memset(wc, 0, sizeof(*wc)); /* poison */
  738. free(wc);
  739. }
  740. static void disable_curses(void)
  741. {
  742. if (test_and_clear(&curses_active)) {
  743. delwin(logwin);
  744. delwin(statuswin);
  745. delwin(mainwin);
  746. endwin();
  747. refresh();
  748. }
  749. }
  750. void kill_work(void)
  751. {
  752. struct workio_cmd *wc;
  753. struct thr_info *thr;
  754. unsigned int i;
  755. disable_curses();
  756. applog(LOG_INFO, "Received kill message");
  757. /* Kill the watchdog thread */
  758. thr = &thr_info[watchdog_thr_id];
  759. pthread_cancel(thr->pth);
  760. /* Stop the mining threads */
  761. for (i = 0; i < mining_threads; i++) {
  762. thr = &thr_info[i];
  763. tq_freeze(thr->q);
  764. /* No need to check if this succeeds or not */
  765. pthread_cancel(thr->pth);
  766. }
  767. /* Stop the others */
  768. thr = &thr_info[stage_thr_id];
  769. pthread_cancel(thr->pth);
  770. thr = &thr_info[longpoll_thr_id];
  771. pthread_cancel(thr->pth);
  772. wc = calloc(1, sizeof(*wc));
  773. if (unlikely(!wc)) {
  774. applog(LOG_ERR, "Failed to calloc wc in kill_work");
  775. /* We're just trying to die anyway, so forget graceful */
  776. exit (1);
  777. }
  778. wc->cmd = WC_DIE;
  779. wc->thr = 0;
  780. if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
  781. applog(LOG_ERR, "Failed to tq_push work in kill_work");
  782. exit (1);
  783. }
  784. }
  785. static void sighandler(int sig)
  786. {
  787. /* Restore signal handlers so we can still quit if kill_work fails */
  788. sigaction(SIGTERM, &termhandler, NULL);
  789. sigaction(SIGINT, &inthandler, NULL);
  790. kill_work();
  791. }
  792. static void *get_work_thread(void *userdata)
  793. {
  794. struct workio_cmd *wc = (struct workio_cmd *)userdata;
  795. struct work *ret_work;
  796. int failures = 0;
  797. pthread_detach(pthread_self());
  798. ret_work = calloc(1, sizeof(*ret_work));
  799. if (unlikely(!ret_work)) {
  800. applog(LOG_ERR, "Failed to calloc ret_work in workio_get_work");
  801. kill_work();
  802. goto out;
  803. }
  804. /* obtain new work from bitcoin via JSON-RPC */
  805. while (!get_upstream_work(ret_work)) {
  806. if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
  807. applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
  808. free(ret_work);
  809. kill_work();
  810. goto out;
  811. }
  812. /* pause, then restart work-request loop */
  813. applog(LOG_DEBUG, "json_rpc_call failed on get work, retry after %d seconds",
  814. opt_fail_pause);
  815. sleep(opt_fail_pause);
  816. }
  817. /* send work to requesting thread */
  818. if (unlikely(!tq_push(thr_info[stage_thr_id].q, ret_work))) {
  819. applog(LOG_ERR, "Failed to tq_push work in workio_get_work");
  820. kill_work();
  821. free(ret_work);
  822. }
  823. out:
  824. workio_cmd_free(wc);
  825. return NULL;
  826. }
  827. static bool workio_get_work(struct workio_cmd *wc)
  828. {
  829. pthread_t get_thread;
  830. if (unlikely(pthread_create(&get_thread, NULL, get_work_thread, (void *)wc))) {
  831. applog(LOG_ERR, "Failed to create get_work_thread");
  832. return false;
  833. }
  834. return true;
  835. }
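  /* Work is considered stale when the leading bytes of its block header no
   * longer match the cached current_block prefix, i.e. a new block has
   * appeared since the work was fetched. */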
  836. static bool stale_work(struct work *work)
  837. {
  838. bool ret = false;
  839. char *hexstr;
  840. if (!strncmp(blank, current_block, 36))
  841. return ret;
  842. hexstr = bin2hex(work->data, 36);
  843. if (unlikely(!hexstr)) {
  844. applog(LOG_ERR, "stale_work OOM");
  845. return ret;
  846. }
  847. if (strncmp(hexstr, current_block, 36))
  848. ret = true;
  849. free(hexstr);
  850. return ret;
  851. }
  852. static void *submit_work_thread(void *userdata)
  853. {
  854. struct workio_cmd *wc = (struct workio_cmd *)userdata;
  855. struct pool *pool = cp;
  856. int failures = 0;
  857. pthread_detach(pthread_self());
  858. if (stale_work(wc->u.work)) {
  859. applog(LOG_WARNING, "Stale share detected, discarding");
  860. total_stale++;
  861. pool->stale_shares++;
  862. goto out;
  863. }
  864. /* submit solution to bitcoin via JSON-RPC */
  865. while (!submit_upstream_work(wc->u.work)) {
  866. if (stale_work(wc->u.work)) {
  867. applog(LOG_WARNING, "Stale share detected, discarding");
  868. total_stale++;
  869. pool->stale_shares++;
  870. break;
  871. }
  872. if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
  873. applog(LOG_ERR, "Failed %d retries ...terminating workio thread", opt_retries);
  874. kill_work();
  875. break;
  876. }
  877. /* pause, then restart work-request loop */
  878. applog(LOG_INFO, "json_rpc_call failed on submit_work, retry after %d seconds",
  879. opt_fail_pause);
  880. sleep(opt_fail_pause);
  881. }
  882. out:
  883. workio_cmd_free(wc);
  884. return NULL;
  885. }
  886. static bool workio_submit_work(struct workio_cmd *wc)
  887. {
  888. pthread_t submit_thread;
  889. if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, (void *)wc))) {
  890. applog(LOG_ERR, "Failed to create submit_work_thread");
  891. return false;
  892. }
  893. return true;
  894. }
  895. static void inc_staged(int inc, bool lp)
  896. {
  897. struct pool *pool = cp;
  898. pthread_mutex_lock(&stgd_lock);
  899. if (lp) {
  900. lp_staged += inc;
  901. total_staged += inc;
  902. pool->idlenet = true;
  903. } else if (lp_staged) {
  904. if (!--lp_staged) {
  905. unsigned int i;
  906. /* Make sure the watchdog thread doesn't kill the mining
  907. * threads once we unset the idlenet flag */
  908. for (i = 0; i < mining_threads; i++)
  909. gettimeofday(&thr_info[i].last, NULL);
  910. pool->idlenet = false;
  911. }
  912. } else
  913. total_staged += inc;
  914. pthread_mutex_unlock(&stgd_lock);
  915. }
  916. static void dec_staged(int inc)
  917. {
  918. pthread_mutex_lock(&stgd_lock);
  919. total_staged -= inc;
  920. pthread_mutex_unlock(&stgd_lock);
  921. }
  922. static int requests_staged(void)
  923. {
  924. int ret;
  925. pthread_mutex_lock(&stgd_lock);
  926. ret = total_staged;
  927. pthread_mutex_unlock(&stgd_lock);
  928. return ret;
  929. }
  930. static int real_staged(void)
  931. {
  932. int ret;
  933. pthread_mutex_lock(&stgd_lock);
  934. ret = total_staged - lp_staged;
  935. pthread_mutex_unlock(&stgd_lock);
  936. return ret;
  937. }
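  /* Cache the hex prefix of the block we are currently working on and
   * record the time it was first seen, for display in the status window. */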
  938. static void set_curblock(char *hexstr)
  939. {
  940. struct timeval tv_now;
  941. struct tm tm;
  942. memcpy(current_block, hexstr, 36);
  943. gettimeofday(&tv_now, NULL);
  944. localtime_r(&tv_now.tv_sec, &tm);
  945. sprintf(blockdate, "[%d-%02d-%02d %02d:%02d:%02d]",
  946. tm.tm_year + 1900,
  947. tm.tm_mon + 1,
  948. tm.tm_mday,
  949. tm.tm_hour,
  950. tm.tm_min,
  951. tm.tm_sec);
  952. }
  953. static void *stage_thread(void *userdata)
  954. {
  955. struct thr_info *mythr = userdata;
  956. bool ok = true;
  957. pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
  958. while (ok) {
  959. struct work *work = NULL;
  960. char *hexstr;
  961. work = tq_pop(mythr->q, NULL);
  962. if (unlikely(!work)) {
  963. applog(LOG_ERR, "Failed to tq_pop in stage_thread");
  964. ok = false;
  965. break;
  966. }
  967. hexstr = bin2hex(work->data, 36);
  968. if (unlikely(!hexstr)) {
  969. applog(LOG_ERR, "stage_thread OOM");
  970. break;
  971. }
  972. /* current_block is blanked out on successful longpoll */
  973. if (likely(strncmp(current_block, blank, 36))) {
  974. if (unlikely(strncmp(hexstr, current_block, 36))) {
  975. new_blocks++;
  976. if (have_longpoll)
  977. applog(LOG_WARNING, "New block detected on network before longpoll, waiting on fresh work");
  978. else
  979. applog(LOG_WARNING, "New block detected on network, waiting on fresh work");
  980. /* As we can't flush the work from here, signal
  981. * the wakeup thread to restart all the
  982. * threads */
  983. work_restart[watchdog_thr_id].restart = 1;
  984. set_curblock(hexstr);
  985. }
  986. } else {
  987. set_curblock(hexstr);
  988. memcpy(longpoll_block, hexstr, 36);
  989. }
  990. free(hexstr);
  991. if (unlikely(!tq_push(getq, work))) {
  992. applog(LOG_ERR, "Failed to tq_push work in stage_thread");
  993. ok = false;
  994. break;
  995. }
  996. inc_staged(1, false);
  997. }
  998. tq_freeze(mythr->q);
  999. return NULL;
  1000. }
  1001. static void *workio_thread(void *userdata)
  1002. {
  1003. struct thr_info *mythr = userdata;
  1004. bool ok = true;
  1005. while (ok) {
  1006. struct workio_cmd *wc;
  1007. /* wait for workio_cmd sent to us, on our queue */
  1008. wc = tq_pop(mythr->q, NULL);
  1009. if (unlikely(!wc)) {
  1010. applog(LOG_ERR, "Failed to tq_pop in workio_thread");
  1011. ok = false;
  1012. break;
  1013. }
  1014. /* process workio_cmd */
  1015. switch (wc->cmd) {
  1016. case WC_GET_WORK:
  1017. ok = workio_get_work(wc);
  1018. break;
  1019. case WC_SUBMIT_WORK:
  1020. ok = workio_submit_work(wc);
  1021. break;
  1022. case WC_DIE:
  1023. default:
  1024. ok = false;
  1025. break;
  1026. }
  1027. }
  1028. tq_freeze(mythr->q);
  1029. return NULL;
  1030. }
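  /* Accumulate per-thread and global hashrate statistics and, at most once
   * every opt_log_interval seconds, update the rolling-average status line. */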
  1031. static void hashmeter(int thr_id, struct timeval *diff,
  1032. unsigned long hashes_done)
  1033. {
  1034. struct timeval temp_tv_end, total_diff;
  1035. double khashes, secs;
  1036. double local_secs;
  1037. double utility, efficiency = 0.0;
  1038. static double local_mhashes_done = 0;
  1039. static double rolling_local = 0;
  1040. double local_mhashes = (double)hashes_done / 1000000.0;
  1041. struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
  1042. /* Update the last time this thread reported in */
  1043. if (thr_id >= 0)
  1044. gettimeofday(&thr_info[thr_id].last, NULL);
  1045. /* Don't bother calculating anything if we're not displaying it */
  1046. if (opt_quiet || !opt_log_interval)
  1047. return;
  1048. khashes = hashes_done / 1000.0;
  1049. secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
  1050. if (thr_id >= 0 && secs) {
  1051. /* So we can call hashmeter from a non worker thread */
  1052. if (opt_debug)
  1053. applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
  1054. thr_id, hashes_done, hashes_done / secs);
  1055. cgpu->local_mhashes += local_mhashes;
  1056. cgpu->total_mhashes += local_mhashes;
  1057. }
  1058. /* Totals are updated by all threads so can race without locking */
  1059. pthread_mutex_lock(&hash_lock);
  1060. gettimeofday(&temp_tv_end, NULL);
  1061. timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
  1062. local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
  1063. total_mhashes_done += local_mhashes;
  1064. local_mhashes_done += local_mhashes;
  1065. if (total_diff.tv_sec < opt_log_interval)
  1066. /* Only update the total every opt_log_interval seconds */
  1067. goto out_unlock;
  1068. gettimeofday(&total_tv_end, NULL);
  1069. /* Use a rolling average by faking an exponential decay over 5 * log_interval */
  1070. rolling_local = ((rolling_local * 0.9) + local_mhashes_done) / 1.9;
  1071. timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
  1072. total_secs = (double)total_diff.tv_sec +
  1073. ((double)total_diff.tv_usec / 1000000.0);
  1074. utility = total_accepted / ( total_secs ? total_secs : 1 ) * 60;
  1075. efficiency = total_getworks ? total_accepted * 100.0 / total_getworks : 0.0;
  1076. sprintf(statusline, "[(%ds):%.1f (avg):%.1f Mh/s] [Q:%d A:%d R:%d HW:%d E:%.0f%% U:%.2f/m]",
  1077. opt_log_interval, rolling_local / local_secs, total_mhashes_done / total_secs,
  1078. total_getworks, total_accepted, total_rejected, hw_errors, efficiency, utility);
  1079. if (!curses_active) {
  1080. printf("%s \r", statusline);
  1081. fflush(stdout);
  1082. } else
  1083. applog(LOG_INFO, "%s", statusline);
  1084. local_mhashes_done = 0;
  1085. out_unlock:
  1086. pthread_mutex_unlock(&hash_lock);
  1087. }
  1088. /* This is overkill, but at least we'll know accurately how much work is
  1089. * queued to prevent ever being left without work */
  1090. static void inc_queued(void)
  1091. {
  1092. pthread_mutex_lock(&qd_lock);
  1093. total_queued++;
  1094. pthread_mutex_unlock(&qd_lock);
  1095. }
  1096. static void dec_queued(void)
  1097. {
  1098. pthread_mutex_lock(&qd_lock);
  1099. if (total_queued > 0)
  1100. total_queued--;
  1101. pthread_mutex_unlock(&qd_lock);
  1102. dec_staged(1);
  1103. }
  1104. static int requests_queued(void)
  1105. {
  1106. int ret;
  1107. pthread_mutex_lock(&qd_lock);
  1108. ret = total_queued;
  1109. pthread_mutex_unlock(&qd_lock);
  1110. return ret;
  1111. }
  1112. static bool queue_request(void)
  1113. {
  1114. int maxq = opt_queue + mining_threads;
  1115. struct workio_cmd *wc;
  1116. struct pool *pool = cp;
  1117. /* If we've been generating lots of local work we may already have
  1118. * enough in the queue */
  1119. if (requests_queued() >= maxq || real_staged() >= maxq)
  1120. return true;
  1121. /* fill out work request message */
  1122. wc = calloc(1, sizeof(*wc));
  1123. if (unlikely(!wc)) {
  1124. applog(LOG_ERR, "Failed to calloc wc in queue_request");
  1125. return false;
  1126. }
  1127. wc->cmd = WC_GET_WORK;
  1128. /* The get work does not belong to any thread */
  1129. wc->thr = NULL;
  1130. /* send work request to workio thread */
  1131. if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
  1132. applog(LOG_ERR, "Failed to tq_push in queue_request");
  1133. workio_cmd_free(wc);
  1134. return false;
  1135. }
  1136. total_getworks++;
  1137. pool->getwork_requested++;
  1138. inc_queued();
  1139. return true;
  1140. }
  1141. static void discard_staged(void)
  1142. {
  1143. struct work *work_heap;
  1144. struct pool *pool = cp;
  1145. /* Just in case we fell in a hole and missed a queue filling */
  1146. if (unlikely(!requests_staged()))
  1147. return;
  1148. work_heap = tq_pop(getq, NULL);
  1149. if (unlikely(!work_heap))
  1150. return;
  1151. free(work_heap);
  1152. dec_queued();
  1153. pool->discarded_work++;
  1154. total_discarded++;
  1155. }
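  /* After a block change, queue one fresh getwork request for each stale
   * staged item and discard the stale items themselves. */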
  1156. static void flush_requests(bool longpoll)
  1157. {
  1158. int i, stale;
  1159. /* We should have one fresh work item staged from the block change. */
  1160. stale = requests_staged() - 1;
  1161. if (longpoll)
  1162. memcpy(current_block, blank, 36);
  1163. /* Temporarily increase the staged count so that get_work thinks there
  1164. * is work available instead of making threads reuse existing work */
  1165. inc_staged(mining_threads, true);
  1166. for (i = 0; i < stale; i++) {
  1167. /* Queue a whole batch of new requests */
  1168. if (unlikely(!queue_request())) {
  1169. applog(LOG_ERR, "Failed to queue requests in flush_requests");
  1170. kill_work();
  1171. break;
  1172. }
  1173. /* Pop off the old requests. Cancelling the requests would be better
  1174. * but is tricky */
  1175. discard_staged();
  1176. }
  1177. }
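  /* Hand a work item to a miner thread. If the server has not staged fresh
   * work in time, fall back to rolling the ntime field of the existing work
   * so the miners are never left idle. */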
  1178. static bool get_work(struct work *work, bool queued)
  1179. {
  1180. struct pool *pool = cp;
  1181. struct work *work_heap;
  1182. bool ret = false;
  1183. int failures = 0;
  1184. retry:
  1185. if (unlikely(!queued && !queue_request())) {
  1186. applog(LOG_WARNING, "Failed to queue_request in get_work");
  1187. goto out;
  1188. }
  1189. if (!requests_staged() && !stale_work(work)) {
  1190. uint32_t *work_ntime;
  1191. uint32_t ntime;
  1192. /* Only print this message once each time we shift to localgen */
  1193. if (!pool_tset(pool, &pool->localgen)) {
  1194. applog(LOG_WARNING, "Server not providing work fast enough, generating work locally");
  1195. pool->localgen_occasions++;
  1196. total_lo++;
  1197. gettimeofday(&pool->tv_localgen, NULL);
  1198. } else {
  1199. struct timeval tv_now, diff;
  1200. gettimeofday(&tv_now, NULL);
  1201. timeval_subtract(&diff, &tv_now, &pool->tv_localgen);
  1202. if (diff.tv_sec > 600) {
  1203. /* A new block appears on average every 10 mins */
  1204. applog(LOG_WARNING, "Prolonged outage. Going idle till network recovers.");
  1205. /* Force every thread to wait for new work */
  1206. inc_staged(mining_threads, true);
  1207. goto retry;
  1208. }
  1209. }
  1210. work_ntime = (uint32_t *)(work->data + 68);
  1211. ntime = be32toh(*work_ntime);
  1212. ntime++;
  1213. *work_ntime = htobe32(ntime);
  1214. ret = true;
  1215. local_work++;
  1216. goto out;
  1217. }
  1218. /* wait for 1st response, or get cached response */
  1219. work_heap = tq_pop(getq, NULL);
  1220. if (unlikely(!work_heap)) {
  1221. applog(LOG_WARNING, "Failed to tq_pop in get_work");
  1222. goto out;
  1223. }
  1224. /* If we make it here we have succeeded in getting fresh work */
  1225. if (pool_tclear(pool, &pool->localgen))
  1226. applog(LOG_WARNING, "Resuming with work from server");
  1227. dec_queued();
  1228. memcpy(work, work_heap, sizeof(*work));
  1229. ret = true;
  1230. free(work_heap);
  1231. out:
  1232. if (unlikely(ret == false)) {
  1233. if ((opt_retries >= 0) && (++failures > opt_retries)) {
  1234. applog(LOG_ERR, "Failed %d times to get_work", opt_retries);
  1235. return ret;
  1236. }
  1237. applog(LOG_DEBUG, "Retrying after %d seconds", opt_fail_pause);
  1238. sleep(opt_fail_pause);
  1239. goto retry;
  1240. }
  1241. return ret;
  1242. }
  1243. static bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
  1244. {
  1245. struct workio_cmd *wc;
  1246. /* fill out work request message */
  1247. wc = calloc(1, sizeof(*wc));
  1248. if (unlikely(!wc)) {
  1249. applog(LOG_ERR, "Failed to calloc wc in submit_work_sync");
  1250. return false;
  1251. }
  1252. wc->u.work = malloc(sizeof(*work_in));
  1253. if (unlikely(!wc->u.work)) {
  1254. applog(LOG_ERR, "Failed to malloc work in submit_work_sync");
  1255. goto err_out;
  1256. }
  1257. wc->cmd = WC_SUBMIT_WORK;
  1258. wc->thr = thr;
  1259. memcpy(wc->u.work, work_in, sizeof(*work_in));
  1260. /* send solution to workio thread */
  1261. if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
  1262. applog(LOG_ERR, "Failed to tq_push work in submit_work_sync");
  1263. goto err_out;
  1264. }
  1265. return true;
  1266. err_out:
  1267. workio_cmd_free(wc);
  1268. return false;
  1269. }
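  /* Insert the winning 32-bit nonce into bytes 76-79 of the block header
   * (least significant byte first) and queue the share for submission. */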
  1270. bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
  1271. {
  1272. work->data[64+12+0] = (nonce>>0) & 0xff;
  1273. work->data[64+12+1] = (nonce>>8) & 0xff;
  1274. work->data[64+12+2] = (nonce>>16) & 0xff;
  1275. work->data[64+12+3] = (nonce>>24) & 0xff;
  1276. return submit_work_sync(thr, work);
  1277. }
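  /* Main loop for a CPU mining thread: fetch work, scan a nonce range with
   * the selected sha256 implementation, submit any solution found and
   * request new work before the scantime expires. */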
static void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	uint32_t max_nonce = 0xffffff;
	unsigned long hashes_done = max_nonce;
	bool needs_work = true;
	/* Try to cycle approximately 5 times before each log update */
	const unsigned long cycle = opt_log_interval / 5 ? : 1;
	/* Request the next work item at 2/3 of the scantime */
	unsigned const int request_interval = opt_scantime * 2 / 3 ? : 1;
	unsigned const long request_nonce = MAXTHREADS / 3 * 2;
	bool requested = true;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(thr_id - gpu_threads, dev_from_id(thr_id));

	while (1) {
		struct work work __attribute__((aligned(128)));
		struct timeval tv_workstart, tv_start, tv_end, diff;
		uint64_t max64;
		bool rc;

		if (needs_work) {
			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(&work, requested))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
					"mining thread %d", thr_id);
				goto out;
			}
			mythr->cgpu->getworks++;
			work.thr_id = thr_id;
			needs_work = requested = false;
			work.blk.nonce = 0;
			max_nonce = hashes_done;
		}
		hashes_done = 0;
		gettimeofday(&tv_start, NULL);

		/* scan nonces for a proof-of-work hash */
		switch (opt_algo) {
		case ALGO_C:
			rc = scanhash_c(thr_id, work.midstate, work.data + 64,
					work.hash1, work.hash, work.target,
					max_nonce, &hashes_done,
					work.blk.nonce);
			break;

#ifdef WANT_X8664_SSE2
		case ALGO_SSE2_64: {
			unsigned int rc5 =
				scanhash_sse2_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done,
						 work.blk.nonce);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_X8664_SSE4
		case ALGO_SSE4_64: {
			unsigned int rc5 =
				scanhash_sse4_64(thr_id, work.midstate, work.data + 64,
						 work.hash1, work.hash,
						 work.target,
						 max_nonce, &hashes_done,
						 work.blk.nonce);
			rc = (rc5 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_SSE2_4WAY
		case ALGO_4WAY: {
			unsigned int rc4 =
				ScanHash_4WaySSE2(thr_id, work.midstate, work.data + 64,
						  work.hash1, work.hash,
						  work.target,
						  max_nonce, &hashes_done,
						  work.blk.nonce);
			rc = (rc4 == -1) ? false : true;
			}
			break;
#endif

#ifdef WANT_VIA_PADLOCK
		case ALGO_VIA:
			rc = scanhash_via(thr_id, work.data, work.target,
					  max_nonce, &hashes_done,
					  work.blk.nonce);
			break;
#endif
		case ALGO_CRYPTOPP:
			rc = scanhash_cryptopp(thr_id, work.midstate, work.data + 64,
					       work.hash1, work.hash, work.target,
					       max_nonce, &hashes_done,
					       work.blk.nonce);
			break;

#ifdef WANT_CRYPTOPP_ASM32
		case ALGO_CRYPTOPP_ASM32:
			rc = scanhash_asm32(thr_id, work.midstate, work.data + 64,
					    work.hash1, work.hash, work.target,
					    max_nonce, &hashes_done,
					    work.blk.nonce);
			break;
#endif

		default:
			/* should never happen */
			goto out;
		}

		/* record scanhash elapsed time */
		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);

		hashes_done -= work.blk.nonce;
		hashmeter(thr_id, &diff, hashes_done);
		work.blk.nonce += hashes_done;

		/* adjust max_nonce to meet target cycle time */
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec && diff.tv_sec != cycle) {
			max64 = work.blk.nonce +
				((uint64_t)hashes_done * cycle) / diff.tv_sec;
		} else
			max64 = work.blk.nonce + hashes_done;
		if (max64 > 0xfffffffaULL)
			max64 = 0xfffffffaULL;
		max_nonce = max64;

		/* if nonce found, submit work */
		if (unlikely(rc)) {
			if (opt_debug)
				applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
			if (unlikely(!submit_work_sync(mythr, &work))) {
				applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
				break;
			}
			work.blk.nonce += 4;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work.blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}

		if (diff.tv_sec > opt_scantime || work_restart[thr_id].restart ||
		    work.blk.nonce >= MAXTHREADS - hashes_done ||
		    stale_work(&work))
			needs_work = true;
	}

out:
	tq_freeze(mythr->q);

	return NULL;
}

enum {
	STAT_SLEEP_INTERVAL	= 1,
	STAT_CTR_INTERVAL	= 10000000,
	FAILURE_INTERVAL	= 30,
};

#ifdef HAVE_OPENCL
static _clState *clStates[16];
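
/* Bind the precomputed block context values as arguments to the OpenCL
 * kernel. The trailing arguments differ depending on whether the phatk
 * (BFI_INT capable) or the poclbm kernel is in use. */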
static inline cl_int queue_kernel_parameters(_clState *clState, dev_blk_ctx *blk)
{
	cl_kernel *kernel = &clState->kernel;
	cl_int status = 0;
	int num = 0;

	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_a);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_e);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->ctx_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_b);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_c);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_d);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_f);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_g);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->cty_h);
	status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->nonce);

	if (clState->hasBitAlign == true) {
		/* Parameters for phatk kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W16);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->W17);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->PreVal4);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->T1);
	} else {
		/* Parameters for poclbm kernel */
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW0);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW1);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW2);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW3);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW15);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fW01r);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e);
		status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->fcty_e2);
	}
	status |= clSetKernelArg(*kernel, num++, sizeof(clState->outputBuffer),
				 (void *)&clState->outputBuffer);

	return status;
}
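
/* Derive the global work size and the number of hashes covered per kernel
 * enqueue from the current scan intensity and the device's vector width. */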
static void set_threads_hashes(unsigned int vectors, unsigned int *threads,
			       unsigned int *hashes, size_t *globalThreads)
{
	*globalThreads = *threads = 1 << (15 + scan_intensity);
	*hashes = *threads * vectors;
}
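
/* One GPU mining thread per device thread: keeps the OpenCL command queue
 * fed with kernel launches, adapts the scan intensity in dynamic mode, and
 * hands any returned nonces to postcalc_hash_async. */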
static void *gpuminer_thread(void *userdata)
{
	const unsigned long cycle = opt_log_interval / 5 ? : 1;
	struct timeval tv_start, tv_end, diff, tv_workstart;
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	uint32_t *res, *blank_res;
	double gpu_ms_average = 7;

	size_t globalThreads[1];
	size_t localThreads[1];

	cl_int status;
	_clState *clState = clStates[thr_id];
	const cl_kernel *kernel = &clState->kernel;

	struct work *work = malloc(sizeof(struct work));
	unsigned int threads = 1 << (15 + scan_intensity);
	unsigned const int vectors = clState->preferred_vwidth;
	unsigned int hashes = threads * vectors;
	unsigned int hashes_done = 0;

	/* Request the next work item at 2/3 of the scantime */
	unsigned const int request_interval = opt_scantime * 2 / 3 ? : 1;
	unsigned const long request_nonce = MAXTHREADS / 3 * 2;
	bool requested = true;

	if (opt_dynamic) {
		/* Minimise impact on desktop if we want dynamic mode */
		setpriority(PRIO_PROCESS, 0, 19);
		drop_policy();
	}
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	res = calloc(BUFFERSIZE, 1);
	blank_res = calloc(BUFFERSIZE, 1);
	if (!res || !blank_res) {
		applog(LOG_ERR, "Failed to calloc in gpuminer_thread");
		goto out;
	}

	gettimeofday(&tv_start, NULL);
	globalThreads[0] = threads;
	localThreads[0] = clState->work_size;
	diff.tv_sec = 0;
	gettimeofday(&tv_end, NULL);

	status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_TRUE, 0,
				      BUFFERSIZE, blank_res, 0, NULL, NULL);
	if (unlikely(status != CL_SUCCESS))
		{ applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }

	gettimeofday(&tv_workstart, NULL);
	/* obtain new work from internal workio thread */
	if (unlikely(!get_work(work, requested))) {
		applog(LOG_ERR, "work retrieval failed, exiting "
			"gpu mining thread %d", mythr->id);
		goto out;
	}
	mythr->cgpu->getworks++;
	work->thr_id = thr_id;
	requested = false;
	precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
	work->blk.nonce = 0;

	while (1) {
		struct timeval tv_gpustart, tv_gpuend;
		suseconds_t gpu_us;

		gettimeofday(&tv_gpustart, NULL);
		/* This finish flushes the readbuffer set with CL_FALSE later */
		clFinish(clState->commandQueue);
		gettimeofday(&tv_gpuend, NULL);
		timeval_subtract(&diff, &tv_gpuend, &tv_gpustart);
		gpu_us = diff.tv_sec * 1000000 + diff.tv_usec;
		gpu_ms_average = ((gpu_us / 1000) + gpu_ms_average * 0.9) / 1.9;
		if (opt_dynamic) {
			/* Try to not let the GPU be out for longer than 6ms, but
			 * increase intensity when the system is idle, unless
			 * dynamic is disabled. */
			if (gpu_ms_average > 7) {
				if (scan_intensity > 0)
					scan_intensity--;
				set_threads_hashes(vectors, &threads, &hashes, globalThreads);
			} else if (gpu_ms_average < 3) {
				if (scan_intensity < 14)
					scan_intensity++;
				set_threads_hashes(vectors, &threads, &hashes, globalThreads);
			}
		}
		if (diff.tv_sec > opt_scantime ||
		    work->blk.nonce >= MAXTHREADS - hashes ||
		    work_restart[thr_id].restart ||
		    stale_work(work)) {
			/* Ignore any reads since we're getting new work and queue a clean buffer */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
						      BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS))
				{ applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
			memset(res, 0, BUFFERSIZE);

			gettimeofday(&tv_workstart, NULL);
			/* obtain new work from internal workio thread */
			if (unlikely(!get_work(work, requested))) {
				applog(LOG_ERR, "work retrieval failed, exiting "
					"gpu mining thread %d", mythr->id);
				goto out;
			}
			mythr->cgpu->getworks++;
			work->thr_id = thr_id;
			requested = false;

			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
			work->blk.nonce = 0;
			work_restart[thr_id].restart = 0;

			if (opt_debug)
				applog(LOG_DEBUG, "getwork thread %d", thr_id);
			/* Flushes the writebuffer set with CL_FALSE above */
			clFinish(clState->commandQueue);
		}

		status = queue_kernel_parameters(clState, &work->blk);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: clSetKernelArg of all params failed."); goto out; }

		/* MAXBUFFERS entry is used as a flag to say nonces exist */
		if (res[MAXBUFFERS]) {
			/* Clear the buffer again */
			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
						      BUFFERSIZE, blank_res, 0, NULL, NULL);
			if (unlikely(status != CL_SUCCESS))
				{ applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); goto out; }
			if (opt_debug)
				applog(LOG_DEBUG, "GPU %d found something?", dev_from_id(thr_id));
			postcalc_hash_async(mythr, work, res);
			memset(res, 0, BUFFERSIZE);
			clFinish(clState->commandQueue);
		}

		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL,
						globalThreads, localThreads, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)"); goto out; }

		status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
					     BUFFERSIZE, res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS))
			{ applog(LOG_ERR, "Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)"); goto out; }

		gettimeofday(&tv_end, NULL);
		timeval_subtract(&diff, &tv_end, &tv_start);
		hashes_done += hashes;
		work->blk.nonce += hashes;
		if (diff.tv_usec > 500000)
			diff.tv_sec++;
		if (diff.tv_sec >= cycle) {
			hashmeter(thr_id, &diff, hashes_done);
			gettimeofday(&tv_start, NULL);
			hashes_done = 0;
		}

		timeval_subtract(&diff, &tv_end, &tv_workstart);
		if (!requested && (diff.tv_sec > request_interval || work->blk.nonce > request_nonce)) {
			if (unlikely(!queue_request())) {
				applog(LOG_ERR, "Failed to queue_request in gpuminer_thread %d", thr_id);
				goto out;
			}
			requested = true;
		}
	}
out:
	tq_freeze(mythr->q);

	return NULL;
}
#endif /* HAVE_OPENCL */
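
/* Flush any queued work requests and ask every mining thread to drop its
 * current work item, typically because a new block has been detected. */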
static void restart_threads(bool longpoll)
{
	int i;

	/* Discard old queued requests and get new ones */
	flush_requests(longpoll);

	for (i = 0; i < mining_threads; i++)
		work_restart[i].restart = 1;
}

/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val)
{
	struct work *work;
	bool rc;

	work = calloc(sizeof(*work), 1);
	if (unlikely(!work)) {
		applog(LOG_ERR, "OOM in convert_to_work");
		return;
	}

	rc = work_decode(json_object_get(val, "result"), work);
	if (unlikely(!rc)) {
		applog(LOG_ERR, "Could not convert longpoll data to work");
		free(work);
		return;
	}

	if (unlikely(!tq_push(thr_info[stage_thr_id].q, work)))
		applog(LOG_ERR, "Could not tq_push work in convert_to_work");
	else if (opt_debug)
		applog(LOG_DEBUG, "Converted longpoll data to work");
}
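
/* Long polling: keep an HTTP request open against the pool's longpoll URL so
 * the server can notify us of a new block, then restart the miners and stage
 * the work returned with the notification. */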
static void *longpoll_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	CURL *curl = NULL;
	char *copy_start, *hdr_path, *lp_url = NULL;
	bool need_slash = false;
	int failures = 0;
	struct pool *pool = cp;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	hdr_path = tq_pop(mythr->q, NULL);
	if (!hdr_path)
		goto out;

	/* full URL */
	if (strstr(hdr_path, "://")) {
		lp_url = hdr_path;
		hdr_path = NULL;
	}
	/* absolute path, on current server */
	else {
		copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path;
		if (pool->rpc_url[strlen(pool->rpc_url) - 1] != '/')
			need_slash = true;

		lp_url = malloc(strlen(pool->rpc_url) + strlen(copy_start) + 2);
		if (!lp_url)
			goto out;

		sprintf(lp_url, "%s%s%s", pool->rpc_url, need_slash ? "/" : "", copy_start);
	}

	applog(LOG_INFO, "Long-polling activated for %s", lp_url);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		goto out;
	}

	while (1) {
		struct timeval start, end;
		json_t *val;

		gettimeofday(&start, NULL);
		val = json_rpc_call(curl, lp_url, pool->rpc_userpass, rpc_req,
				    false, true);
		if (likely(val)) {
			/* Keep track of who ordered a restart_threads to make
			 * sure it's only done once per new block */
			if (likely(!strncmp(longpoll_block, blank, 36) ||
				   !strncmp(longpoll_block, current_block, 36))) {
				new_blocks++;
				applog(LOG_WARNING, "LONGPOLL detected new block on network, waiting on fresh work");
				restart_threads(true);
			} else
				applog(LOG_WARNING, "LONGPOLL received after new block already detected");

			convert_to_work(val);
			failures = 0;
			json_decref(val);
		} else {
			/* Some pools regularly drop the longpoll request so
			 * only see this as longpoll failure if it happens
			 * immediately and just restart it the rest of the
			 * time. */
			gettimeofday(&end, NULL);
			if (end.tv_sec - start.tv_sec > 30)
				continue;
			if (failures++ < 10) {
				applog(LOG_WARNING,
				       "longpoll failed, sleeping for 30s");
				sleep(30);
			} else {
				applog(LOG_ERR,
				       "longpoll failed, ending thread");
				goto out;
			}
		}
		memcpy(longpoll_block, current_block, 36);
	}

out:
	free(hdr_path);
	free(lp_url);
	tq_freeze(mythr->q);
	if (curl)
		curl_easy_cleanup(curl);

	return NULL;
}
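
/* Cancel a stalled CPU miner thread, thaw its queue and start a fresh thread
 * in its place. */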
static void reinit_cputhread(int thr_id)
{
	struct thr_info *thr = &thr_info[thr_id];

	tq_freeze(thr->q);
	if (!(pthread_cancel(thr->pth)) && pthread_join(thr->pth, NULL)) {
		applog(LOG_ERR, "Failed to pthread_join in reinit_cputhread");
		goto failed_out;
	}

	applog(LOG_INFO, "Reinit CPU thread %d", thr_id);
	tq_thaw(thr->q);

	gettimeofday(&thr->last, NULL);

	if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
		applog(LOG_ERR, "thread %d create failed", thr_id);
		goto failed_out;
	}
	return;

failed_out:
	kill_work();
}

#ifdef HAVE_OPENCL
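/* As above, but for a GPU thread: the OpenCL state is freed and rebuilt with
 * initCl() before the thread is relaunched. */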
static void reinit_gputhread(int thr_id)
{
	int gpu = dev_from_id(thr_id);
	struct thr_info *thr = &thr_info[thr_id];
	char name[256];

	tq_freeze(thr->q);
	if (!(pthread_cancel(thr->pth)) && pthread_join(thr->pth, NULL)) {
		applog(LOG_ERR, "Failed to pthread_join in reinit_gputhread");
		goto failed_out;
	}
	free(clStates[thr_id]);

	applog(LOG_INFO, "Reinit GPU thread %d", thr_id);
	tq_thaw(thr->q);

	clStates[thr_id] = initCl(gpu, name, sizeof(name));
	if (!clStates[thr_id]) {
		applog(LOG_ERR, "Failed to reinit GPU thread %d", thr_id);
		goto failed_out;
	}
	applog(LOG_INFO, "initCl() finished. Found %s", name);

	gettimeofday(&thr->last, NULL);

	if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
		applog(LOG_ERR, "thread %d create failed", thr_id);
		goto failed_out;
	}
	return;

failed_out:
	kill_work();
}

static void reinit_thread(int thr_id)
{
	if (thr_id < gpu_threads)
		reinit_gputhread(thr_id);
	else
		reinit_cputhread(thr_id);
}
#else
static void reinit_thread(int thr_id)
{
	reinit_cputhread(thr_id);
}
#endif

/* Determine which are the first threads belonging to a device and if they're
 * active */
static bool active_device(int thr_id)
{
	if (thr_id < gpu_threads) {
		if (thr_id >= total_devices)
			return false;
		if (!gpu_devices[dev_from_id(thr_id)])
			return false;
	} else if (thr_id > gpu_threads + num_processors)
		return false;
	return true;
}

/* Makes sure the hashmeter keeps going even if mining threads stall, updates
 * the screen at regular intervals, and restarts threads if they appear to have
 * died. */
static void *watchdog_thread(void *userdata)
{
	const unsigned int interval = opt_log_interval / 2 ? : 1;
	struct timeval zero_tv;
	struct pool *pool = cp;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	memset(&zero_tv, 0, sizeof(struct timeval));

	while (1) {
		int x, y, logx, logy, i;
		struct timeval now;

		sleep(interval);
		if (requests_queued() < opt_queue)
			queue_request();

		hashmeter(-1, &zero_tv, 0);

		if (curses_active) {
			pthread_mutex_lock(&curses_lock);
			getmaxyx(mainwin, y, x);
			getmaxyx(logwin, logy, logx);
			y -= logcursor;
			/* Detect screen size change */
			if (x != logx || y != logy)
				wresize(logwin, y, x);
			for (i = 0; i < mining_threads; i++) {
				if (active_device(i))
					curses_print_status(i);
			}
			redrawwin(logwin);
			redrawwin(statuswin);
			pthread_mutex_unlock(&curses_lock);
		}

		if (unlikely(work_restart[watchdog_thr_id].restart)) {
			restart_threads(false);
			work_restart[watchdog_thr_id].restart = 0;
		}

		gettimeofday(&now, NULL);
#if 0
		for (i = 0; i < mining_threads; i++) {
#else
		for (i = 0; i < gpu_threads; i++) {
#endif
			struct thr_info *thr = &thr_info[i];

			/* Do not kill threads waiting on longpoll staged work
			 * or idle network */
			if (now.tv_sec - thr->last.tv_sec > 60 && !pool->idlenet) {
				applog(LOG_ERR, "Attempting to restart thread %d, idle for more than 60 seconds", i);
				/* Create one mandatory work item */
				inc_staged(1, true);
				if (unlikely(!queue_request())) {
					applog(LOG_ERR, "Failed to queue_request in watchdog_thread");
					kill_work();
					break;
				}
				reinit_thread(i);
				applog(LOG_WARNING, "Thread %d restarted", i);
			}
		}
	}

	return NULL;
}
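
/* Print the accumulated runtime, share and per device statistics when the
 * program exits. */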
static void print_summary(void)
{
	struct timeval diff;
	int hours, mins, secs, i;
	double utility, efficiency = 0.0;

	timeval_subtract(&diff, &total_tv_end, &total_tv_start);
	hours = diff.tv_sec / 3600;
	mins = (diff.tv_sec % 3600) / 60;
	secs = diff.tv_sec % 60;

	utility = total_accepted / (total_secs ? total_secs : 1) * 60;
	efficiency = total_getworks ? total_accepted * 100.0 / total_getworks : 0.0;

	printf("\nSummary of runtime statistics:\n\n");
	printf("Started at %s\n", datestamp);
	printf("Runtime: %d hrs : %d mins : %d secs\n", hours, mins, secs);
	if (total_secs)
		printf("Average hashrate: %.1f Megahash/s\n", total_mhashes_done / total_secs);
	printf("Queued work requests: %d\n", total_getworks);
	printf("Share submissions: %d\n", total_accepted + total_rejected);
	printf("Accepted shares: %d\n", total_accepted);
	printf("Rejected shares: %d\n", total_rejected);
	if (total_accepted || total_rejected)
		printf("Reject ratio: %.1f\n", (double)(total_rejected * 100) / (double)(total_accepted + total_rejected));
	printf("Hardware errors: %d\n", hw_errors);
	printf("Efficiency (accepted / queued): %.0f%%\n", efficiency);
	printf("Utility (accepted shares / min): %.2f/min\n\n", utility);

	printf("Discarded work due to new blocks: %d\n", total_discarded);
	printf("Stale submissions discarded due to new blocks: %d\n", total_stale);
	printf("Unable to get work from server occasions: %d\n", total_lo);
	printf("Work items generated locally: %d\n", local_work);
	printf("Submitting work remotely delay occasions: %d\n", total_ro);
	printf("New blocks detected on network: %d\n\n", new_blocks);

	printf("Summary of per device statistics:\n\n");
	for (i = 0; i < mining_threads; i++) {
		if (active_device(i))
			print_status(i);
	}
	printf("\n");
}
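
/* Program entry point: parse the options, set up the pool, the curses
 * interface and the worker and monitoring threads, then wait for the workio
 * thread to exit. */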
int main (int argc, char *argv[])
{
	unsigned int i, j = 0, x, y;
	struct sigaction handler;
	struct thr_info *thr;
	char name[256];
	struct tm tm;
	struct pool *pool;

	/* This dangerous function tramples random dynamically allocated
	 * variables so do it before anything at all */
	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		return 1;
	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&qd_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&stgd_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&curses_lock, NULL)))
		return 1;
	if (unlikely(pthread_mutex_init(&control_lock, NULL)))
		return 1;

	handler.sa_handler = &sighandler;
	sigaction(SIGTERM, &handler, &termhandler);
	sigaction(SIGINT, &handler, &inthandler);

	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	localtime_r(&total_tv_start.tv_sec, &tm);
	sprintf(datestamp, "[%d-%02d-%02d %02d:%02d:%02d]",
		tm.tm_year + 1900,
		tm.tm_mon + 1,
		tm.tm_mday,
		tm.tm_hour,
		tm.tm_min,
		tm.tm_sec);

	for (i = 0; i < 36; i++) {
		strcat(blank, "0");
		strcat(current_block, "0");
		strcat(longpoll_block, "0");
	}

#ifdef WIN32
	opt_n_threads = num_processors = 1;
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
	opt_n_threads = num_processors;
#endif /* !WIN32 */

#ifdef HAVE_OPENCL
	for (i = 0; i < 16; i++)
		gpu_devices[i] = false;
	nDevs = clDevicesNum();
	if (nDevs < 0)
		return 1;
#endif
	if (nDevs)
		opt_n_threads = 0;

	trpc_url = strdup(DEF_RPC_URL);

	/* parse command line */
	opt_register_table(opt_config_table,
			   "Options for both config file and command line");
	opt_register_table(opt_cmdline_table,
			   "Options for command line only");
	opt_parse(&argc, argv, applog_and_exit);
	if (argc != 1) {
		applog(LOG_ERR, "Unexpected extra commandline arguments");
		return 1;
	}

	if (!total_pools) {
		applog(LOG_ERR, "No server specified");
		return 1;
	}
	cp = &pools[0];
	pool = cp;

	if (total_devices) {
		if (total_devices > nDevs) {
			applog(LOG_ERR, "More devices specified than exist");
			return 1;
		}
		for (i = 0; i < 16; i++)
			if (gpu_devices[i] && i + 1 > nDevs) {
				applog(LOG_ERR, "Command line options set a device that doesn't exist");
				return 1;
			}
		gpu_threads = total_devices * opt_g_threads;
	} else {
		gpu_threads = nDevs * opt_g_threads;
		for (i = 0; i < nDevs; i++)
			gpu_devices[i] = true;
		total_devices = nDevs;
	}

	if (!gpu_threads && !forced_n_threads) {
		/* Maybe they turned GPU off; restore default CPU threads. */
		opt_n_threads = num_processors;
	}

	logcursor = 7;
	mining_threads = opt_n_threads + gpu_threads;
	gpucursor = logcursor;
	cpucursor = gpucursor + nDevs;
	logstart = cpucursor + (opt_n_threads ? num_processors : 0) + 1;
	logcursor = logstart + 1;

	if (!pool->rpc_userpass) {
		if (!pool->rpc_user || !pool->rpc_pass) {
			applog(LOG_ERR, "No login credentials supplied");
			return 1;
		}
		pool->rpc_userpass = malloc(strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2);
		if (!pool->rpc_userpass)
			return 1;
		sprintf(pool->rpc_userpass, "%s:%s", pool->rpc_user, pool->rpc_pass);
	} else {
		pool->rpc_user = malloc(strlen(pool->rpc_userpass) + 1);
		if (!pool->rpc_user)
			return 1;
		strcpy(pool->rpc_user, pool->rpc_userpass);
		pool->rpc_user = strtok(pool->rpc_user, ":");
		if (!pool->rpc_user) {
			applog(LOG_ERR, "Failed to find colon delimiter in userpass");
			return 1;
		}
	}

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog("cpuminer", LOG_PID, LOG_USER);
#endif

	work_restart = calloc(mining_threads + 4, sizeof(*work_restart));
	if (!work_restart)
		return 1;

	thr_info = calloc(mining_threads + 4, sizeof(*thr));
	if (!thr_info)
		return 1;
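
	/* Slots 0 .. mining_threads - 1 hold the miner threads; the four extra
	 * slots allocated above are used by the workio, longpoll, watchdog and
	 * stage threads. */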
	/* init workio thread info */
	work_thr_id = mining_threads;
	thr = &thr_info[work_thr_id];
	thr->id = work_thr_id;
	thr->q = tq_new();
	if (!thr->q)
		return 1;

	/* start work I/O thread */
	if (pthread_create(&thr->pth, NULL, workio_thread, thr)) {
		applog(LOG_ERR, "workio thread create failed");
		return 1;
	}

	/* init longpoll thread info */
	if (want_longpoll) {
		longpoll_thr_id = mining_threads + 1;
		thr = &thr_info[longpoll_thr_id];
		thr->id = longpoll_thr_id;
		thr->q = tq_new();
		if (!thr->q)
			return 1;

		/* start longpoll thread */
		if (unlikely(pthread_create(&thr->pth, NULL, longpoll_thread, thr))) {
			applog(LOG_ERR, "longpoll thread create failed");
			return 1;
		}
		pthread_detach(thr->pth);
	} else
		longpoll_thr_id = -1;

	if (opt_n_threads) {
		cpus = calloc(num_processors, sizeof(struct cgpu_info));
		if (unlikely(!cpus)) {
			applog(LOG_ERR, "Failed to calloc cpus");
			return 1;
		}
	}
	if (gpu_threads) {
		gpus = calloc(nDevs, sizeof(struct cgpu_info));
		if (unlikely(!gpus)) {
			applog(LOG_ERR, "Failed to calloc gpus");
			return 1;
		}
	}

	stage_thr_id = mining_threads + 3;
	thr = &thr_info[stage_thr_id];
	thr->q = tq_new();
	if (!thr->q)
		return 1;
	/* start stage thread */
	if (pthread_create(&thr->pth, NULL, stage_thread, thr)) {
		applog(LOG_ERR, "stage thread create failed");
		return 1;
	}

	/* Flag the work as ready forcing the mining threads to wait till we
	 * actually put something into the queue */
	inc_staged(mining_threads, true);

	/* Create a unique get work queue */
	getq = tq_new();
	if (!getq) {
		applog(LOG_ERR, "Failed to create getq");
		return 1;
	}

#ifdef HAVE_OPENCL
	i = 0;

	/* start GPU mining threads */
	for (j = 0; j < nDevs * opt_g_threads; j++) {
		int gpu = j % nDevs;

		gpus[gpu].is_gpu = 1;
		gpus[gpu].cpu_gpu = gpu;
		/* Skip devices not set to mine */
		if (!gpu_devices[gpu])
			continue;

		thr = &thr_info[i];
		thr->id = i;
		thr->cgpu = &gpus[gpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting gpu mining threads");
			return 1;
		}

		applog(LOG_INFO, "Init GPU thread %i", i);
		clStates[i] = initCl(gpu, name, sizeof(name));
		if (!clStates[i]) {
			applog(LOG_ERR, "Failed to init GPU thread %d", i);
			continue;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		gettimeofday(&thr->last, NULL);

		if (unlikely(pthread_create(&thr->pth, NULL, gpuminer_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
		i++;
	}

	applog(LOG_INFO, "%d gpu miner threads started", gpu_threads);
#endif

	/* start CPU mining threads */
	for (i = gpu_threads; i < mining_threads; i++) {
		int cpu = (i - gpu_threads) % num_processors;

		thr = &thr_info[i];
		thr->id = i;
		cpus[cpu].cpu_gpu = cpu;
		thr->cgpu = &cpus[cpu];

		thr->q = tq_new();
		if (!thr->q) {
			applog(LOG_ERR, "tq_new failed in starting cpu mining threads");
			return 1;
		}

		gettimeofday(&thr->last, NULL);

		if (unlikely(pthread_create(&thr->pth, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", i);
			return 1;
		}
	}

	applog(LOG_INFO, "%d cpu miner threads started, "
		"using SHA256 '%s' algorithm.",
		opt_n_threads,
		algo_names[opt_algo]);

	watchdog_thr_id = mining_threads + 2;
	thr = &thr_info[watchdog_thr_id];
	/* start wakeup thread */
	if (pthread_create(&thr->pth, NULL, watchdog_thread, NULL)) {
		applog(LOG_ERR, "wakeup thread create failed");
		return 1;
	}

	/* Restart count as it will be wrong till all threads are started */
	pthread_mutex_lock(&hash_lock);
	gettimeofday(&total_tv_start, NULL);
	gettimeofday(&total_tv_end, NULL);
	total_mhashes_done = 0;
	pthread_mutex_unlock(&hash_lock);

	/* Set up the ncurses interface */
	if (!opt_quiet && use_curses) {
		mainwin = initscr();
		getmaxyx(mainwin, y, x);
		statuswin = newwin(logstart, x, 0, 0);
		logwin = newwin(y - logcursor, 0, logcursor, 0);
		idlok(logwin, true);
		scrollok(logwin, true);
		leaveok(logwin, true);
		leaveok(statuswin, true);
		test_and_set(&curses_active);
		for (i = 0; i < mining_threads; i++)
			print_status(i);
	}

	/* Now that everything's ready put enough work in the queue */
	for (i = 0; i < opt_queue + mining_threads; i++) {
		if (unlikely(!queue_request())) {
			applog(LOG_ERR, "Failed to queue_request in main");
			return 1;
		}
	}

	/* main loop - simply wait for workio thread to exit */
	pthread_join(thr_info[work_thr_id].pth, NULL);
	applog(LOG_INFO, "workio thread dead, exiting.");

	gettimeofday(&total_tv_end, NULL);
	disable_curses();
	if (!opt_quiet && successful_connect)
		print_summary();

	if (gpu_threads)
		free(gpus);
	if (opt_n_threads)
		free(cpus);

	if (pools)
		free(pools);

	curl_global_cleanup();

	return 0;
}