util.c
  1. /*
  2. * Copyright 2011-2013 Con Kolivas
  3. * Copyright 2011-2013 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. * Copyright 2012 Giel van Schijndel
  6. * Copyright 2012 Gavin Andresen
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 3 of the License, or (at your option)
  11. * any later version. See COPYING for more details.
  12. */
  13. #include "config.h"
  14. #include <stdbool.h>
  15. #include <stdint.h>
  16. #include <stdio.h>
  17. #include <stdlib.h>
  18. #include <ctype.h>
  19. #include <stdarg.h>
  20. #include <string.h>
  21. #include <pthread.h>
  22. #include <jansson.h>
  23. #include <curl/curl.h>
  24. #include <time.h>
  25. #include <errno.h>
  26. #include <unistd.h>
  27. #include <sys/types.h>
  28. #ifdef HAVE_SYS_PRCTL_H
  29. # include <sys/prctl.h>
  30. #endif
  31. #if defined(__FreeBSD__) || defined(__OpenBSD__)
  32. # include <pthread_np.h>
  33. #endif
  34. #ifndef WIN32
  35. #include <fcntl.h>
  36. # ifdef __linux
  37. # include <sys/prctl.h>
  38. # endif
  39. # include <sys/socket.h>
  40. # include <netinet/in.h>
  41. # include <netinet/tcp.h>
  42. # include <netdb.h>
  43. #else
  44. # include <windows.h>
  45. # include <winsock2.h>
  46. # include <mstcpip.h>
  47. # include <ws2tcpip.h>
  48. # include <mmsystem.h>
  49. #endif
  50. #include <utlist.h>
  51. #include "miner.h"
  52. #include "compat.h"
  53. #include "util.h"
  54. #define DEFAULT_SOCKWAIT 60
  55. bool successful_connect = false;
  56. struct timeval nettime;
  57. struct data_buffer {
  58. void *buf;
  59. size_t len;
  60. curl_socket_t *idlemarker;
  61. };
  62. struct upload_buffer {
  63. const void *buf;
  64. size_t len;
  65. };
  66. struct header_info {
  67. char *lp_path;
  68. int rolltime;
  69. char *reason;
  70. char *stratum_url;
  71. bool hadrolltime;
  72. bool canroll;
  73. bool hadexpire;
  74. };
  75. struct tq_ent {
  76. void *data;
  77. struct tq_ent *prev;
  78. struct tq_ent *next;
  79. };
  80. static void databuf_free(struct data_buffer *db)
  81. {
  82. if (!db)
  83. return;
  84. free(db->buf);
  85. #ifdef DEBUG_DATABUF
  86. applog(LOG_DEBUG, "databuf_free(%p)", db->buf);
  87. #endif
  88. memset(db, 0, sizeof(*db));
  89. }
  90. // aka data_buffer_write
  91. static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb,
  92. void *user_data)
  93. {
  94. struct data_buffer *db = user_data;
  95. size_t oldlen, newlen;
  96. oldlen = db->len;
  97. if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size))
  98. return 0;
  99. if (unlikely(nmemb > (SIZE_MAX - oldlen) / size))
  100. nmemb = (SIZE_MAX - oldlen) / size;
  101. size_t len = size * nmemb;
  102. void *newmem;
  103. static const unsigned char zero = 0;
  104. if (db->idlemarker) {
  105. const unsigned char *cptr = ptr;
  106. for (size_t i = 0; i < len; ++i)
  107. if (!(isCspace(cptr[i]) || cptr[i] == '{')) {
  108. *db->idlemarker = CURL_SOCKET_BAD;
  109. db->idlemarker = NULL;
  110. break;
  111. }
  112. }
  113. newlen = oldlen + len;
  114. newmem = realloc(db->buf, newlen + 1);
  115. #ifdef DEBUG_DATABUF
  116. applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem);
  117. #endif
  118. if (!newmem)
  119. return 0;
  120. db->buf = newmem;
  121. db->len = newlen;
  122. memcpy(db->buf + oldlen, ptr, len);
  123. memcpy(db->buf + newlen, &zero, 1); /* null terminate */
  124. return nmemb;
  125. }
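/* curl CURLOPT_READFUNCTION callback: copies the next chunk of the pending
 * JSON-RPC request body from the upload_buffer into curl's buffer. */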
  126. static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb,
  127. void *user_data)
  128. {
  129. struct upload_buffer *ub = user_data;
  130. unsigned int len = size * nmemb;
  131. if (len > ub->len)
  132. len = ub->len;
  133. if (len) {
  134. memcpy(ptr, ub->buf, len);
  135. ub->buf += len;
  136. ub->len -= len;
  137. }
  138. return len;
  139. }
  140. static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
  141. {
  142. struct header_info *hi = user_data;
  143. size_t remlen, slen, ptrlen = size * nmemb;
  144. char *rem, *val = NULL, *key = NULL;
  145. void *tmp;
  146. val = calloc(1, ptrlen);
  147. key = calloc(1, ptrlen);
  148. if (!key || !val)
  149. goto out;
  150. tmp = memchr(ptr, ':', ptrlen);
  151. if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */
  152. goto out;
  153. slen = tmp - ptr;
  154. if ((slen + 1) == ptrlen) /* skip key w/ no value */
  155. goto out;
  156. memcpy(key, ptr, slen); /* store & nul term key */
  157. key[slen] = 0;
  158. rem = ptr + slen + 1; /* trim value's leading whitespace */
  159. remlen = ptrlen - slen - 1;
  160. while ((remlen > 0) && (isCspace(*rem))) {
  161. remlen--;
  162. rem++;
  163. }
  164. memcpy(val, rem, remlen); /* store value, trim trailing ws */
  165. val[remlen] = 0;
  166. while ((*val) && (isCspace(val[strlen(val) - 1])))
  167. val[strlen(val) - 1] = 0;
  168. if (!*val) /* skip blank value */
  169. goto out;
  170. if (opt_protocol)
  171. applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);
  172. if (!strcasecmp("X-Roll-Ntime", key)) {
  173. hi->hadrolltime = true;
  174. if (!strncasecmp("N", val, 1))
  175. applog(LOG_DEBUG, "X-Roll-Ntime: N found");
  176. else {
  177. hi->canroll = true;
  178. /* Check to see if expire= is supported and if not, set
  179. * the rolltime to the default scantime */
  180. if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) {
  181. sscanf(val + 7, "%d", &hi->rolltime);
  182. hi->hadexpire = true;
  183. } else
  184. hi->rolltime = opt_scantime;
  185. applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
  186. }
  187. }
  188. if (!strcasecmp("X-Long-Polling", key)) {
  189. hi->lp_path = val; /* steal memory reference */
  190. val = NULL;
  191. }
  192. if (!strcasecmp("X-Reject-Reason", key)) {
  193. hi->reason = val; /* steal memory reference */
  194. val = NULL;
  195. }
  196. if (!strcasecmp("X-Stratum", key)) {
  197. hi->stratum_url = val;
  198. val = NULL;
  199. }
  200. out:
  201. free(key);
  202. free(val);
  203. return ptrlen;
  204. }
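/* Put a freshly connected socket into non-blocking mode and enable TCP
 * keepalive; Nagle (TCP_NODELAY) is also disabled unless opt_delaynet is set.
 * Returns nonzero if any option could not be applied. */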
  205. static int keep_sockalive(SOCKETTYPE fd)
  206. {
  207. const int tcp_one = 1;
  208. const int tcp_keepidle = 45;
  209. const int tcp_keepintvl = 30;
  210. int ret = 0;
  211. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one))))
  212. ret = 1;
  213. #ifndef WIN32
  214. int flags = fcntl(fd, F_GETFL, 0);
  215. fcntl(fd, F_SETFL, O_NONBLOCK | flags);
  216. #else
  217. u_long flags = 1;
  218. ioctlsocket(fd, FIONBIO, &flags);
  219. #endif
  220. if (!opt_delaynet)
  221. #ifndef __linux
  222. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  223. #else /* __linux */
  224. if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  225. #endif /* __linux */
  226. ret = 1;
  227. #ifdef __linux
  228. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one))))
  229. ret = 1;
  230. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle))))
  231. ret = 1;
  232. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  233. ret = 1;
  234. #endif /* __linux */
  235. #ifdef __APPLE_CC__
  236. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  237. ret = 1;
  238. #endif /* __APPLE_CC__ */
  239. #ifdef WIN32
  240. const int zero = 0;
  241. struct tcp_keepalive vals;
  242. vals.onoff = 1;
  243. vals.keepalivetime = tcp_keepidle * 1000;
  244. vals.keepaliveinterval = tcp_keepintvl * 1000;
  245. DWORD outputBytes;
  246. if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL)))
  247. ret = 1;
  248. /* Windows will keep accepting data into the send buffer indefinitely,
  249. * blissfully unaware that nothing is actually being delivered, and never
  250. * fails gracefully unless we disable the send buffer */
  251. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero))))
  252. ret = 1;
  253. #endif /* WIN32 */
  254. return ret;
  255. }
  256. int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
  257. curlsocktype __maybe_unused purpose)
  258. {
  259. return keep_sockalive(fd);
  260. }
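/* nettime records when a request last hit the network; when opt_delaynet is
 * enabled, json_rpc_call_async() uses it to space non-share requests at least
 * 250ms apart. */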
  261. static void last_nettime(struct timeval *last)
  262. {
  263. rd_lock(&netacc_lock);
  264. last->tv_sec = nettime.tv_sec;
  265. last->tv_usec = nettime.tv_usec;
  266. rd_unlock(&netacc_lock);
  267. }
  268. static void set_nettime(void)
  269. {
  270. wr_lock(&netacc_lock);
  271. cgtime(&nettime);
  272. wr_unlock(&netacc_lock);
  273. }
  274. static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
  275. char *data, size_t size,
  276. void *userdata)
  277. {
  278. struct pool *pool = (struct pool *)userdata;
  279. switch(type) {
  280. case CURLINFO_HEADER_IN:
  281. case CURLINFO_DATA_IN:
  282. case CURLINFO_SSL_DATA_IN:
  283. pool->cgminer_pool_stats.bytes_received += size;
  284. total_bytes_rcvd += size;
  285. pool->cgminer_pool_stats.net_bytes_received += size;
  286. break;
  287. case CURLINFO_HEADER_OUT:
  288. case CURLINFO_DATA_OUT:
  289. case CURLINFO_SSL_DATA_OUT:
  290. pool->cgminer_pool_stats.bytes_sent += size;
  291. total_bytes_sent += size;
  292. pool->cgminer_pool_stats.net_bytes_sent += size;
  293. break;
  294. case CURLINFO_TEXT:
  295. {
  296. if (!opt_protocol)
  297. break;
  298. // data is not null-terminated, so we need to copy and terminate it for applog
  299. char datacp[size + 1];
  300. memcpy(datacp, data, size);
  301. while (likely(size) && unlikely(isCspace(datacp[size-1])))
  302. --size;
  303. if (unlikely(!size))
  304. break;
  305. datacp[size] = '\0';
  306. applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp);
  307. break;
  308. }
  309. default:
  310. break;
  311. }
  312. return 0;
  313. }
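/* Per-request state for an asynchronous JSON-RPC call; a pointer to it is
 * stored via CURLOPT_PRIVATE so json_rpc_call_completed() can recover it once
 * curl has finished the transfer. */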
  314. struct json_rpc_call_state {
  315. struct data_buffer all_data;
  316. struct header_info hi;
  317. void *priv;
  318. char curl_err_str[CURL_ERROR_SIZE];
  319. struct curl_slist *headers;
  320. struct upload_buffer upload_data;
  321. struct pool *pool;
  322. };
  323. void json_rpc_call_async(CURL *curl, const char *url,
  324. const char *userpass, const char *rpc_req,
  325. bool longpoll,
  326. struct pool *pool, bool share,
  327. void *priv)
  328. {
  329. struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state));
  330. *state = (struct json_rpc_call_state){
  331. .priv = priv,
  332. .pool = pool,
  333. };
  334. long timeout = longpoll ? (60 * 60) : 60;
  335. char len_hdr[64], user_agent_hdr[128];
  336. struct curl_slist *headers = NULL;
  337. if (longpoll)
  338. state->all_data.idlemarker = &pool->lp_socket;
  339. /* it is assumed that 'curl' is freshly [re]initialized at this point */
  340. curl_easy_setopt(curl, CURLOPT_PRIVATE, state);
  341. curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
  342. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  343. * to enable it */
  344. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  345. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  346. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  347. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  348. curl_easy_setopt(curl, CURLOPT_URL, url);
  349. curl_easy_setopt(curl, CURLOPT_ENCODING, "");
  350. curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
  351. /* Shares are staggered already and delays in submission can be costly
  352. * so do not delay them */
  353. if (!opt_delaynet || share)
  354. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  355. curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
  356. curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data);
  357. curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
  358. curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data);
  359. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]);
  360. curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
  361. curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
  362. curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi);
  363. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  364. if (pool->rpc_proxy) {
  365. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  366. } else if (opt_socks_proxy) {
  367. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  368. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
  369. }
  370. if (userpass) {
  371. curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
  372. curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
  373. }
  374. if (longpoll)
  375. curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb);
  376. curl_easy_setopt(curl, CURLOPT_POST, 1);
  377. if (opt_protocol)
  378. applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req);
  379. state->upload_data.buf = rpc_req;
  380. state->upload_data.len = strlen(rpc_req);
  381. sprintf(len_hdr, "Content-Length: %lu",
  382. (unsigned long) state->upload_data.len);
  383. sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION);
  384. headers = curl_slist_append(headers,
  385. "Content-type: application/json");
  386. headers = curl_slist_append(headers,
  387. "X-Mining-Extensions: longpoll midstate rollntime submitold");
  388. if (longpoll)
  389. headers = curl_slist_append(headers,
  390. "X-Minimum-Wait: 0");
  391. if (likely(global_hashrate)) {
  392. char ghashrate[255];
  393. sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate);
  394. headers = curl_slist_append(headers, ghashrate);
  395. }
  396. headers = curl_slist_append(headers, len_hdr);
  397. headers = curl_slist_append(headers, user_agent_hdr);
  398. headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/
  399. curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  400. state->headers = headers;
  401. if (opt_delaynet) {
  402. /* Don't delay share submission, but still track the nettime */
  403. if (!share) {
  404. long long now_msecs, last_msecs;
  405. struct timeval now, last;
  406. cgtime(&now);
  407. last_nettime(&last);
  408. now_msecs = (long long)now.tv_sec * 1000;
  409. now_msecs += now.tv_usec / 1000;
  410. last_msecs = (long long)last.tv_sec * 1000;
  411. last_msecs += last.tv_usec / 1000;
  412. if (now_msecs > last_msecs && now_msecs - last_msecs < 250) {
  413. struct timespec rgtp;
  414. rgtp.tv_sec = 0;
  415. rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000;
  416. nanosleep(&rgtp, NULL);
  417. }
  418. }
  419. set_nettime();
  420. }
  421. }
  422. json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int *rolltime, void *out_priv)
  423. {
  424. struct json_rpc_call_state *state;
  425. if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (void*)&state) != CURLE_OK) {
  426. applog(LOG_ERR, "Failed to get private curl data");
  427. if (out_priv)
  428. *(void**)out_priv = NULL;
  429. goto err_out;
  430. }
  431. if (out_priv)
  432. *(void**)out_priv = state->priv;
  433. json_t *val, *err_val, *res_val;
  434. json_error_t err;
  435. struct pool *pool = state->pool;
  436. bool probing = probe && !pool->probed;
  437. if (rc) {
  438. applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str);
  439. goto err_out;
  440. }
  441. if (!state->all_data.buf) {
  442. applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
  443. goto err_out;
  444. }
  445. pool->cgminer_pool_stats.times_sent++;
  446. pool->cgminer_pool_stats.times_received++;
  447. if (probing) {
  448. pool->probed = true;
  449. /* If X-Long-Polling was found, activate long polling */
  450. if (state->hi.lp_path) {
  451. if (pool->hdr_path != NULL)
  452. free(pool->hdr_path);
  453. pool->hdr_path = state->hi.lp_path;
  454. } else
  455. pool->hdr_path = NULL;
  456. if (state->hi.stratum_url) {
  457. pool->stratum_url = state->hi.stratum_url;
  458. state->hi.stratum_url = NULL;
  459. }
  460. } else {
  461. if (state->hi.lp_path) {
  462. free(state->hi.lp_path);
  463. state->hi.lp_path = NULL;
  464. }
  465. if (state->hi.stratum_url) {
  466. free(state->hi.stratum_url);
  467. state->hi.stratum_url = NULL;
  468. }
  469. }
  470. if (pool->force_rollntime)
  471. {
  472. state->hi.canroll = true;
  473. state->hi.hadexpire = true;
  474. state->hi.rolltime = pool->force_rollntime;
  475. }
  476. if (rolltime)
  477. *rolltime = state->hi.rolltime;
  478. pool->cgminer_pool_stats.rolltime = state->hi.rolltime;
  479. pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime;
  480. pool->cgminer_pool_stats.canroll = state->hi.canroll;
  481. pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire;
  482. val = JSON_LOADS(state->all_data.buf, &err);
  483. if (!val) {
  484. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  485. if (opt_protocol)
  486. applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf);
  487. goto err_out;
  488. }
  489. if (opt_protocol) {
  490. char *s = json_dumps(val, JSON_INDENT(3));
  491. applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
  492. free(s);
  493. }
  494. /* JSON-RPC valid response returns a non-null 'result',
  495. * and a null 'error'.
  496. */
  497. res_val = json_object_get(val, "result");
  498. err_val = json_object_get(val, "error");
  499. if (!res_val ||(err_val && !json_is_null(err_val))) {
  500. char *s;
  501. if (err_val)
  502. s = json_dumps(err_val, JSON_INDENT(3));
  503. else
  504. s = strdup("(unknown reason)");
  505. applog(LOG_INFO, "JSON-RPC call failed: %s", s);
  506. free(s);
  507. json_decref(val);
  508. goto err_out;
  509. }
  510. if (state->hi.reason) {
  511. json_object_set_new(val, "reject-reason", json_string(state->hi.reason));
  512. free(state->hi.reason);
  513. state->hi.reason = NULL;
  514. }
  515. successful_connect = true;
  516. databuf_free(&state->all_data);
  517. curl_slist_free_all(state->headers);
  518. curl_easy_reset(curl);
  519. free(state);
  520. return val;
  521. err_out:
  522. databuf_free(&state->all_data);
  523. curl_slist_free_all(state->headers);
  524. curl_easy_reset(curl);
  525. if (!successful_connect)
  526. applog(LOG_DEBUG, "Failed to connect in json_rpc_call");
  527. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  528. free(state);
  529. return NULL;
  530. }
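/* Synchronous convenience wrapper: sets up the request with
 * json_rpc_call_async(), performs it with curl_easy_perform(), and returns the
 * decoded JSON response from json_rpc_call_completed() (NULL on failure). */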
  531. json_t *json_rpc_call(CURL *curl, const char *url,
  532. const char *userpass, const char *rpc_req,
  533. bool probe, bool longpoll, int *rolltime,
  534. struct pool *pool, bool share)
  535. {
  536. json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
  537. int rc = curl_easy_perform(curl);
  538. return json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
  539. }
  540. bool our_curl_supports_proxy_uris()
  541. {
  542. curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
  543. return data->age && data->version_num >= (( 7 <<16)|( 21 <<8)| 7); // 7.21.7
  544. }
  545. // NOTE: This assumes reference URI is a root
  546. char *absolute_uri(char *uri, const char *ref)
  547. {
  548. if (strstr(uri, "://"))
  549. return strdup(uri);
  550. char *copy_start, *abs;
  551. bool need_slash = false;
  552. copy_start = (uri[0] == '/') ? &uri[1] : uri;
  553. if (ref[strlen(ref) - 1] != '/')
  554. need_slash = true;
  555. abs = malloc(strlen(ref) + strlen(copy_start) + 2);
  556. if (!abs) {
  557. applog(LOG_ERR, "Malloc failure in absolute_uri");
  558. return NULL;
  559. }
  560. sprintf(abs, "%s%s%s", ref, need_slash ? "/" : "", copy_start);
  561. return abs;
  562. }
  563. static const char _hexchars[0x10] = "0123456789abcdef";
  564. void bin2hex(char *out, const void *in, size_t len)
  565. {
  566. const unsigned char *p = in;
  567. while (len--)
  568. {
  569. (out++)[0] = _hexchars[p[0] >> 4];
  570. (out++)[0] = _hexchars[p[0] & 0xf];
  571. ++p;
  572. }
  573. out[0] = '\0';
  574. }
  575. static inline
  576. int _hex2bin_char(const char c)
  577. {
  578. if (c >= '0' && c <= '9')
  579. return c - '0';
  580. if (c >= 'a' && c <= 'f')
  581. return (c - 'a') + 10;
  582. if (c >= 'A' && c <= 'F')
  583. return (c - 'A') + 10;
  584. return -1;
  585. }
  586. /* Does the reverse of bin2hex but does not allocate any ram */
  587. bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
  588. {
  589. int n, o;
  590. while (len--)
  591. {
  592. n = _hex2bin_char((hexstr++)[0]);
  593. if (unlikely(n == -1))
  594. {
  595. badchar:
  596. if (!hexstr[-1])
  597. applog(LOG_ERR, "hex2bin: str truncated");
  598. else
  599. applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]);
  600. return false;
  601. }
  602. o = _hex2bin_char((hexstr++)[0]);
  603. if (unlikely(o == -1))
  604. goto badchar;
  605. (p++)[0] = (n << 4) | o;
  606. }
  607. return likely(!hexstr[0]);
  608. }
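/* Usage sketch for the two helpers above (hypothetical buffers): bin2hex needs
 * an output buffer of 2*len+1 bytes, and hex2bin expects exactly 2*len hex
 * digits followed by a NUL terminator:
 *   unsigned char bin[4] = {0xde, 0xad, 0xbe, 0xef};
 *   char hex[9];
 *   bin2hex(hex, bin, 4);         // hex == "deadbeef"
 *   hex2bin(bin, "deadbeef", 4);  // returns true and refills bin
 */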
  609. void ucs2tochar(char * const out, const uint16_t * const in, const size_t sz)
  610. {
  611. for (int i = 0; i < sz; ++i)
  612. out[i] = in[i];
  613. }
  614. char *ucs2tochar_dup(uint16_t * const in, const size_t sz)
  615. {
  616. char *out = malloc(sz + 1);
  617. ucs2tochar(out, in, sz);
  618. out[sz] = '\0';
  619. return out;
  620. }
  621. void hash_data(unsigned char *out_hash, const unsigned char *data)
  622. {
  623. unsigned char blkheader[80];
  624. // data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header
  625. swap32yes(blkheader, data, 80 / 4);
  626. // double-SHA256 to get the block hash
  627. gen_hash(blkheader, out_hash, 80);
  628. }
  629. // Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1)
  630. void real_block_target(unsigned char *target, const unsigned char *data)
  631. {
  632. uint8_t targetshift;
  633. if (unlikely(data[72] < 3 || data[72] > 0x20))
  634. {
  635. // Invalid (out of bounds) target
  636. memset(target, 0xff, 32);
  637. return;
  638. }
  639. targetshift = data[72] - 3;
  640. memset(target, 0, targetshift);
  641. target[targetshift++] = data[75];
  642. target[targetshift++] = data[74];
  643. target[targetshift++] = data[73];
  644. memset(&target[targetshift], 0, 0x20 - targetshift);
  645. }
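/* Compare a 256-bit hash against a 256-bit target, each stored as eight
 * little-endian 32-bit words with the most significant word last; returns true
 * when hash <= target. */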
  646. bool hash_target_check(const unsigned char *hash, const unsigned char *target)
  647. {
  648. const uint32_t *h32 = (uint32_t*)&hash[0];
  649. const uint32_t *t32 = (uint32_t*)&target[0];
  650. for (int i = 7; i >= 0; --i) {
  651. uint32_t h32i = le32toh(h32[i]);
  652. uint32_t t32i = le32toh(t32[i]);
  653. if (h32i > t32i)
  654. return false;
  655. if (h32i < t32i)
  656. return true;
  657. }
  658. return true;
  659. }
  660. bool hash_target_check_v(const unsigned char *hash, const unsigned char *target)
  661. {
  662. bool rc;
  663. rc = hash_target_check(hash, target);
  664. if (opt_debug) {
  665. unsigned char hash_swap[32], target_swap[32];
  666. char hash_str[65];
  667. char target_str[65];
  668. for (int i = 0; i < 32; ++i) {
  669. hash_swap[i] = hash[31-i];
  670. target_swap[i] = target[31-i];
  671. }
  672. bin2hex(hash_str, hash_swap, 32);
  673. bin2hex(target_str, target_swap, 32);
  674. applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
  675. hash_str,
  676. target_str,
  677. rc ? "YES (hash <= target)" :
  678. "no (false positive; hash > target)");
  679. }
  680. return rc;
  681. }
  682. // This operates on a native-endian SHA256 state
  683. // In other words, on little endian platforms, every 4 bytes are in reverse order
  684. bool fulltest(const unsigned char *hash, const unsigned char *target)
  685. {
  686. unsigned char hash2[32];
  687. swap32tobe(hash2, hash, 32 / 4);
  688. return hash_target_check_v(hash2, target);
  689. }
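/* thread_q: a mutex/condition-variable protected FIFO used to pass pointers
 * between threads. tq_push() appends unless the queue is frozen; tq_pop()
 * waits (optionally until abstime) for an entry and returns its data. */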
  690. struct thread_q *tq_new(void)
  691. {
  692. struct thread_q *tq;
  693. tq = calloc(1, sizeof(*tq));
  694. if (!tq)
  695. return NULL;
  696. pthread_mutex_init(&tq->mutex, NULL);
  697. pthread_cond_init(&tq->cond, NULL);
  698. return tq;
  699. }
  700. void tq_free(struct thread_q *tq)
  701. {
  702. struct tq_ent *ent, *iter;
  703. if (!tq)
  704. return;
  705. DL_FOREACH_SAFE(tq->q, ent, iter) {
  706. DL_DELETE(tq->q, ent);
  707. free(ent);
  708. }
  709. pthread_cond_destroy(&tq->cond);
  710. pthread_mutex_destroy(&tq->mutex);
  711. memset(tq, 0, sizeof(*tq)); /* poison */
  712. free(tq);
  713. }
  714. static void tq_freezethaw(struct thread_q *tq, bool frozen)
  715. {
  716. mutex_lock(&tq->mutex);
  717. tq->frozen = frozen;
  718. pthread_cond_signal(&tq->cond);
  719. mutex_unlock(&tq->mutex);
  720. }
  721. void tq_freeze(struct thread_q *tq)
  722. {
  723. tq_freezethaw(tq, true);
  724. }
  725. void tq_thaw(struct thread_q *tq)
  726. {
  727. tq_freezethaw(tq, false);
  728. }
  729. bool tq_push(struct thread_q *tq, void *data)
  730. {
  731. struct tq_ent *ent;
  732. bool rc = true;
  733. ent = calloc(1, sizeof(*ent));
  734. if (!ent)
  735. return false;
  736. ent->data = data;
  737. mutex_lock(&tq->mutex);
  738. if (!tq->frozen) {
  739. DL_APPEND(tq->q, ent);
  740. } else {
  741. free(ent);
  742. rc = false;
  743. }
  744. pthread_cond_signal(&tq->cond);
  745. mutex_unlock(&tq->mutex);
  746. return rc;
  747. }
  748. void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
  749. {
  750. struct tq_ent *ent;
  751. void *rval = NULL;
  752. int rc;
  753. mutex_lock(&tq->mutex);
  754. if (tq->q)
  755. goto pop;
  756. if (abstime)
  757. rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
  758. else
  759. rc = pthread_cond_wait(&tq->cond, &tq->mutex);
  760. if (rc)
  761. goto out;
  762. if (!tq->q)
  763. goto out;
  764. pop:
  765. ent = tq->q;
  766. rval = ent->data;
  767. DL_DELETE(tq->q, ent);
  768. free(ent);
  769. out:
  770. mutex_unlock(&tq->mutex);
  771. return rval;
  772. }
  773. int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
  774. {
  775. int rv = pthread_create(&thr->pth, attr, start, arg);
  776. if (likely(!rv))
  777. thr->has_pth = true;
  778. return rv;
  779. }
  780. void thr_info_freeze(struct thr_info *thr)
  781. {
  782. struct tq_ent *ent, *iter;
  783. struct thread_q *tq;
  784. if (!thr)
  785. return;
  786. tq = thr->q;
  787. if (!tq)
  788. return;
  789. mutex_lock(&tq->mutex);
  790. tq->frozen = true;
  791. DL_FOREACH_SAFE(tq->q, ent, iter) {
  792. DL_DELETE(tq->q, ent);
  793. free(ent);
  794. }
  795. mutex_unlock(&tq->mutex);
  796. }
  797. void thr_info_cancel(struct thr_info *thr)
  798. {
  799. if (!thr)
  800. return;
  801. if (thr->has_pth) {
  802. pthread_cancel(thr->pth);
  803. thr->has_pth = false;
  804. }
  805. }
  806. #ifndef HAVE_PTHREAD_CANCEL
  807. // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill
  808. enum pthread_cancel_workaround_mode {
  809. PCWM_DEFAULT = 0,
  810. PCWM_TERMINATE = 1,
  811. PCWM_ASYNC = 2,
  812. PCWM_DISABLED = 4,
  813. PCWM_CANCELLED = 8,
  814. };
  815. static pthread_key_t key_pcwm;
  816. struct sigaction pcwm_orig_term_handler;
  817. static
  818. void do_pthread_cancel_exit(int flags)
  819. {
  820. if (!(flags & PCWM_ASYNC))
  821. // NOTE: Logging disables cancel while mutex held, so this is safe
  822. applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW");
  823. pthread_exit(PTHREAD_CANCELED);
  824. }
  825. static
  826. void sighandler_pthread_cancel(int sig)
  827. {
  828. int flags = (int)pthread_getspecific(key_pcwm);
  829. if (flags & PCWM_TERMINATE) // Main thread
  830. {
  831. // Restore original handler and call it
  832. if (sigaction(sig, &pcwm_orig_term_handler, NULL))
  833. quit(1, "pthread_cancel workaround: Failed to restore original handler");
  834. raise(SIGTERM);
  835. quit(1, "pthread_cancel workaround: Original handler returned");
  836. }
  837. if (flags & PCWM_CANCELLED) // Already pending cancel
  838. return;
  839. if (flags & PCWM_DISABLED)
  840. {
  841. flags |= PCWM_CANCELLED;
  842. if (pthread_setspecific(key_pcwm, (void*)flags))
  843. quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)");
  844. return;
  845. }
  846. do_pthread_cancel_exit(flags);
  847. }
  848. void pthread_testcancel(void)
  849. {
  850. int flags = (int)pthread_getspecific(key_pcwm);
  851. if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED))
  852. do_pthread_cancel_exit(flags);
  853. }
  854. int pthread_setcancelstate(int state, int *oldstate)
  855. {
  856. int flags = (int)pthread_getspecific(key_pcwm);
  857. if (oldstate)
  858. *oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
  859. if (state == PTHREAD_CANCEL_DISABLE)
  860. flags |= PCWM_DISABLED;
  861. else
  862. {
  863. if (flags & PCWM_CANCELLED)
  864. do_pthread_cancel_exit(flags);
  865. flags &= ~PCWM_DISABLED;
  866. }
  867. if (pthread_setspecific(key_pcwm, (void*)flags))
  868. return -1;
  869. return 0;
  870. }
  871. int pthread_setcanceltype(int type, int *oldtype)
  872. {
  873. int flags = (int)pthread_getspecific(key_pcwm);
  874. if (oldtype)
  875. *oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
  876. if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
  877. flags |= PCWM_ASYNC;
  878. else
  879. flags &= ~PCWM_ASYNC;
  880. if (pthread_setspecific(key_pcwm, (void*)flags))
  881. return -1;
  882. return 0;
  883. }
  884. void setup_pthread_cancel_workaround()
  885. {
  886. if (pthread_key_create(&key_pcwm, NULL))
  887. quit(1, "pthread_cancel workaround: pthread_key_create failed");
  888. if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE))
  889. quit(1, "pthread_cancel workaround: pthread_setspecific failed");
  890. struct sigaction new_sigact = {
  891. .sa_handler = sighandler_pthread_cancel,
  892. };
  893. if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler))
  894. quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler");
  895. }
  896. #endif
  897. static void _now_gettimeofday(struct timeval *);
  898. static void _cgsleep_us_r_nanosleep(cgtimer_t *, int64_t);
  899. #ifdef HAVE_POOR_GETTIMEOFDAY
  900. static struct timeval tv_timeofday_offset;
  901. static struct timeval _tv_timeofday_lastchecked;
  902. static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER;
  903. static
  904. void bfg_calibrate_timeofday(struct timeval *expected, char *buf)
  905. {
  906. struct timeval actual, delta;
  907. timeradd(expected, &tv_timeofday_offset, expected);
  908. _now_gettimeofday(&actual);
  909. if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1)
  910. // Within reason - no change necessary
  911. return;
  912. timersub(&actual, expected, &delta);
  913. timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset);
  914. sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec);
  915. *expected = actual;
  916. }
  917. void bfg_gettimeofday(struct timeval *out)
  918. {
  919. char buf[64] = "";
  920. timer_set_now(out);
  921. mutex_lock(&_tv_timeofday_mutex);
  922. if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21)
  923. bfg_calibrate_timeofday(out, buf);
  924. else
  925. timeradd(out, &tv_timeofday_offset, out);
  926. mutex_unlock(&_tv_timeofday_mutex);
  927. if (unlikely(buf[0]))
  928. applog(LOG_WARNING, "%s", buf);
  929. }
  930. #endif
  931. #ifdef WIN32
  932. static LARGE_INTEGER _perffreq;
  933. static
  934. void _now_queryperformancecounter(struct timeval *tv)
  935. {
  936. LARGE_INTEGER now;
  937. if (unlikely(!QueryPerformanceCounter(&now)))
  938. quit(1, "QueryPerformanceCounter failed");
  939. *tv = (struct timeval){
  940. .tv_sec = now.QuadPart / _perffreq.QuadPart,
  941. .tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart,
  942. };
  943. }
  944. #endif
  945. static void bfg_init_time();
  946. static
  947. void _now_is_not_set(__maybe_unused struct timeval *tv)
  948. {
  949. bfg_init_time();
  950. timer_set_now(tv);
  951. }
  952. void (*timer_set_now)(struct timeval *tv) = _now_is_not_set;
  953. void (*cgsleep_us_r)(cgtimer_t *, int64_t) = _cgsleep_us_r_nanosleep;
  954. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  955. static clockid_t bfg_timer_clk;
  956. static
  957. void _now_clock_gettime(struct timeval *tv)
  958. {
  959. struct timespec ts;
  960. if (unlikely(clock_gettime(bfg_timer_clk, &ts)))
  961. quit(1, "clock_gettime failed");
  962. *tv = (struct timeval){
  963. .tv_sec = ts.tv_sec,
  964. .tv_usec = ts.tv_nsec / 1000,
  965. };
  966. }
  967. #ifdef HAVE_CLOCK_NANOSLEEP
  968. static
  969. void _cgsleep_us_r_monotonic(cgtimer_t *tv_start, int64_t us)
  970. {
  971. struct timeval tv_end[1];
  972. struct timespec ts_end[1];
  973. int ret;
  974. timer_set_delay(tv_end, tv_start, us);
  975. timeval_to_spec(ts_end, tv_end);
  976. do {
  977. ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
  978. } while (ret == EINTR);
  979. }
  980. #endif
  981. static
  982. bool _bfg_try_clock_gettime(clockid_t clk)
  983. {
  984. struct timespec ts;
  985. if (clock_gettime(clk, &ts))
  986. return false;
  987. bfg_timer_clk = clk;
  988. timer_set_now = _now_clock_gettime;
  989. return true;
  990. }
  991. #endif
  992. static
  993. void bfg_init_time()
  994. {
  995. if (timer_set_now != _now_is_not_set)
  996. return;
  997. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  998. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW
  999. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW))
  1000. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)");
  1001. else
  1002. #endif
  1003. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC))
  1004. {
  1005. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)");
  1006. #ifdef HAVE_CLOCK_NANOSLEEP
  1007. cgsleep_us_r = _cgsleep_us_r_monotonic;
  1008. #endif
  1009. }
  1010. else
  1011. #endif
  1012. #ifdef WIN32
  1013. if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart)
  1014. {
  1015. timer_set_now = _now_queryperformancecounter;
  1016. applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter");
  1017. }
  1018. else
  1019. #endif
  1020. {
  1021. timer_set_now = _now_gettimeofday;
  1022. applog(LOG_DEBUG, "Timers: Using gettimeofday");
  1023. }
  1024. #ifdef HAVE_POOR_GETTIMEOFDAY
  1025. char buf[64] = "";
  1026. struct timeval tv;
  1027. timer_set_now(&tv);
  1028. bfg_calibrate_timeofday(&tv, buf);
  1029. applog(LOG_DEBUG, "%s", buf);
  1030. #endif
  1031. }
  1032. void subtime(struct timeval *a, struct timeval *b)
  1033. {
  1034. timersub(a, b, b);
  1035. }
  1036. void addtime(struct timeval *a, struct timeval *b)
  1037. {
  1038. timeradd(a, b, b);
  1039. }
  1040. bool time_more(struct timeval *a, struct timeval *b)
  1041. {
  1042. return timercmp(a, b, >);
  1043. }
  1044. bool time_less(struct timeval *a, struct timeval *b)
  1045. {
  1046. return timercmp(a, b, <);
  1047. }
  1048. void copy_time(struct timeval *dest, const struct timeval *src)
  1049. {
  1050. memcpy(dest, src, sizeof(struct timeval));
  1051. }
  1052. void timespec_to_val(struct timeval *val, const struct timespec *spec)
  1053. {
  1054. val->tv_sec = spec->tv_sec;
  1055. val->tv_usec = spec->tv_nsec / 1000;
  1056. }
  1057. void timeval_to_spec(struct timespec *spec, const struct timeval *val)
  1058. {
  1059. spec->tv_sec = val->tv_sec;
  1060. spec->tv_nsec = val->tv_usec * 1000;
  1061. }
  1062. void us_to_timeval(struct timeval *val, int64_t us)
  1063. {
  1064. lldiv_t tvdiv = lldiv(us, 1000000);
  1065. val->tv_sec = tvdiv.quot;
  1066. val->tv_usec = tvdiv.rem;
  1067. }
  1068. void us_to_timespec(struct timespec *spec, int64_t us)
  1069. {
  1070. lldiv_t tvdiv = lldiv(us, 1000000);
  1071. spec->tv_sec = tvdiv.quot;
  1072. spec->tv_nsec = tvdiv.rem * 1000;
  1073. }
  1074. void ms_to_timespec(struct timespec *spec, int64_t ms)
  1075. {
  1076. lldiv_t tvdiv = lldiv(ms, 1000);
  1077. spec->tv_sec = tvdiv.quot;
  1078. spec->tv_nsec = tvdiv.rem * 1000000;
  1079. }
  1080. void timeraddspec(struct timespec *a, const struct timespec *b)
  1081. {
  1082. a->tv_sec += b->tv_sec;
  1083. a->tv_nsec += b->tv_nsec;
  1084. if (a->tv_nsec >= 1000000000) {
  1085. a->tv_nsec -= 1000000000;
  1086. a->tv_sec++;
  1087. }
  1088. }
  1089. #ifndef WIN32
  1090. static
  1091. void _now_gettimeofday(struct timeval *tv)
  1092. {
  1093. gettimeofday(tv, NULL);
  1094. }
  1095. #else
  1096. /* Windows start time is since 1601, so convert it to the Unix epoch (1970). */
  1097. #define EPOCHFILETIME (116444736000000000LL)
  1098. /* Return the system time as an lldiv_t: quotient in seconds, remainder in decimicroseconds. */
  1099. static void decius_time(lldiv_t *lidiv)
  1100. {
  1101. FILETIME ft;
  1102. LARGE_INTEGER li;
  1103. GetSystemTimeAsFileTime(&ft);
  1104. li.LowPart = ft.dwLowDateTime;
  1105. li.HighPart = ft.dwHighDateTime;
  1106. li.QuadPart -= EPOCHFILETIME;
  1107. /* FILETIME is in decimicrosecond (100 ns) units, so divide by 10,000,000 to get seconds */
  1108. *lidiv = lldiv(li.QuadPart, 10000000);
  1109. }
  1110. void _now_gettimeofday(struct timeval *tv)
  1111. {
  1112. lldiv_t lidiv;
  1113. decius_time(&lidiv);
  1114. tv->tv_sec = lidiv.quot;
  1115. tv->tv_usec = lidiv.rem / 10;
  1116. }
  1117. #endif
  1118. void cgsleep_ms_r(cgtimer_t *tv_start, int ms)
  1119. {
  1120. cgsleep_us_r(tv_start, ((int64_t)ms) * 1000);
  1121. }
  1122. static
  1123. void _cgsleep_us_r_nanosleep(cgtimer_t *tv_start, int64_t us)
  1124. {
  1125. struct timeval tv_timer[1], tv[1];
  1126. struct timespec ts[1];
  1127. timer_set_delay(tv_timer, tv_start, us);
  1128. while (true)
  1129. {
  1130. timer_set_now(tv);
  1131. if (!timercmp(tv_timer, tv, >))
  1132. return;
  1133. timersub(tv_timer, tv, tv);
  1134. timeval_to_spec(ts, tv);
  1135. nanosleep(ts, NULL);
  1136. }
  1137. }
  1138. void cgsleep_ms(int ms)
  1139. {
  1140. cgtimer_t ts_start;
  1141. cgsleep_prepare_r(&ts_start);
  1142. cgsleep_ms_r(&ts_start, ms);
  1143. }
  1144. void cgsleep_us(int64_t us)
  1145. {
  1146. cgtimer_t ts_start;
  1147. cgsleep_prepare_r(&ts_start);
  1148. cgsleep_us_r(&ts_start, us);
  1149. }
  1150. /* Returns the microseconds difference between end and start times as a double */
  1151. double us_tdiff(struct timeval *end, struct timeval *start)
  1152. {
  1153. return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec;
  1154. }
  1155. /* Returns the seconds difference between end and start times as a double */
  1156. double tdiff(struct timeval *end, struct timeval *start)
  1157. {
  1158. return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
  1159. }
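/* Split a pool URL into its host and port strings, replacing (and freeing) any
 * previous values. Example with hypothetical values:
 * "http://pool.example.com:3333/path" yields sockaddr_url "pool.example.com"
 * and sockaddr_port "3333"; if no port is present, "80" is used. */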
  1160. bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
  1161. {
  1162. char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
  1163. char url_address[256], port[6];
  1164. int url_len, port_len = 0;
  1165. url_begin = strstr(url, "//");
  1166. if (!url_begin)
  1167. url_begin = url;
  1168. else
  1169. url_begin += 2;
  1170. /* Look for numeric ipv6 entries */
  1171. ipv6_begin = strstr(url_begin, "[");
  1172. ipv6_end = strstr(url_begin, "]");
  1173. if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
  1174. url_end = strstr(ipv6_end, ":");
  1175. else
  1176. url_end = strstr(url_begin, ":");
  1177. if (url_end) {
  1178. url_len = url_end - url_begin;
  1179. port_len = strlen(url_begin) - url_len - 1;
  1180. if (port_len < 1)
  1181. return false;
  1182. port_start = url_end + 1;
  1183. } else
  1184. url_len = strlen(url_begin);
  1185. if (url_len < 1)
  1186. return false;
  1187. sprintf(url_address, "%.*s", url_len, url_begin);
  1188. if (port_len) {
  1189. char *slash;
  1190. snprintf(port, 6, "%.*s", port_len, port_start);
  1191. slash = strchr(port, '/');
  1192. if (slash)
  1193. *slash = '\0';
  1194. } else
  1195. strcpy(port, "80");
  1196. free(*sockaddr_port);
  1197. *sockaddr_port = strdup(port);
  1198. free(*sockaddr_url);
  1199. *sockaddr_url = strdup(url_address);
  1200. return true;
  1201. }
  1202. enum send_ret {
  1203. SEND_OK,
  1204. SEND_SELECTFAIL,
  1205. SEND_SENDFAIL,
  1206. SEND_INACTIVE
  1207. };
  1208. /* Send a single command across a socket, appending \n to it. This should all
  1209. * be done under stratum lock except when first establishing the socket */
  1210. static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
  1211. {
  1212. SOCKETTYPE sock = pool->sock;
  1213. ssize_t ssent = 0;
  1214. strcat(s, "\n");
  1215. len++;
  1216. while (len > 0 ) {
  1217. struct timeval timeout = {1, 0};
  1218. ssize_t sent;
  1219. fd_set wd;
  1220. FD_ZERO(&wd);
  1221. FD_SET(sock, &wd);
  1222. if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
  1223. return SEND_SELECTFAIL;
  1224. #ifdef __APPLE__
  1225. sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
  1226. #elif WIN32
  1227. sent = send(pool->sock, s + ssent, len, 0);
  1228. #else
  1229. sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
  1230. #endif
  1231. if (sent < 0) {
  1232. if (!sock_blocks())
  1233. return SEND_SENDFAIL;
  1234. sent = 0;
  1235. }
  1236. ssent += sent;
  1237. len -= sent;
  1238. }
  1239. pool->cgminer_pool_stats.times_sent++;
  1240. pool->cgminer_pool_stats.bytes_sent += ssent;
  1241. total_bytes_sent += ssent;
  1242. pool->cgminer_pool_stats.net_bytes_sent += ssent;
  1243. return SEND_OK;
  1244. }
  1245. bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force)
  1246. {
  1247. enum send_ret ret = SEND_INACTIVE;
  1248. if (opt_protocol)
  1249. applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s);
  1250. mutex_lock(&pool->stratum_lock);
  1251. if (pool->stratum_active || force)
  1252. ret = __stratum_send(pool, s, len);
  1253. mutex_unlock(&pool->stratum_lock);
  1254. /* This is to avoid doing applog under stratum_lock */
  1255. switch (ret) {
  1256. default:
  1257. case SEND_OK:
  1258. break;
  1259. case SEND_SELECTFAIL:
  1260. applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
  1261. suspend_stratum(pool);
  1262. break;
  1263. case SEND_SENDFAIL:
  1264. applog(LOG_DEBUG, "Failed to send in stratum_send");
  1265. suspend_stratum(pool);
  1266. break;
  1267. case SEND_INACTIVE:
  1268. applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
  1269. break;
  1270. }
  1271. return (ret == SEND_OK);
  1272. }
  1273. static bool socket_full(struct pool *pool, int wait)
  1274. {
  1275. SOCKETTYPE sock = pool->sock;
  1276. struct timeval timeout;
  1277. fd_set rd;
  1278. if (sock == INVSOCK)
  1279. return true;
  1280. if (unlikely(wait < 0))
  1281. wait = 0;
  1282. FD_ZERO(&rd);
  1283. FD_SET(sock, &rd);
  1284. timeout.tv_usec = 0;
  1285. timeout.tv_sec = wait;
  1286. if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
  1287. return true;
  1288. return false;
  1289. }
  1290. /* Check to see if Santa's been good to you, i.e. whether there is buffered or pending data to read */
  1291. bool sock_full(struct pool *pool)
  1292. {
  1293. if (strlen(pool->sockbuf))
  1294. return true;
  1295. return (socket_full(pool, 0));
  1296. }
  1297. static void clear_sockbuf(struct pool *pool)
  1298. {
  1299. strcpy(pool->sockbuf, "");
  1300. }
  1301. static void clear_sock(struct pool *pool)
  1302. {
  1303. ssize_t n;
  1304. mutex_lock(&pool->stratum_lock);
  1305. do {
  1306. if (pool->sock)
  1307. n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
  1308. else
  1309. n = 0;
  1310. } while (n > 0);
  1311. mutex_unlock(&pool->stratum_lock);
  1312. clear_sockbuf(pool);
  1313. }
  1314. /* Make sure the pool sockbuf is large enough to cope with any coinbase size
  1315. * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
  1316. * and zeroing the new memory */
  1317. static void recalloc_sock(struct pool *pool, size_t len)
  1318. {
  1319. size_t old, new;
  1320. old = strlen(pool->sockbuf);
  1321. new = old + len + 1;
  1322. if (new < pool->sockbuf_size)
  1323. return;
  1324. new = new + (RBUFSIZE - (new % RBUFSIZE));
  1325. // Avoid potentially recursive locking
  1326. // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new);
  1327. pool->sockbuf = realloc(pool->sockbuf, new);
  1328. if (!pool->sockbuf)
  1329. quithere(1, "Failed to realloc pool sockbuf");
  1330. memset(pool->sockbuf + old, 0, new - old);
  1331. pool->sockbuf_size = new;
  1332. }
  1333. /* Peeks at a socket to find the first end of line and then reads just that
  1334. * from the socket and returns it as a malloced string */
  1335. char *recv_line(struct pool *pool)
  1336. {
  1337. char *tok, *sret = NULL;
  1338. ssize_t len, buflen;
  1339. int waited = 0;
  1340. if (!strstr(pool->sockbuf, "\n")) {
  1341. struct timeval rstart, now;
  1342. cgtime(&rstart);
  1343. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1344. applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
  1345. goto out;
  1346. }
  1347. do {
  1348. char s[RBUFSIZE];
  1349. size_t slen;
  1350. ssize_t n;
  1351. memset(s, 0, RBUFSIZE);
  1352. n = recv(pool->sock, s, RECVSIZE, 0);
  1353. if (!n) {
  1354. applog(LOG_DEBUG, "Socket closed waiting in recv_line");
  1355. suspend_stratum(pool);
  1356. break;
  1357. }
  1358. cgtime(&now);
  1359. waited = tdiff(&now, &rstart);
  1360. if (n < 0) {
  1361. // Save errno from being overwritten by socket_* calls
  1362. int socket_recv_errno;
  1363. socket_recv_errno = SOCKERR;
  1364. if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
  1365. applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET));
  1366. suspend_stratum(pool);
  1367. break;
  1368. }
  1369. } else {
  1370. slen = strlen(s);
  1371. recalloc_sock(pool, slen);
  1372. strcat(pool->sockbuf, s);
  1373. }
  1374. } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
  1375. }
  1376. buflen = strlen(pool->sockbuf);
  1377. tok = strtok(pool->sockbuf, "\n");
  1378. if (!tok) {
  1379. applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
  1380. goto out;
  1381. }
  1382. sret = strdup(tok);
  1383. len = strlen(sret);
  1384. /* Copy what's left in the buffer after the \n, including the
  1385. * terminating \0 */
  1386. if (buflen > len + 1)
  1387. memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
  1388. else
  1389. strcpy(pool->sockbuf, "");
  1390. pool->cgminer_pool_stats.times_received++;
  1391. pool->cgminer_pool_stats.bytes_received += len;
  1392. total_bytes_rcvd += len;
  1393. pool->cgminer_pool_stats.net_bytes_received += len;
  1394. out:
  1395. if (!sret)
  1396. clear_sock(pool);
  1397. else if (opt_protocol)
  1398. applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret);
  1399. return sret;
  1400. }
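
/* Usage sketch (illustrative, not part of the original source): a typical
 * caller drains the socket one line at a time and hands each line to
 * parse_method(), falling back to the response parser for replies:
 *
 *	char *line;
 *	while ((line = recv_line(pool))) {
 *		if (!parse_method(pool, line))
 *			parse_stratum_response(pool, line);
 *		free(line);
 *	}
 */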

/* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY
 * flag, but this is compatible with 2.0. */
char *json_dumps_ANY(json_t *json, size_t flags)
{
	switch (json_typeof(json))
	{
		case JSON_ARRAY:
		case JSON_OBJECT:
			return json_dumps(json, flags);
		default:
			break;
	}

	char *rv;
#ifdef JSON_ENCODE_ANY
	rv = json_dumps(json, JSON_ENCODE_ANY | flags);
	if (rv)
		return rv;
#endif

	json_t *tmp = json_array();
	char *s;
	int i;
	size_t len;

	if (!tmp)
		quithere(1, "Failed to allocate json array");
	if (json_array_append(tmp, json))
		quithere(1, "Failed to append temporary array");
	s = json_dumps(tmp, flags);
	if (!s)
		return NULL;
	for (i = 0; s[i] != '['; ++i)
		if (unlikely(!(s[i] && isCspace(s[i]))))
			quithere(1, "Failed to find opening bracket in array dump");
	len = strlen(&s[++i]) - 1;
	if (unlikely(s[i+len] != ']'))
		quithere(1, "Failed to find closing bracket in array dump");
	rv = malloc(len + 1);
	memcpy(rv, &s[i], len);
	rv[len] = '\0';
	free(s);
	json_decref(tmp);
	return rv;
}
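
/* Example (illustrative): on jansson 2.0 a bare scalar is wrapped in a
 * temporary one-element array, dumped, and the surrounding brackets are
 * stripped again, so something like
 *
 *	char *idstr = json_dumps_ANY(json_string("auth"), 0);
 *
 * is expected to yield the string "\"auth\"" (caller frees idstr). */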

/* Extracts a string value from a json array with error checking. To be used
 * when the value of the string returned is only examined and not to be stored.
 * See json_array_string below */
const char *__json_array_string(json_t *val, unsigned int entry)
{
	json_t *arr_entry;

	if (json_is_null(val))
		return NULL;
	if (!json_is_array(val))
		return NULL;
	if (entry > json_array_size(val))
		return NULL;
	arr_entry = json_array_get(val, entry);
	if (!json_is_string(arr_entry))
		return NULL;

	return json_string_value(arr_entry);
}

/* Creates a freshly malloced dup of __json_array_string */
static char *json_array_string(json_t *val, unsigned int entry)
{
	const char *buf = __json_array_string(val, entry);

	if (buf)
		return strdup(buf);
	return NULL;
}
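
/* Usage sketch (illustrative): given the params array of a stratum
 * mining.notify, entry 0 is the job id, so a caller that only looks at it
 * borrows the string, while one that keeps it takes a copy:
 *
 *	const char *peek = __json_array_string(params, 0);
 *	char *owned = json_array_string(params, 0);  // caller must free()
 */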

void stratum_probe_transparency(struct pool *pool)
{
	// Request transaction data to discourage pools from doing anything shady
	char s[1024];
	int sLen;

	sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}",
		pool->swork.job_id,
		pool->swork.job_id);
	stratum_send(pool, s, sLen);
	if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
		cgtime(&pool->swork.tv_transparency);
	pool->swork.transparency_probed = true;
}
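
/* Example (illustrative): for a job id of "1a2b" the JSON line pushed onto
 * the stratum socket would be
 *
 *	{"params": ["1a2b"], "id": "txlist1a2b", "method": "mining.get_transactions"}
 *
 * with the job id embedded in the request id, presumably so the reply can be
 * matched back to the job it probed. */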

static bool parse_notify(struct pool *pool, json_t *val)
{
	const char *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime;
	char *job_id;
	bool clean, ret = false;
	int merkles, i;
	size_t cb1_len, cb2_len;
	json_t *arr;

	arr = json_array_get(val, 4);
	if (!arr || !json_is_array(arr))
		goto out;

	merkles = json_array_size(arr);
	for (i = 0; i < merkles; i++)
		if (!json_is_string(json_array_get(arr, i)))
			goto out;

	prev_hash = __json_array_string(val, 1);
	coinbase1 = __json_array_string(val, 2);
	coinbase2 = __json_array_string(val, 3);
	bbversion = __json_array_string(val, 5);
	nbit = __json_array_string(val, 6);
	ntime = __json_array_string(val, 7);
	clean = json_is_true(json_array_get(val, 8));

	if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime)
		goto out;

	job_id = json_array_string(val, 0);
	if (!job_id)
		goto out;

	cg_wlock(&pool->data_lock);
	cgtime(&pool->swork.tv_received);
	free(pool->swork.job_id);
	pool->swork.job_id = job_id;
	pool->submit_old = !clean;
	pool->swork.clean = true;
	hex2bin(&pool->swork.header1[0], bbversion, 4);
	hex2bin(&pool->swork.header1[4], prev_hash, 32);
	hex2bin((void*)&pool->swork.ntime, ntime, 4);
	pool->swork.ntime = be32toh(pool->swork.ntime);
	hex2bin(&pool->swork.diffbits[0], nbit, 4);
	cb1_len = strlen(coinbase1) / 2;
	pool->swork.nonce2_offset = cb1_len + pool->n1_len;
	cb2_len = strlen(coinbase2) / 2;
	bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len);
	uint8_t *coinbase = bytes_buf(&pool->swork.coinbase);
	hex2bin(coinbase, coinbase1, cb1_len);
	hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len);
	// NOTE: gap for nonce2, filled at work generation time
	hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len);

	bytes_resize(&pool->swork.merkle_bin, 32 * merkles);
	for (i = 0; i < merkles; i++)
		hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32);
	pool->swork.merkles = merkles;
	pool->nonce2 = 0;
	cg_wunlock(&pool->data_lock);

	applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s",
		pool->pool_no, job_id);
	if (opt_debug && opt_protocol)
	{
		applog(LOG_DEBUG, "job_id: %s", job_id);
		applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
		applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
		applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
		for (i = 0; i < merkles; i++)
			applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i)));
		applog(LOG_DEBUG, "bbversion: %s", bbversion);
		applog(LOG_DEBUG, "nbit: %s", nbit);
		applog(LOG_DEBUG, "ntime: %s", ntime);
		applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
	}

	/* A notify message is the closest stratum gets to a getwork */
	pool->getwork_requested++;
	total_getworks++;

	if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
		if (pool->probed)
			stratum_probe_transparency(pool);

	ret = true;
out:
	return ret;
}
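
/* Example (illustrative): the params array this function expects follows the
 * usual stratum mining.notify layout, indexed exactly as read above:
 *
 *	[ job_id, prev_hash, coinbase1, coinbase2,
 *	  [ merkle_branch_0, merkle_branch_1, ... ],
 *	  version, nbits, ntime, clean_jobs ]
 *
 * e.g. ["1a2b", "<64 hex chars>", "<cb1 hex>", "<cb2 hex>", [],
 * "20000000", "1a015f53", "504e86b9", true]. */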

static bool parse_diff(struct pool *pool, json_t *val)
{
	double diff;

	diff = json_number_value(json_array_get(val, 0));
	if (diff == 0)
		return false;

	cg_wlock(&pool->data_lock);
	pool->swork.diff = diff;
	cg_wunlock(&pool->data_lock);

	applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff);

	return true;
}
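
/* Example (illustrative): a pool lowering the share difficulty to 16
 * typically sends
 *
 *	{"id": null, "method": "mining.set_difficulty", "params": [16]}
 *
 * and parse_method() below routes the params array here. */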

static bool parse_reconnect(struct pool *pool, json_t *val)
{
	const char *url, *port;
	char address[256];

	url = __json_array_string(val, 0);
	if (!url)
		url = pool->sockaddr_url;

	port = __json_array_string(val, 1);
	if (!port)
		port = pool->stratum_port;

	snprintf(address, sizeof(address), "%s:%s", url, port);

	if (!extract_sockaddr(address, &pool->sockaddr_url, &pool->stratum_port))
		return false;

	pool->stratum_url = pool->sockaddr_url;

	applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address);

	if (!restart_stratum(pool))
		return false;

	return true;
}

static bool send_version(struct pool *pool, json_t *val)
{
	char s[RBUFSIZE], *idstr;
	json_t *id = json_object_get(val, "id");

	if (!(id && !json_is_null(id)))
		return false;

	idstr = json_dumps_ANY(id, 0);
	sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr);
	free(idstr);

	if (!stratum_send(pool, s, strlen(s)))
		return false;

	return true;
}

static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params)
{
	char *msg;
	char s[RBUFSIZE], *idstr;
	json_t *id = json_object_get(val, "id");

	msg = json_array_string(params, 0);
	if (likely(msg))
	{
		free(pool->admin_msg);
		pool->admin_msg = msg;
		applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg);
	}

	if (!(id && !json_is_null(id)))
		return true;

	idstr = json_dumps_ANY(id, 0);
	if (likely(msg))
		sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr);
	else
		sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr);
	free(idstr);

	if (!stratum_send(pool, s, strlen(s)))
		return false;

	return true;
}

bool parse_method(struct pool *pool, char *s)
{
	json_t *val = NULL, *method, *err_val, *params;
	json_error_t err;
	bool ret = false;
	const char *buf;

	if (!s)
		goto out;

	val = JSON_LOADS(s, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	method = json_object_get(val, "method");
	if (!method)
		goto out;

	err_val = json_object_get(val, "error");
	params = json_object_get(val, "params");

	if (err_val && !json_is_null(err_val)) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);

		free(ss);

		goto out;
	}

	buf = json_string_value(method);
	if (!buf)
		goto out;

	if (!strncasecmp(buf, "mining.notify", 13)) {
		if (parse_notify(pool, params))
			pool->stratum_notify = ret = true;
		else
			pool->stratum_notify = ret = false;
		goto out;
	}

	if (!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) {
		ret = true;
		goto out;
	}

	if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) {
		ret = true;
		goto out;
	}

	if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) {
		ret = true;
		goto out;
	}

	if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) {
		ret = true;
		goto out;
	}
out:
	if (val)
		json_decref(val);

	return ret;
}
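
/* Dispatch example (illustrative): a raw line such as
 *
 *	{"id": null, "method": "client.show_message", "params": ["Maintenance at 02:00 UTC"]}
 *
 * decodes here, matches the client.show_message branch and returns true,
 * while a plain result/response line has no "method" key and falls through
 * with false so the caller can treat it as a reply instead. */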

extern bool parse_stratum_response(struct pool *, char *s);

bool auth_stratum(struct pool *pool)
{
	json_t *val = NULL, *res_val, *err_val;
	char s[RBUFSIZE], *sret = NULL;
	json_error_t err;
	bool ret = false;

	sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
		pool->rpc_user, pool->rpc_pass);

	if (!stratum_send(pool, s, strlen(s)))
		goto out;

	/* Parse all data in the queue and anything left should be auth */
	while (42) {
		sret = recv_line(pool);
		if (!sret)
			goto out;
		if (parse_method(pool, sret))
			free(sret);
		else
			break;
	}

	val = JSON_LOADS(sret, &err);
	free(sret);
	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");
		applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
		free(ss);

		goto out;
	}

	ret = true;
	applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
	pool->probed = true;
	successful_connect = true;

out:
	if (val)
		json_decref(val);

	if (pool->stratum_notify)
		stratum_probe_transparency(pool);

	return ret;
}

curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
{
	struct pool *pool = clientp;
	curl_socket_t sck = socket(addr->family, addr->socktype, addr->protocol);
	pool->sock = sck;
	return sck;
}

static bool setup_stratum_curl(struct pool *pool)
{
	char curl_err_str[CURL_ERROR_SIZE];
	CURL *curl = NULL;
	char s[RBUFSIZE];
	bool ret = false;

	applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
	mutex_lock(&pool->stratum_lock);
	timer_unset(&pool->swork.tv_transparency);
	pool->stratum_active = false;
	pool->stratum_notify = false;
	pool->swork.transparency_probed = false;
	if (pool->stratum_curl)
		curl_easy_cleanup(pool->stratum_curl);
	pool->stratum_curl = curl_easy_init();
	if (unlikely(!pool->stratum_curl))
		quithere(1, "Failed to curl_easy_init");
	if (pool->sockbuf)
		pool->sockbuf[0] = '\0';

	curl = pool->stratum_curl;

	if (!pool->sockbuf) {
		pool->sockbuf = calloc(RBUFSIZE, 1);
		if (!pool->sockbuf)
			quithere(1, "Failed to calloc pool sockbuf");
		pool->sockbuf_size = RBUFSIZE;
	}

	/* Create a http url for use with curl */
	sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port);

	curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
	curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30);
	curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
	curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
	curl_easy_setopt(curl, CURLOPT_URL, s);
	if (!opt_delaynet)
		curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
	/* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
	 * to enable it */
	curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
	curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
	curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
	// CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this hack for now
	curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb);
	curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
	curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
	if (pool->rpc_proxy) {
		curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
	} else if (opt_socks_proxy) {
		curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
		curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
	}
	curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
	pool->sock = INVSOCK;
	if (curl_easy_perform(curl)) {
		applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
errout:
		curl_easy_cleanup(curl);
		pool->stratum_curl = NULL;
		goto out;
	}
	if (pool->sock == INVSOCK)
	{
		applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no);
		goto errout;
	}
	keep_sockalive(pool->sock);

	pool->cgminer_pool_stats.times_sent++;
	pool->cgminer_pool_stats.times_received++;

	ret = true;

out:
	mutex_unlock(&pool->stratum_lock);
	return ret;
}

static char *get_sessionid(json_t *val)
{
	char *ret = NULL;
	json_t *arr_val;
	int arrsize, i;

	arr_val = json_array_get(val, 0);
	if (!arr_val || !json_is_array(arr_val))
		goto out;
	arrsize = json_array_size(arr_val);
	for (i = 0; i < arrsize; i++) {
		json_t *arr = json_array_get(arr_val, i);
		const char *notify;

		if (!arr || !json_is_array(arr))
			break;
		notify = __json_array_string(arr, 0);
		if (!notify)
			continue;
		if (!strncasecmp(notify, "mining.notify", 13)) {
			ret = json_array_string(arr, 1);
			break;
		}
	}
out:
	return ret;
}
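
/* Example (illustrative): the "result" of mining.subscribe that get_sessionid
 * walks typically looks like
 *
 *	[ [ ["mining.set_difficulty", "b4b6693b"],
 *	    ["mining.notify", "ae6812eb"] ],
 *	  "08000002", 4 ]
 *
 * so the session id returned here would be "ae6812eb", while initiate_stratum
 * below reads the extranonce1 ("08000002") and extranonce2 size (4). */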

void suspend_stratum(struct pool *pool)
{
	clear_sockbuf(pool);
	applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);
	mutex_lock(&pool->stratum_lock);
	pool->stratum_active = pool->stratum_notify = false;
	if (pool->stratum_curl) {
		curl_easy_cleanup(pool->stratum_curl);
	}
	pool->stratum_curl = NULL;
	pool->sock = INVSOCK;
	mutex_unlock(&pool->stratum_lock);
}

bool initiate_stratum(struct pool *pool)
{
	bool ret = false, recvd = false, noresume = false, sockd = false;
	bool trysuggest = request_target_str;
	char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid;
	json_t *val = NULL, *res_val, *err_val;
	json_error_t err;
	int n2size;

resend:
	if (!setup_stratum_curl(pool)) {
		sockd = false;
		goto out;
	}

	sockd = true;

	clear_sock(pool);

	if (trysuggest)
	{
		int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str);
		if (!_stratum_send(pool, s, sz, true))
		{
			applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no);
			goto out;
		}
		recvd = true;
	}

	if (noresume) {
		sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
	} else {
		if (pool->sessionid)
			sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
		else
			sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
	}

	if (!_stratum_send(pool, s, strlen(s), true)) {
		applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
		goto out;
	}

	recvd = true;

	if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
		applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
		goto out;
	}

	sret = recv_line(pool);
	if (!sret)
		goto out;

	val = JSON_LOADS(sret, &err);
	free(sret);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_null(res_val) ||
	    (err_val && !json_is_null(err_val))) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);

		free(ss);

		goto out;
	}

	sessionid = get_sessionid(res_val);
	if (!sessionid)
		applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
	nonce1 = json_array_string(res_val, 1);
	if (!nonce1) {
		applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");
		free(sessionid);
		goto out;
	}
	n2size = json_integer_value(json_array_get(res_val, 2));
	if (!n2size) {
		applog(LOG_INFO, "Failed to get n2size in initiate_stratum");
		free(sessionid);
		free(nonce1);
		goto out;
	}

	cg_wlock(&pool->data_lock);
	free(pool->sessionid);
	pool->sessionid = sessionid;
	free(pool->nonce1);
	pool->nonce1 = nonce1;
	pool->n1_len = strlen(nonce1) / 2;
	pool->n2size = n2size;
	pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size;
#ifdef WORDS_BIGENDIAN
	pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0;
#endif
	cg_wunlock(&pool->data_lock);

	if (sessionid)
		applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);

	ret = true;

out:
	if (val)
	{
		json_decref(val);
		val = NULL;
	}

	if (ret) {
		if (!pool->stratum_url)
			pool->stratum_url = pool->sockaddr_url;
		pool->stratum_active = true;
		pool->swork.diff = 1;
		if (opt_protocol) {
			applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
				pool->pool_no, pool->nonce1, pool->n2size);
		}
	} else {
		if (recvd)
		{
			if (trysuggest)
			{
				applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no);
				trysuggest = false;
				goto resend;
			}
			if (!noresume)
			{
				applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
				noresume = true;
				goto resend;
			}
		}
		applog(LOG_DEBUG, "Initiate stratum failed");
		if (sockd)
			suspend_stratum(pool);
	}

	return ret;
}

bool restart_stratum(struct pool *pool)
{
	if (pool->stratum_active)
		suspend_stratum(pool);
	if (!initiate_stratum(pool))
		return false;
	if (!auth_stratum(pool))
		return false;
	return true;
}

void dev_error_update(struct cgpu_info *dev, enum dev_reason reason)
{
	dev->device_last_not_well = time(NULL);
	cgtime(&dev->tv_device_last_not_well);
	dev->device_not_well_reason = reason;
}

void dev_error(struct cgpu_info *dev, enum dev_reason reason)
{
	dev_error_update(dev, reason);

	switch (reason) {
		case REASON_THREAD_FAIL_INIT:
			dev->thread_fail_init_count++;
			break;
		case REASON_THREAD_ZERO_HASH:
			dev->thread_zero_hash_count++;
			break;
		case REASON_THREAD_FAIL_QUEUE:
			dev->thread_fail_queue_count++;
			break;
		case REASON_DEV_SICK_IDLE_60:
			dev->dev_sick_idle_60_count++;
			break;
		case REASON_DEV_DEAD_IDLE_600:
			dev->dev_dead_idle_600_count++;
			break;
		case REASON_DEV_NOSTART:
			dev->dev_nostart_count++;
			break;
		case REASON_DEV_OVER_HEAT:
			dev->dev_over_heat_count++;
			break;
		case REASON_DEV_THERMAL_CUTOFF:
			dev->dev_thermal_cutoff_count++;
			break;
		case REASON_DEV_COMMS_ERROR:
			dev->dev_comms_error_count++;
			break;
		case REASON_DEV_THROTTLE:
			dev->dev_throttle_count++;
			break;
	}
}

/* Realloc an existing string to fit an extra string s, appending s to it. */
void *realloc_strcat(char *ptr, char *s)
{
	size_t old = strlen(ptr), len = strlen(s);
	char *ret;

	if (!len)
		return ptr;

	len += old + 1;
	align_len(&len);

	ret = malloc(len);
	if (unlikely(!ret))
		quithere(1, "Failed to malloc");

	sprintf(ret, "%s%s", ptr, s);
	free(ptr);
	return ret;
}
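
/* Usage sketch (illustrative): the old pointer is consumed and a new one is
 * returned, so callers reassign:
 *
 *	char *buf = strdup("user=");
 *	buf = realloc_strcat(buf, "alice");   // buf is now "user=alice"
 */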

static
bool sanechars[] = {
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	true , true , true , true , true , true , true , true ,
	true , true , false, false, false, false, false, false,
	false, true , true , true , true , true , true , true ,
	true , true , true , true , true , true , true , true ,
	true , true , true , true , true , true , true , true ,
	true , true , true , false, false, false, false, false,
	false, true , true , true , true , true , true , true ,
	true , true , true , true , true , true , true , true ,
	true , true , true , true , true , true , true , true ,
	true , true , true , false, false, false, false, false,
};

char *sanestr(char *o, char *s)
{
	char *rv = o;
	bool br = false;
	for ( ; s[0]; ++s)
	{
		if (sanechars[s[0] & 0x7f])
		{
			if (br)
			{
				br = false;
				if (s[0] >= '0' && s[0] <= '9')
					(o++)[0] = '_';
			}
			(o++)[0] = s[0];
		}
		else
		if (o != s && o[-1] >= '0' && o[-1] <= '9')
			br = true;
	}
	o[0] = '\0';
	return rv;
}
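
/* Example (illustrative): only [A-Za-z0-9] survive, everything else is
 * dropped, and an underscore is inserted where dropped characters would
 * otherwise fuse two digits together; e.g. sanitising "Foo Bar 1.2" into a
 * separate buffer yields "FooBar1_2". */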

void RenameThread(const char* name)
{
#if defined(PR_SET_NAME)
	// Only the first 15 characters are used (16 - NUL terminator)
	prctl(PR_SET_NAME, name, 0, 0, 0);
#elif defined(__APPLE__)
	pthread_setname_np(name);
#elif (defined(__FreeBSD__) || defined(__OpenBSD__))
	pthread_set_name_np(pthread_self(), name);
#else
	// Prevent warnings for unused parameters...
	(void)name;
#endif
}

static pthread_key_t key_bfgtls;

struct bfgtls_data {
	char *bfg_strerror_result;
	size_t bfg_strerror_resultsz;
#ifdef WIN32
	LPSTR bfg_strerror_socketresult;
#endif
};

static
struct bfgtls_data *get_bfgtls()
{
	struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls);
	if (bfgtls)
		return bfgtls;

	void *p;
	bfgtls = malloc(sizeof(*bfgtls));
	if (!bfgtls)
		quithere(1, "malloc bfgtls failed");
	p = malloc(64);
	if (!p)
		quithere(1, "malloc bfg_strerror_result failed");
	*bfgtls = (struct bfgtls_data){
		.bfg_strerror_resultsz = 64,
		.bfg_strerror_result = p,
	};
	if (pthread_setspecific(key_bfgtls, bfgtls))
		quithere(1, "pthread_setspecific failed");
	return bfgtls;
}

void bfg_init_threadlocal()
{
	if (pthread_key_create(&key_bfgtls, NULL))
		quithere(1, "pthread_key_create failed");
}

static
bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum)
{
	if (minimum <= *bufszp)
		return false;
	while (minimum > *bufszp)
		*bufszp *= 2;
	*bufp = realloc(*bufp, *bufszp);
	if (unlikely(!*bufp))
		quithere(1, "realloc failed");
	return true;
}

static
const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src)
{
	if (!src)
		return NULL;
	const size_t srcsz = strlen(src) + 1;
	bfg_grow_buffer(bufp, bufszp, srcsz);
	memcpy(*bufp, src, srcsz);
	return *bufp;
}

// Guaranteed to always return some string (or quit)
const char *bfg_strerror(int e, enum bfg_strerror_type type)
{
	static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
	struct bfgtls_data *bfgtls = get_bfgtls();
	size_t * const bufszp = &bfgtls->bfg_strerror_resultsz;
	char ** const bufp = &bfgtls->bfg_strerror_result;
	const char *have = NULL;

	switch (type) {
		case BST_LIBUSB:
			// NOTE: Nested preprocessor checks since the latter isn't defined at all without the former
#ifdef HAVE_LIBUSB
# if HAVE_DECL_LIBUSB_ERROR_NAME
			// libusb makes no guarantees for thread-safety or persistence
			mutex_lock(&mutex);
			have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e));
			mutex_unlock(&mutex);
# endif
#endif
			break;
		case BST_SOCKET:
		case BST_SYSTEM:
		{
#ifdef WIN32
			// Windows has a different namespace for system and socket errors
			LPSTR *msg = &bfgtls->bfg_strerror_socketresult;
			if (*msg)
				LocalFree(*msg);
			if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0))
				return *msg;
			*msg = NULL;
			break;
#endif
		}
		// Fallthru on non-WIN32
		case BST_ERRNO:
		{
#ifdef __STRERROR_S_WORKS
			// FIXME: Not sure how to get this on MingW64
retry:
			if (likely(!strerror_s(*bufp, *bufszp, e)))
			{
				if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2))
					goto retry;
				return *bufp;
			}
			// TODO: XSI strerror_r
			// TODO: GNU strerror_r
#else
			mutex_lock(&mutex);
			have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e));
			mutex_unlock(&mutex);
#endif
		}
	}
	if (have)
		return *bufp;

	// Fallback: Stringify the number
	static const char fmt[] = "%s error #%d", *typestr;
	switch (type) {
		case BST_ERRNO:
			typestr = "System";
			break;
		case BST_SOCKET:
			typestr = "Socket";
			break;
		case BST_LIBUSB:
			typestr = "libusb";
			break;
		default:
			typestr = "Unexpected";
	}
	int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1;
	bfg_grow_buffer(bufp, bufszp, sz);
	sprintf(*bufp, fmt, typestr, e);
	return *bufp;
}
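
/* Usage example (illustrative): callers pass the error value together with
 * the namespace it came from, e.g.
 *
 *	applog(LOG_DEBUG, "recv failed: %s", bfg_strerror(SOCKERR, BST_SOCKET));
 *	applog(LOG_DEBUG, "open failed: %s", bfg_strerror(errno, BST_ERRNO));
 *
 * The returned string lives in per-thread storage, so it remains valid only
 * until the same thread calls bfg_strerror again. */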

void notifier_init(notifier_t pipefd)
{
#ifdef WIN32
#define WindowsErrorStr(e)  bfg_strerror(e, BST_SOCKET)
	SOCKET listener, connecter, acceptor;
	listener = socket(AF_INET, SOCK_STREAM, 0);
	if (listener == INVALID_SOCKET)
		quit(1, "Failed to create listener socket"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	connecter = socket(AF_INET, SOCK_STREAM, 0);
	if (connecter == INVALID_SOCKET)
		quit(1, "Failed to create connect socket"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	struct sockaddr_in inaddr = {
		.sin_family = AF_INET,
		.sin_addr = {
			.s_addr = htonl(INADDR_LOOPBACK),
		},
		.sin_port = 0,
	};
	{
		static const int reuse = 1;
		setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse));
	}
	if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR)
		quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	socklen_t inaddr_sz = sizeof(inaddr);
	if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR)
		quit(1, "Failed to getsockname"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	if (listen(listener, 1) == SOCKET_ERROR)
		quit(1, "Failed to listen"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	inaddr.sin_family = AF_INET;
	inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR)
		quit(1, "Failed to connect"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	acceptor = accept(listener, NULL, NULL);
	if (acceptor == INVALID_SOCKET)
		quit(1, "Failed to accept"IN_FMT_FFL": %s",
			__FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
	closesocket(listener);
	pipefd[0] = connecter;
	pipefd[1] = acceptor;
#else
	if (pipe(pipefd))
		quithere(1, "Failed to create pipe");
#endif
}

void notifier_wake(notifier_t fd)
{
	if (fd[1] == INVSOCK)
		return;
	if (1 !=
#ifdef WIN32
		send(fd[1], "\0", 1, 0)
#else
		write(fd[1], "\0", 1)
#endif
	)
		applog(LOG_WARNING, "Error trying to wake notifier");
}

void notifier_read(notifier_t fd)
{
	char buf[0x10];
#ifdef WIN32
	IGNORE_RETURN_VALUE(recv(fd[0], buf, sizeof(buf), 0));
#else
	IGNORE_RETURN_VALUE(read(fd[0], buf, sizeof(buf)));
#endif
}

void notifier_init_invalid(notifier_t fd)
{
	fd[0] = fd[1] = INVSOCK;
}

void notifier_destroy(notifier_t fd)
{
#ifdef WIN32
	closesocket(fd[0]);
	closesocket(fd[1]);
#else
	close(fd[0]);
	close(fd[1]);
#endif
	fd[0] = fd[1] = INVSOCK;
}
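
/* Lifecycle sketch (illustrative): a notifier is a wake-up channel intended
 * to be waited on alongside other descriptors, built on a pipe on POSIX and
 * a loopback socket pair on Windows:
 *
 *	notifier_t n;
 *	notifier_init(n);       // create the channel
 *	// ... another thread: notifier_wake(n);   -> n[0] becomes readable
 *	notifier_read(n);       // drain the wake-up byte(s)
 *	notifier_destroy(n);    // close both ends and mark them INVSOCK
 */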

void _bytes_alloc_failure(size_t sz)
{
	quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz);
}

void *cmd_thread(void *cmdp)
{
	const char *cmd = cmdp;
	applog(LOG_DEBUG, "Executing command: %s", cmd);
	int rc = system(cmd);
	if (rc)
		applog(LOG_WARNING, "Command returned %d exit code: %s", rc, cmd);
	return NULL;
}

void run_cmd(const char *cmd)
{
	if (!cmd)
		return;
	pthread_t pth;
	pthread_create(&pth, NULL, cmd_thread, (void*)cmd);
}