util.c

  1. /*
  2. * Copyright 2011-2013 Con Kolivas
  3. * Copyright 2011-2013 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. * Copyright 2012 Giel van Schijndel
  6. * Copyright 2012 Gavin Andresen
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 3 of the License, or (at your option)
  11. * any later version. See COPYING for more details.
  12. */
  13. #include "config.h"
  14. #include <stdbool.h>
  15. #include <stdint.h>
  16. #include <stdio.h>
  17. #include <stdlib.h>
  18. #include <ctype.h>
  19. #include <stdarg.h>
  20. #include <string.h>
  21. #include <pthread.h>
  22. #include <jansson.h>
  23. #include <curl/curl.h>
  24. #include <time.h>
  25. #include <errno.h>
  26. #include <unistd.h>
  27. #include <sys/types.h>
  28. #ifdef HAVE_SYS_PRCTL_H
  29. # include <sys/prctl.h>
  30. #endif
  31. #if defined(__FreeBSD__) || defined(__OpenBSD__)
  32. # include <pthread_np.h>
  33. #endif
  34. #ifndef WIN32
  35. #include <fcntl.h>
  36. # ifdef __linux
  37. # include <sys/prctl.h>
  38. # endif
  39. # include <sys/socket.h>
  40. # include <netinet/in.h>
  41. # include <netinet/tcp.h>
  42. # include <netdb.h>
  43. #else
  44. # include <windows.h>
  45. # include <winsock2.h>
  46. # include <mstcpip.h>
  47. # include <ws2tcpip.h>
  48. # include <mmsystem.h>
  49. #endif
  50. #include <utlist.h>
  51. #ifdef NEED_BFG_LOWL_VCOM
  52. #include "lowl-vcom.h"
  53. #endif
  54. #include "miner.h"
  55. #include "compat.h"
  56. #include "util.h"
  57. #define DEFAULT_SOCKWAIT 60
  58. bool successful_connect = false;
  59. struct timeval nettime;
  60. struct data_buffer {
  61. void *buf;
  62. size_t len;
  63. curl_socket_t *idlemarker;
  64. };
  65. struct upload_buffer {
  66. const void *buf;
  67. size_t len;
  68. };
  69. struct header_info {
  70. char *lp_path;
  71. int rolltime;
  72. char *reason;
  73. char *stratum_url;
  74. bool hadrolltime;
  75. bool canroll;
  76. bool hadexpire;
  77. };
  78. struct tq_ent {
  79. void *data;
  80. struct tq_ent *prev;
  81. struct tq_ent *next;
  82. };
  83. static void databuf_free(struct data_buffer *db)
  84. {
  85. if (!db)
  86. return;
  87. free(db->buf);
  88. #ifdef DEBUG_DATABUF
  89. applog(LOG_DEBUG, "databuf_free(%p)", db->buf);
  90. #endif
  91. memset(db, 0, sizeof(*db));
  92. }
  93. // aka data_buffer_write
  94. static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb,
  95. void *user_data)
  96. {
  97. struct data_buffer *db = user_data;
  98. size_t oldlen, newlen;
  99. oldlen = db->len;
  100. if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size))
  101. return 0;
  102. if (unlikely(nmemb > (SIZE_MAX - oldlen) / size))
  103. nmemb = (SIZE_MAX - oldlen) / size;
  104. size_t len = size * nmemb;
  105. void *newmem;
  106. static const unsigned char zero = 0;
  107. if (db->idlemarker) {
  108. const unsigned char *cptr = ptr;
  109. for (size_t i = 0; i < len; ++i)
  110. if (!(isCspace(cptr[i]) || cptr[i] == '{')) {
  111. *db->idlemarker = CURL_SOCKET_BAD;
  112. db->idlemarker = NULL;
  113. break;
  114. }
  115. }
  116. newlen = oldlen + len;
  117. newmem = realloc(db->buf, newlen + 1);
  118. #ifdef DEBUG_DATABUF
  119. applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem);
  120. #endif
  121. if (!newmem)
  122. return 0;
  123. db->buf = newmem;
  124. db->len = newlen;
  125. memcpy(db->buf + oldlen, ptr, len);
  126. memcpy(db->buf + newlen, &zero, 1); /* null terminate */
  127. return nmemb;
  128. }
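/* CURLOPT_READFUNCTION callback: copies up to size*nmemb bytes of the pending
 * request body into curl's buffer and advances the upload_buffer cursor. */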
  129. static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb,
  130. void *user_data)
  131. {
  132. struct upload_buffer *ub = user_data;
  133. unsigned int len = size * nmemb;
  134. if (len > ub->len)
  135. len = ub->len;
  136. if (len) {
  137. memcpy(ptr, ub->buf, len);
  138. ub->buf += len;
  139. ub->len -= len;
  140. }
  141. return len;
  142. }
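/* CURLOPT_HEADERFUNCTION callback: splits one "Key: value" response header,
 * trims whitespace, and records X-Roll-Ntime, X-Long-Polling, X-Reject-Reason
 * and X-Stratum values into the header_info struct. */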
  143. static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
  144. {
  145. struct header_info *hi = user_data;
  146. size_t remlen, slen, ptrlen = size * nmemb;
  147. char *rem, *val = NULL, *key = NULL;
  148. void *tmp;
  149. val = calloc(1, ptrlen);
  150. key = calloc(1, ptrlen);
  151. if (!key || !val)
  152. goto out;
  153. tmp = memchr(ptr, ':', ptrlen);
  154. if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */
  155. goto out;
  156. slen = tmp - ptr;
  157. if ((slen + 1) == ptrlen) /* skip key w/ no value */
  158. goto out;
  159. memcpy(key, ptr, slen); /* store & nul term key */
  160. key[slen] = 0;
  161. rem = ptr + slen + 1; /* trim value's leading whitespace */
  162. remlen = ptrlen - slen - 1;
  163. while ((remlen > 0) && (isCspace(*rem))) {
  164. remlen--;
  165. rem++;
  166. }
  167. memcpy(val, rem, remlen); /* store value, trim trailing ws */
  168. val[remlen] = 0;
  169. while ((*val) && (isCspace(val[strlen(val) - 1])))
  170. val[strlen(val) - 1] = 0;
  171. if (!*val) /* skip blank value */
  172. goto out;
  173. if (opt_protocol)
  174. applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);
  175. if (!strcasecmp("X-Roll-Ntime", key)) {
  176. hi->hadrolltime = true;
  177. if (!strncasecmp("N", val, 1))
  178. applog(LOG_DEBUG, "X-Roll-Ntime: N found");
  179. else {
  180. hi->canroll = true;
  181. /* Check to see if expire= is supported and if not, set
  182. * the rolltime to the default scantime */
  183. if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) {
  184. sscanf(val + 7, "%d", &hi->rolltime);
  185. hi->hadexpire = true;
  186. } else
  187. hi->rolltime = opt_scantime;
  188. applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
  189. }
  190. }
  191. if (!strcasecmp("X-Long-Polling", key)) {
  192. hi->lp_path = val; /* steal memory reference */
  193. val = NULL;
  194. }
  195. if (!strcasecmp("X-Reject-Reason", key)) {
  196. hi->reason = val; /* steal memory reference */
  197. val = NULL;
  198. }
  199. if (!strcasecmp("X-Stratum", key)) {
  200. hi->stratum_url = val;
  201. val = NULL;
  202. }
  203. out:
  204. free(key);
  205. free(val);
  206. return ptrlen;
  207. }
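/* Makes the socket non-blocking and enables TCP keepalive (plus TCP_NODELAY
 * unless opt_delaynet is set); returns 0 on success, 1 if any option failed. */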
  208. static int keep_sockalive(SOCKETTYPE fd)
  209. {
  210. const int tcp_one = 1;
  211. const int tcp_keepidle = 45;
  212. const int tcp_keepintvl = 30;
  213. int ret = 0;
  214. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one))))
  215. ret = 1;
  216. #ifndef WIN32
  217. int flags = fcntl(fd, F_GETFL, 0);
  218. fcntl(fd, F_SETFL, O_NONBLOCK | flags);
  219. #else
  220. u_long flags = 1;
  221. ioctlsocket(fd, FIONBIO, &flags);
  222. #endif
  223. if (!opt_delaynet)
  224. #ifndef __linux
  225. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  226. #else /* __linux */
  227. if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  228. #endif /* __linux */
  229. ret = 1;
  230. #ifdef __linux
  231. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one))))
  232. ret = 1;
  233. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle))))
  234. ret = 1;
  235. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  236. ret = 1;
  237. #endif /* __linux */
  238. #ifdef __APPLE_CC__
  239. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  240. ret = 1;
  241. #endif /* __APPLE_CC__ */
  242. #ifdef WIN32
  243. const int zero = 0;
  244. struct tcp_keepalive vals;
  245. vals.onoff = 1;
  246. vals.keepalivetime = tcp_keepidle * 1000;
  247. vals.keepaliveinterval = tcp_keepintvl * 1000;
  248. DWORD outputBytes;
  249. if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL)))
  250. ret = 1;
  251. /* Windows happily submits indefinitely to the send buffer blissfully
  252. * unaware nothing is getting there without gracefully failing unless
  253. * we disable the send buffer */
  254. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero))))
  255. ret = 1;
  256. #endif /* WIN32 */
  257. return ret;
  258. }
  259. int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
  260. curlsocktype __maybe_unused purpose)
  261. {
  262. return keep_sockalive(fd);
  263. }
  264. static void last_nettime(struct timeval *last)
  265. {
  266. rd_lock(&netacc_lock);
  267. last->tv_sec = nettime.tv_sec;
  268. last->tv_usec = nettime.tv_usec;
  269. rd_unlock(&netacc_lock);
  270. }
  271. static void set_nettime(void)
  272. {
  273. wr_lock(&netacc_lock);
  274. cgtime(&nettime);
  275. wr_unlock(&netacc_lock);
  276. }
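/* CURLOPT_DEBUGFUNCTION callback: accounts bytes sent/received against the
 * pool's stats and, when opt_protocol is set, logs curl's informational text. */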
  277. static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
  278. char *data, size_t size,
  279. void *userdata)
  280. {
  281. struct pool *pool = (struct pool *)userdata;
  282. switch(type) {
  283. case CURLINFO_HEADER_IN:
  284. case CURLINFO_DATA_IN:
  285. case CURLINFO_SSL_DATA_IN:
  286. pool->cgminer_pool_stats.bytes_received += size;
  287. total_bytes_rcvd += size;
  288. pool->cgminer_pool_stats.net_bytes_received += size;
  289. break;
  290. case CURLINFO_HEADER_OUT:
  291. case CURLINFO_DATA_OUT:
  292. case CURLINFO_SSL_DATA_OUT:
  293. pool->cgminer_pool_stats.bytes_sent += size;
  294. total_bytes_sent += size;
  295. pool->cgminer_pool_stats.net_bytes_sent += size;
  296. break;
  297. case CURLINFO_TEXT:
  298. {
  299. if (!opt_protocol)
  300. break;
  301. // data is not null-terminated, so we need to copy and terminate it for applog
  302. char datacp[size + 1];
  303. memcpy(datacp, data, size);
  304. while (likely(size) && unlikely(isCspace(datacp[size-1])))
  305. --size;
  306. if (unlikely(!size))
  307. break;
  308. datacp[size] = '\0';
  309. applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp);
  310. break;
  311. }
  312. default:
  313. break;
  314. }
  315. return 0;
  316. }
  317. struct json_rpc_call_state {
  318. struct data_buffer all_data;
  319. struct header_info hi;
  320. void *priv;
  321. char curl_err_str[CURL_ERROR_SIZE];
  322. struct curl_slist *headers;
  323. struct upload_buffer upload_data;
  324. struct pool *pool;
  325. };
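/* Prepares (but does not perform) a JSON-RPC HTTP POST on the curl handle;
 * the per-request state is stashed via CURLOPT_PRIVATE so that
 * json_rpc_call_completed can pick it up after curl_easy_perform. */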
  326. void json_rpc_call_async(CURL *curl, const char *url,
  327. const char *userpass, const char *rpc_req,
  328. bool longpoll,
  329. struct pool *pool, bool share,
  330. void *priv)
  331. {
  332. struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state));
  333. *state = (struct json_rpc_call_state){
  334. .priv = priv,
  335. .pool = pool,
  336. };
  337. long timeout = longpoll ? (60 * 60) : 60;
  338. char len_hdr[64], user_agent_hdr[128];
  339. struct curl_slist *headers = NULL;
  340. if (longpoll)
  341. state->all_data.idlemarker = &pool->lp_socket;
  342. /* it is assumed that 'curl' is freshly [re]initialized at this pt */
  343. curl_easy_setopt(curl, CURLOPT_PRIVATE, state);
  344. curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
  345. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  346. * to enable it */
  347. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  348. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  349. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  350. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  351. curl_easy_setopt(curl, CURLOPT_URL, url);
  352. curl_easy_setopt(curl, CURLOPT_ENCODING, "");
  353. curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
  354. /* Shares are staggered already and delays in submission can be costly
  355. * so do not delay them */
  356. if (!opt_delaynet || share)
  357. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  358. curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
  359. curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data);
  360. curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
  361. curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data);
  362. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]);
  363. curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
  364. curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
  365. curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi);
  366. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  367. if (pool->rpc_proxy) {
  368. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  369. } else if (opt_socks_proxy) {
  370. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  371. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
  372. }
  373. if (userpass) {
  374. curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
  375. curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
  376. }
  377. if (longpoll)
  378. curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb);
  379. curl_easy_setopt(curl, CURLOPT_POST, 1);
  380. if (opt_protocol)
  381. applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req);
  382. state->upload_data.buf = rpc_req;
  383. state->upload_data.len = strlen(rpc_req);
  384. sprintf(len_hdr, "Content-Length: %lu",
  385. (unsigned long) state->upload_data.len);
  386. sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION);
  387. headers = curl_slist_append(headers,
  388. "Content-type: application/json");
  389. headers = curl_slist_append(headers,
  390. "X-Mining-Extensions: longpoll midstate rollntime submitold");
  391. if (longpoll)
  392. headers = curl_slist_append(headers,
  393. "X-Minimum-Wait: 0");
  394. if (likely(global_hashrate)) {
  395. char ghashrate[255];
  396. sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate);
  397. headers = curl_slist_append(headers, ghashrate);
  398. }
  399. headers = curl_slist_append(headers, len_hdr);
  400. headers = curl_slist_append(headers, user_agent_hdr);
  401. headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/
  402. curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  403. state->headers = headers;
  404. if (opt_delaynet) {
  405. /* Don't delay share submission, but still track the nettime */
  406. if (!share) {
  407. long long now_msecs, last_msecs;
  408. struct timeval now, last;
  409. cgtime(&now);
  410. last_nettime(&last);
  411. now_msecs = (long long)now.tv_sec * 1000;
  412. now_msecs += now.tv_usec / 1000;
  413. last_msecs = (long long)last.tv_sec * 1000;
  414. last_msecs += last.tv_usec / 1000;
  415. if (now_msecs > last_msecs && now_msecs - last_msecs < 250) {
  416. struct timespec rgtp;
  417. rgtp.tv_sec = 0;
  418. rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000;
  419. nanosleep(&rgtp, NULL);
  420. }
  421. }
  422. set_nettime();
  423. }
  424. }
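/* Completes a request started by json_rpc_call_async: recovers the stashed
 * state, checks the HTTP and JSON results, applies header hints (rolltime,
 * long-poll path, stratum URL), and returns the parsed json_t (caller must
 * json_decref it) or NULL on failure. */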
  425. json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int *rolltime, void *out_priv)
  426. {
  427. struct json_rpc_call_state *state;
  428. if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (void*)&state) != CURLE_OK) {
  429. applog(LOG_ERR, "Failed to get private curl data");
  430. if (out_priv)
  431. *(void**)out_priv = NULL;
  432. goto err_out;
  433. }
  434. if (out_priv)
  435. *(void**)out_priv = state->priv;
  436. json_t *val, *err_val, *res_val;
  437. json_error_t err;
  438. struct pool *pool = state->pool;
  439. bool probing = probe && !pool->probed;
  440. if (rc) {
  441. applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str);
  442. goto err_out;
  443. }
  444. if (!state->all_data.buf) {
  445. applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
  446. goto err_out;
  447. }
  448. pool->cgminer_pool_stats.times_sent++;
  449. pool->cgminer_pool_stats.times_received++;
  450. if (probing) {
  451. pool->probed = true;
  452. /* If X-Long-Polling was found, activate long polling */
  453. if (state->hi.lp_path) {
  454. if (pool->hdr_path != NULL)
  455. free(pool->hdr_path);
  456. pool->hdr_path = state->hi.lp_path;
  457. } else
  458. pool->hdr_path = NULL;
  459. if (state->hi.stratum_url) {
  460. pool->stratum_url = state->hi.stratum_url;
  461. state->hi.stratum_url = NULL;
  462. }
  463. } else {
  464. if (state->hi.lp_path) {
  465. free(state->hi.lp_path);
  466. state->hi.lp_path = NULL;
  467. }
  468. if (state->hi.stratum_url) {
  469. free(state->hi.stratum_url);
  470. state->hi.stratum_url = NULL;
  471. }
  472. }
  473. if (pool->force_rollntime)
  474. {
  475. state->hi.canroll = true;
  476. state->hi.hadexpire = true;
  477. state->hi.rolltime = pool->force_rollntime;
  478. }
  479. if (rolltime)
  480. *rolltime = state->hi.rolltime;
  481. pool->cgminer_pool_stats.rolltime = state->hi.rolltime;
  482. pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime;
  483. pool->cgminer_pool_stats.canroll = state->hi.canroll;
  484. pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire;
  485. val = JSON_LOADS(state->all_data.buf, &err);
  486. if (!val) {
  487. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  488. if (opt_protocol)
  489. applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf);
  490. goto err_out;
  491. }
  492. if (opt_protocol) {
  493. char *s = json_dumps(val, JSON_INDENT(3));
  494. applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
  495. free(s);
  496. }
  497. /* JSON-RPC valid response returns a non-null 'result',
  498. * and a null 'error'.
  499. */
  500. res_val = json_object_get(val, "result");
  501. err_val = json_object_get(val, "error");
502. if (!res_val || (err_val && !json_is_null(err_val))) {
  503. char *s;
  504. if (err_val)
  505. s = json_dumps(err_val, JSON_INDENT(3));
  506. else
  507. s = strdup("(unknown reason)");
  508. applog(LOG_INFO, "JSON-RPC call failed: %s", s);
  509. free(s);
  510. json_decref(val);
  511. goto err_out;
  512. }
  513. if (state->hi.reason) {
  514. json_object_set_new(val, "reject-reason", json_string(state->hi.reason));
  515. free(state->hi.reason);
  516. state->hi.reason = NULL;
  517. }
  518. successful_connect = true;
  519. databuf_free(&state->all_data);
  520. curl_slist_free_all(state->headers);
  521. curl_easy_reset(curl);
  522. free(state);
  523. return val;
  524. err_out:
  525. databuf_free(&state->all_data);
  526. curl_slist_free_all(state->headers);
  527. curl_easy_reset(curl);
  528. if (!successful_connect)
  529. applog(LOG_DEBUG, "Failed to connect in json_rpc_call");
  530. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  531. free(state);
  532. return NULL;
  533. }
  534. json_t *json_rpc_call(CURL *curl, const char *url,
  535. const char *userpass, const char *rpc_req,
  536. bool probe, bool longpoll, int *rolltime,
  537. struct pool *pool, bool share)
  538. {
  539. json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
  540. int rc = curl_easy_perform(curl);
  541. return json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
  542. }
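// Returns true if the runtime libcurl is at least 7.21.7 (the first version
// that accepts a scheme prefix in CURLOPT_PROXY proxy URIs).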
  543. bool our_curl_supports_proxy_uris()
  544. {
  545. curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
546. return data->age && data->version_num >= ((7 << 16) | (21 << 8) | 7); // 7.21.7
  547. }
  548. // NOTE: This assumes reference URI is a root
  549. char *absolute_uri(char *uri, const char *ref)
  550. {
  551. if (strstr(uri, "://"))
  552. return strdup(uri);
  553. char *copy_start, *abs;
  554. bool need_slash = false;
  555. copy_start = (uri[0] == '/') ? &uri[1] : uri;
  556. if (ref[strlen(ref) - 1] != '/')
  557. need_slash = true;
  558. abs = malloc(strlen(ref) + strlen(copy_start) + 2);
  559. if (!abs) {
  560. applog(LOG_ERR, "Malloc failure in absolute_uri");
  561. return NULL;
  562. }
  563. sprintf(abs, "%s%s%s", ref, need_slash ? "/" : "", copy_start);
  564. return abs;
  565. }
  566. static const char _hexchars[0x10] = "0123456789abcdef";
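/* Writes the lowercase hex encoding of 'len' bytes from 'in' into 'out';
 * 'out' must have room for 2*len + 1 bytes including the NUL terminator. */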
  567. void bin2hex(char *out, const void *in, size_t len)
  568. {
  569. const unsigned char *p = in;
  570. while (len--)
  571. {
  572. (out++)[0] = _hexchars[p[0] >> 4];
  573. (out++)[0] = _hexchars[p[0] & 0xf];
  574. ++p;
  575. }
  576. out[0] = '\0';
  577. }
  578. static inline
  579. int _hex2bin_char(const char c)
  580. {
  581. if (c >= '0' && c <= '9')
  582. return c - '0';
  583. if (c >= 'a' && c <= 'f')
  584. return (c - 'a') + 10;
  585. if (c >= 'A' && c <= 'F')
  586. return (c - 'A') + 10;
  587. return -1;
  588. }
  589. /* Does the reverse of bin2hex but does not allocate any ram */
  590. bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
  591. {
  592. int n, o;
  593. while (len--)
  594. {
  595. n = _hex2bin_char((hexstr++)[0]);
  596. if (unlikely(n == -1))
  597. {
  598. badchar:
  599. if (!hexstr[-1])
  600. applog(LOG_ERR, "hex2bin: str truncated");
  601. else
  602. applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]);
  603. return false;
  604. }
  605. o = _hex2bin_char((hexstr++)[0]);
  606. if (unlikely(o == -1))
  607. goto badchar;
  608. (p++)[0] = (n << 4) | o;
  609. }
  610. return likely(!hexstr[0]);
  611. }
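/* Naive UCS-2 to char conversion: keeps only the low byte of each 16-bit code
 * unit. The output is not NUL-terminated here; ucs2tochar_dup adds that. */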
  612. void ucs2tochar(char * const out, const uint16_t * const in, const size_t sz)
  613. {
  614. for (int i = 0; i < sz; ++i)
  615. out[i] = in[i];
  616. }
  617. char *ucs2tochar_dup(uint16_t * const in, const size_t sz)
  618. {
  619. char *out = malloc(sz + 1);
  620. ucs2tochar(out, in, sz);
  621. out[sz] = '\0';
  622. return out;
  623. }
  624. void hash_data(unsigned char *out_hash, const unsigned char *data)
  625. {
  626. unsigned char blkheader[80];
  627. // data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header
  628. swap32yes(blkheader, data, 80 / 4);
  629. // double-SHA256 to get the block hash
  630. gen_hash(blkheader, out_hash, 80);
  631. }
  632. // Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1)
  633. void real_block_target(unsigned char *target, const unsigned char *data)
  634. {
  635. uint8_t targetshift;
  636. if (unlikely(data[72] < 3 || data[72] > 0x20))
  637. {
  638. // Invalid (out of bounds) target
  639. memset(target, 0xff, 32);
  640. return;
  641. }
  642. targetshift = data[72] - 3;
  643. memset(target, 0, targetshift);
  644. target[targetshift++] = data[75];
  645. target[targetshift++] = data[74];
  646. target[targetshift++] = data[73];
  647. memset(&target[targetshift], 0, 0x20 - targetshift);
  648. }
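/* Returns true if hash <= target, treating both as 32-byte little-endian
 * numbers and comparing from the most significant 32-bit word downwards. */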
  649. bool hash_target_check(const unsigned char *hash, const unsigned char *target)
  650. {
  651. const uint32_t *h32 = (uint32_t*)&hash[0];
  652. const uint32_t *t32 = (uint32_t*)&target[0];
  653. for (int i = 7; i >= 0; --i) {
  654. uint32_t h32i = le32toh(h32[i]);
  655. uint32_t t32i = le32toh(t32[i]);
  656. if (h32i > t32i)
  657. return false;
  658. if (h32i < t32i)
  659. return true;
  660. }
  661. return true;
  662. }
  663. bool hash_target_check_v(const unsigned char *hash, const unsigned char *target)
  664. {
  665. bool rc;
  666. rc = hash_target_check(hash, target);
  667. if (opt_debug) {
  668. unsigned char hash_swap[32], target_swap[32];
  669. char hash_str[65];
  670. char target_str[65];
  671. for (int i = 0; i < 32; ++i) {
  672. hash_swap[i] = hash[31-i];
  673. target_swap[i] = target[31-i];
  674. }
  675. bin2hex(hash_str, hash_swap, 32);
  676. bin2hex(target_str, target_swap, 32);
  677. applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
  678. hash_str,
  679. target_str,
  680. rc ? "YES (hash <= target)" :
  681. "no (false positive; hash > target)");
  682. }
  683. return rc;
  684. }
  685. // This operates on a native-endian SHA256 state
  686. // In other words, on little endian platforms, every 4 bytes are in reverse order
  687. bool fulltest(const unsigned char *hash, const unsigned char *target)
  688. {
  689. unsigned char hash2[32];
  690. swap32tobe(hash2, hash, 32 / 4);
  691. return hash_target_check_v(hash2, target);
  692. }
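/* Thread-safe work queue: tq_push appends an entry and signals the condition,
 * tq_pop blocks (optionally until abstime) for the next entry, and
 * tq_freeze/tq_thaw toggle whether new pushes are accepted. */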
  693. struct thread_q *tq_new(void)
  694. {
  695. struct thread_q *tq;
  696. tq = calloc(1, sizeof(*tq));
  697. if (!tq)
  698. return NULL;
  699. pthread_mutex_init(&tq->mutex, NULL);
  700. pthread_cond_init(&tq->cond, NULL);
  701. return tq;
  702. }
  703. void tq_free(struct thread_q *tq)
  704. {
  705. struct tq_ent *ent, *iter;
  706. if (!tq)
  707. return;
  708. DL_FOREACH_SAFE(tq->q, ent, iter) {
  709. DL_DELETE(tq->q, ent);
  710. free(ent);
  711. }
  712. pthread_cond_destroy(&tq->cond);
  713. pthread_mutex_destroy(&tq->mutex);
  714. memset(tq, 0, sizeof(*tq)); /* poison */
  715. free(tq);
  716. }
  717. static void tq_freezethaw(struct thread_q *tq, bool frozen)
  718. {
  719. mutex_lock(&tq->mutex);
  720. tq->frozen = frozen;
  721. pthread_cond_signal(&tq->cond);
  722. mutex_unlock(&tq->mutex);
  723. }
  724. void tq_freeze(struct thread_q *tq)
  725. {
  726. tq_freezethaw(tq, true);
  727. }
  728. void tq_thaw(struct thread_q *tq)
  729. {
  730. tq_freezethaw(tq, false);
  731. }
  732. bool tq_push(struct thread_q *tq, void *data)
  733. {
  734. struct tq_ent *ent;
  735. bool rc = true;
  736. ent = calloc(1, sizeof(*ent));
  737. if (!ent)
  738. return false;
  739. ent->data = data;
  740. mutex_lock(&tq->mutex);
  741. if (!tq->frozen) {
  742. DL_APPEND(tq->q, ent);
  743. } else {
  744. free(ent);
  745. rc = false;
  746. }
  747. pthread_cond_signal(&tq->cond);
  748. mutex_unlock(&tq->mutex);
  749. return rc;
  750. }
  751. void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
  752. {
  753. struct tq_ent *ent;
  754. void *rval = NULL;
  755. int rc;
  756. mutex_lock(&tq->mutex);
  757. if (tq->q)
  758. goto pop;
  759. if (abstime)
  760. rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
  761. else
  762. rc = pthread_cond_wait(&tq->cond, &tq->mutex);
  763. if (rc)
  764. goto out;
  765. if (!tq->q)
  766. goto out;
  767. pop:
  768. ent = tq->q;
  769. rval = ent->data;
  770. DL_DELETE(tq->q, ent);
  771. free(ent);
  772. out:
  773. mutex_unlock(&tq->mutex);
  774. return rval;
  775. }
  776. int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
  777. {
  778. int rv = pthread_create(&thr->pth, attr, start, arg);
  779. if (likely(!rv))
  780. thr->has_pth = true;
  781. return rv;
  782. }
  783. void thr_info_freeze(struct thr_info *thr)
  784. {
  785. struct tq_ent *ent, *iter;
  786. struct thread_q *tq;
  787. if (!thr)
  788. return;
  789. tq = thr->q;
  790. if (!tq)
  791. return;
  792. mutex_lock(&tq->mutex);
  793. tq->frozen = true;
  794. DL_FOREACH_SAFE(tq->q, ent, iter) {
  795. DL_DELETE(tq->q, ent);
  796. free(ent);
  797. }
  798. mutex_unlock(&tq->mutex);
  799. }
  800. void thr_info_cancel(struct thr_info *thr)
  801. {
  802. if (!thr)
  803. return;
  804. if (thr->has_pth) {
  805. pthread_cancel(thr->pth);
  806. thr->has_pth = false;
  807. }
  808. }
  809. #ifndef HAVE_PTHREAD_CANCEL
  810. // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill
  811. enum pthread_cancel_workaround_mode {
  812. PCWM_DEFAULT = 0,
  813. PCWM_TERMINATE = 1,
  814. PCWM_ASYNC = 2,
  815. PCWM_DISABLED = 4,
  816. PCWM_CANCELLED = 8,
  817. };
  818. static pthread_key_t key_pcwm;
  819. struct sigaction pcwm_orig_term_handler;
  820. static
  821. void do_pthread_cancel_exit(int flags)
  822. {
  823. if (!(flags & PCWM_ASYNC))
  824. // NOTE: Logging disables cancel while mutex held, so this is safe
  825. applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW");
  826. pthread_exit(PTHREAD_CANCELED);
  827. }
  828. static
  829. void sighandler_pthread_cancel(int sig)
  830. {
  831. int flags = (int)pthread_getspecific(key_pcwm);
  832. if (flags & PCWM_TERMINATE) // Main thread
  833. {
  834. // Restore original handler and call it
  835. if (sigaction(sig, &pcwm_orig_term_handler, NULL))
  836. quit(1, "pthread_cancel workaround: Failed to restore original handler");
  837. raise(SIGTERM);
  838. quit(1, "pthread_cancel workaround: Original handler returned");
  839. }
  840. if (flags & PCWM_CANCELLED) // Already pending cancel
  841. return;
  842. if (flags & PCWM_DISABLED)
  843. {
  844. flags |= PCWM_CANCELLED;
  845. if (pthread_setspecific(key_pcwm, (void*)flags))
  846. quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)");
  847. return;
  848. }
  849. do_pthread_cancel_exit(flags);
  850. }
  851. void pthread_testcancel(void)
  852. {
  853. int flags = (int)pthread_getspecific(key_pcwm);
  854. if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED))
  855. do_pthread_cancel_exit(flags);
  856. }
  857. int pthread_setcancelstate(int state, int *oldstate)
  858. {
  859. int flags = (int)pthread_getspecific(key_pcwm);
  860. if (oldstate)
  861. *oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
  862. if (state == PTHREAD_CANCEL_DISABLE)
  863. flags |= PCWM_DISABLED;
  864. else
  865. {
  866. if (flags & PCWM_CANCELLED)
  867. do_pthread_cancel_exit(flags);
  868. flags &= ~PCWM_DISABLED;
  869. }
  870. if (pthread_setspecific(key_pcwm, (void*)flags))
  871. return -1;
  872. return 0;
  873. }
  874. int pthread_setcanceltype(int type, int *oldtype)
  875. {
  876. int flags = (int)pthread_getspecific(key_pcwm);
  877. if (oldtype)
  878. *oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
  879. if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
  880. flags |= PCWM_ASYNC;
  881. else
  882. flags &= ~PCWM_ASYNC;
  883. if (pthread_setspecific(key_pcwm, (void*)flags))
  884. return -1;
  885. return 0;
  886. }
  887. void setup_pthread_cancel_workaround()
  888. {
  889. if (pthread_key_create(&key_pcwm, NULL))
  890. quit(1, "pthread_cancel workaround: pthread_key_create failed");
  891. if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE))
  892. quit(1, "pthread_cancel workaround: pthread_setspecific failed");
  893. struct sigaction new_sigact = {
  894. .sa_handler = sighandler_pthread_cancel,
  895. };
  896. if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler))
  897. quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler");
  898. }
  899. #endif
  900. static void _now_gettimeofday(struct timeval *);
  901. static void _cgsleep_us_r_nanosleep(cgtimer_t *, int64_t);
  902. #ifdef HAVE_POOR_GETTIMEOFDAY
  903. static struct timeval tv_timeofday_offset;
  904. static struct timeval _tv_timeofday_lastchecked;
  905. static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER;
  906. static
  907. void bfg_calibrate_timeofday(struct timeval *expected, char *buf)
  908. {
  909. struct timeval actual, delta;
  910. timeradd(expected, &tv_timeofday_offset, expected);
  911. _now_gettimeofday(&actual);
  912. if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1)
  913. // Within reason - no change necessary
  914. return;
  915. timersub(&actual, expected, &delta);
  916. timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset);
  917. sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec);
  918. *expected = actual;
  919. }
  920. void bfg_gettimeofday(struct timeval *out)
  921. {
  922. char buf[64] = "";
  923. timer_set_now(out);
  924. mutex_lock(&_tv_timeofday_mutex);
  925. if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21)
  926. bfg_calibrate_timeofday(out, buf);
  927. else
  928. timeradd(out, &tv_timeofday_offset, out);
  929. mutex_unlock(&_tv_timeofday_mutex);
  930. if (unlikely(buf[0]))
  931. applog(LOG_WARNING, "%s", buf);
  932. }
  933. #endif
  934. #ifdef WIN32
  935. static LARGE_INTEGER _perffreq;
  936. static
  937. void _now_queryperformancecounter(struct timeval *tv)
  938. {
  939. LARGE_INTEGER now;
  940. if (unlikely(!QueryPerformanceCounter(&now)))
  941. quit(1, "QueryPerformanceCounter failed");
  942. *tv = (struct timeval){
  943. .tv_sec = now.QuadPart / _perffreq.QuadPart,
  944. .tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart,
  945. };
  946. }
  947. #endif
  948. static void bfg_init_time();
  949. static
  950. void _now_is_not_set(__maybe_unused struct timeval *tv)
  951. {
  952. bfg_init_time();
  953. timer_set_now(tv);
  954. }
  955. void (*timer_set_now)(struct timeval *tv) = _now_is_not_set;
  956. void (*cgsleep_us_r)(cgtimer_t *, int64_t) = _cgsleep_us_r_nanosleep;
  957. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  958. static clockid_t bfg_timer_clk;
  959. static
  960. void _now_clock_gettime(struct timeval *tv)
  961. {
  962. struct timespec ts;
  963. if (unlikely(clock_gettime(bfg_timer_clk, &ts)))
  964. quit(1, "clock_gettime failed");
  965. *tv = (struct timeval){
  966. .tv_sec = ts.tv_sec,
  967. .tv_usec = ts.tv_nsec / 1000,
  968. };
  969. }
  970. #ifdef HAVE_CLOCK_NANOSLEEP
  971. static
  972. void _cgsleep_us_r_monotonic(cgtimer_t *tv_start, int64_t us)
  973. {
  974. struct timeval tv_end[1];
  975. struct timespec ts_end[1];
  976. int ret;
  977. timer_set_delay(tv_end, tv_start, us);
  978. timeval_to_spec(ts_end, tv_end);
  979. do {
  980. ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
  981. } while (ret == EINTR);
  982. }
  983. #endif
  984. static
  985. bool _bfg_try_clock_gettime(clockid_t clk)
  986. {
  987. struct timespec ts;
  988. if (clock_gettime(clk, &ts))
  989. return false;
  990. bfg_timer_clk = clk;
  991. timer_set_now = _now_clock_gettime;
  992. return true;
  993. }
  994. #endif
  995. static
  996. void bfg_init_time()
  997. {
  998. if (timer_set_now != _now_is_not_set)
  999. return;
  1000. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  1001. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW
  1002. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW))
  1003. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)");
  1004. else
  1005. #endif
  1006. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC))
  1007. {
  1008. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)");
  1009. #ifdef HAVE_CLOCK_NANOSLEEP
  1010. cgsleep_us_r = _cgsleep_us_r_monotonic;
  1011. #endif
  1012. }
  1013. else
  1014. #endif
  1015. #ifdef WIN32
  1016. if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart)
  1017. {
  1018. timer_set_now = _now_queryperformancecounter;
  1019. applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter");
  1020. }
  1021. else
  1022. #endif
  1023. {
  1024. timer_set_now = _now_gettimeofday;
  1025. applog(LOG_DEBUG, "Timers: Using gettimeofday");
  1026. }
  1027. #ifdef HAVE_POOR_GETTIMEOFDAY
  1028. char buf[64] = "";
  1029. struct timeval tv;
  1030. timer_set_now(&tv);
  1031. bfg_calibrate_timeofday(&tv, buf);
  1032. applog(LOG_DEBUG, "%s", buf);
  1033. #endif
  1034. }
  1035. void subtime(struct timeval *a, struct timeval *b)
  1036. {
  1037. timersub(a, b, b);
  1038. }
  1039. void addtime(struct timeval *a, struct timeval *b)
  1040. {
  1041. timeradd(a, b, b);
  1042. }
  1043. bool time_more(struct timeval *a, struct timeval *b)
  1044. {
  1045. return timercmp(a, b, >);
  1046. }
  1047. bool time_less(struct timeval *a, struct timeval *b)
  1048. {
  1049. return timercmp(a, b, <);
  1050. }
  1051. void copy_time(struct timeval *dest, const struct timeval *src)
  1052. {
  1053. memcpy(dest, src, sizeof(struct timeval));
  1054. }
  1055. void timespec_to_val(struct timeval *val, const struct timespec *spec)
  1056. {
  1057. val->tv_sec = spec->tv_sec;
  1058. val->tv_usec = spec->tv_nsec / 1000;
  1059. }
  1060. void timeval_to_spec(struct timespec *spec, const struct timeval *val)
  1061. {
  1062. spec->tv_sec = val->tv_sec;
  1063. spec->tv_nsec = val->tv_usec * 1000;
  1064. }
  1065. void us_to_timeval(struct timeval *val, int64_t us)
  1066. {
  1067. lldiv_t tvdiv = lldiv(us, 1000000);
  1068. val->tv_sec = tvdiv.quot;
  1069. val->tv_usec = tvdiv.rem;
  1070. }
  1071. void us_to_timespec(struct timespec *spec, int64_t us)
  1072. {
  1073. lldiv_t tvdiv = lldiv(us, 1000000);
  1074. spec->tv_sec = tvdiv.quot;
  1075. spec->tv_nsec = tvdiv.rem * 1000;
  1076. }
  1077. void ms_to_timespec(struct timespec *spec, int64_t ms)
  1078. {
  1079. lldiv_t tvdiv = lldiv(ms, 1000);
  1080. spec->tv_sec = tvdiv.quot;
  1081. spec->tv_nsec = tvdiv.rem * 1000000;
  1082. }
  1083. void timeraddspec(struct timespec *a, const struct timespec *b)
  1084. {
  1085. a->tv_sec += b->tv_sec;
  1086. a->tv_nsec += b->tv_nsec;
  1087. if (a->tv_nsec >= 1000000000) {
  1088. a->tv_nsec -= 1000000000;
  1089. a->tv_sec++;
  1090. }
  1091. }
  1092. #ifndef WIN32
  1093. static
  1094. void _now_gettimeofday(struct timeval *tv)
  1095. {
  1096. gettimeofday(tv, NULL);
  1097. }
  1098. #else
  1099. /* Windows start time is since 1601 lol so convert it to unix epoch 1970. */
  1100. #define EPOCHFILETIME (116444736000000000LL)
  1101. /* Return the system time as an lldiv_t in decimicroseconds. */
  1102. static void decius_time(lldiv_t *lidiv)
  1103. {
  1104. FILETIME ft;
  1105. LARGE_INTEGER li;
  1106. GetSystemTimeAsFileTime(&ft);
  1107. li.LowPart = ft.dwLowDateTime;
  1108. li.HighPart = ft.dwHighDateTime;
  1109. li.QuadPart -= EPOCHFILETIME;
  1110. /* SystemTime is in decimicroseconds so divide by an unusual number */
  1111. *lidiv = lldiv(li.QuadPart, 10000000);
  1112. }
  1113. void _now_gettimeofday(struct timeval *tv)
  1114. {
  1115. lldiv_t lidiv;
  1116. decius_time(&lidiv);
  1117. tv->tv_sec = lidiv.quot;
  1118. tv->tv_usec = lidiv.rem / 10;
  1119. }
  1120. #endif
  1121. void cgsleep_ms_r(cgtimer_t *tv_start, int ms)
  1122. {
  1123. cgsleep_us_r(tv_start, ((int64_t)ms) * 1000);
  1124. }
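/* Fallback sleep used when clock_nanosleep is unavailable: repeatedly
 * nanosleep()s until the absolute deadline tv_start + us has passed. */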
  1125. static
  1126. void _cgsleep_us_r_nanosleep(cgtimer_t *tv_start, int64_t us)
  1127. {
  1128. struct timeval tv_timer[1], tv[1];
  1129. struct timespec ts[1];
  1130. timer_set_delay(tv_timer, tv_start, us);
  1131. while (true)
  1132. {
  1133. timer_set_now(tv);
  1134. if (!timercmp(tv_timer, tv, >))
  1135. return;
  1136. timersub(tv_timer, tv, tv);
  1137. timeval_to_spec(ts, tv);
  1138. nanosleep(ts, NULL);
  1139. }
  1140. }
  1141. void cgsleep_ms(int ms)
  1142. {
  1143. cgtimer_t ts_start;
  1144. cgsleep_prepare_r(&ts_start);
  1145. cgsleep_ms_r(&ts_start, ms);
  1146. }
  1147. void cgsleep_us(int64_t us)
  1148. {
  1149. cgtimer_t ts_start;
  1150. cgsleep_prepare_r(&ts_start);
  1151. cgsleep_us_r(&ts_start, us);
  1152. }
  1153. /* Returns the microseconds difference between end and start times as a double */
  1154. double us_tdiff(struct timeval *end, struct timeval *start)
  1155. {
  1156. return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec;
  1157. }
  1158. /* Returns the seconds difference between end and start times as a double */
  1159. double tdiff(struct timeval *end, struct timeval *start)
  1160. {
  1161. return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
  1162. }
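/* Splits a pool URL into host and port strings (defaulting the port to 80),
 * handling bracketed IPv6 literals; the results are strdup'd into
 * *sockaddr_url and *sockaddr_port, freeing any previous values. */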
  1163. bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
  1164. {
  1165. char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
  1166. char url_address[256], port[6];
  1167. int url_len, port_len = 0;
  1168. url_begin = strstr(url, "//");
  1169. if (!url_begin)
  1170. url_begin = url;
  1171. else
  1172. url_begin += 2;
  1173. /* Look for numeric ipv6 entries */
  1174. ipv6_begin = strstr(url_begin, "[");
  1175. ipv6_end = strstr(url_begin, "]");
  1176. if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
  1177. url_end = strstr(ipv6_end, ":");
  1178. else
  1179. url_end = strstr(url_begin, ":");
  1180. if (url_end) {
  1181. url_len = url_end - url_begin;
  1182. port_len = strlen(url_begin) - url_len - 1;
  1183. if (port_len < 1)
  1184. return false;
  1185. port_start = url_end + 1;
  1186. } else
  1187. url_len = strlen(url_begin);
  1188. if (url_len < 1)
  1189. return false;
  1190. sprintf(url_address, "%.*s", url_len, url_begin);
  1191. if (port_len) {
  1192. char *slash;
  1193. snprintf(port, 6, "%.*s", port_len, port_start);
  1194. slash = strchr(port, '/');
  1195. if (slash)
  1196. *slash = '\0';
  1197. } else
  1198. strcpy(port, "80");
  1199. free(*sockaddr_port);
  1200. *sockaddr_port = strdup(port);
  1201. free(*sockaddr_url);
  1202. *sockaddr_url = strdup(url_address);
  1203. return true;
  1204. }
  1205. enum send_ret {
  1206. SEND_OK,
  1207. SEND_SELECTFAIL,
  1208. SEND_SENDFAIL,
  1209. SEND_INACTIVE
  1210. };
  1211. /* Send a single command across a socket, appending \n to it. This should all
  1212. * be done under stratum lock except when first establishing the socket */
  1213. static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
  1214. {
  1215. SOCKETTYPE sock = pool->sock;
  1216. ssize_t ssent = 0;
  1217. strcat(s, "\n");
  1218. len++;
1219. while (len > 0) {
  1220. struct timeval timeout = {1, 0};
  1221. ssize_t sent;
  1222. fd_set wd;
  1223. FD_ZERO(&wd);
  1224. FD_SET(sock, &wd);
  1225. if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
  1226. return SEND_SELECTFAIL;
  1227. #ifdef __APPLE__
  1228. sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
  1229. #elif WIN32
  1230. sent = send(pool->sock, s + ssent, len, 0);
  1231. #else
  1232. sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
  1233. #endif
  1234. if (sent < 0) {
  1235. if (!sock_blocks())
  1236. return SEND_SENDFAIL;
  1237. sent = 0;
  1238. }
  1239. ssent += sent;
  1240. len -= sent;
  1241. }
  1242. pool->cgminer_pool_stats.times_sent++;
  1243. pool->cgminer_pool_stats.bytes_sent += ssent;
  1244. total_bytes_sent += ssent;
  1245. pool->cgminer_pool_stats.net_bytes_sent += ssent;
  1246. return SEND_OK;
  1247. }
  1248. bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force)
  1249. {
  1250. enum send_ret ret = SEND_INACTIVE;
  1251. if (opt_protocol)
  1252. applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s);
  1253. mutex_lock(&pool->stratum_lock);
  1254. if (pool->stratum_active || force)
  1255. ret = __stratum_send(pool, s, len);
  1256. mutex_unlock(&pool->stratum_lock);
  1257. /* This is to avoid doing applog under stratum_lock */
  1258. switch (ret) {
  1259. default:
  1260. case SEND_OK:
  1261. break;
  1262. case SEND_SELECTFAIL:
  1263. applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
  1264. suspend_stratum(pool);
  1265. break;
  1266. case SEND_SENDFAIL:
  1267. applog(LOG_DEBUG, "Failed to send in stratum_send");
  1268. suspend_stratum(pool);
  1269. break;
  1270. case SEND_INACTIVE:
  1271. applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
  1272. break;
  1273. }
  1274. return (ret == SEND_OK);
  1275. }
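/* Returns true if the stratum socket becomes readable within 'wait' seconds
 * (or immediately if the socket is invalid). */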
  1276. static bool socket_full(struct pool *pool, int wait)
  1277. {
  1278. SOCKETTYPE sock = pool->sock;
  1279. struct timeval timeout;
  1280. fd_set rd;
  1281. if (sock == INVSOCK)
  1282. return true;
  1283. if (unlikely(wait < 0))
  1284. wait = 0;
  1285. FD_ZERO(&rd);
  1286. FD_SET(sock, &rd);
  1287. timeout.tv_usec = 0;
  1288. timeout.tv_sec = wait;
  1289. if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
  1290. return true;
  1291. return false;
  1292. }
  1293. /* Check to see if Santa's been good to you */
  1294. bool sock_full(struct pool *pool)
  1295. {
  1296. if (strlen(pool->sockbuf))
  1297. return true;
  1298. return (socket_full(pool, 0));
  1299. }
  1300. static void clear_sockbuf(struct pool *pool)
  1301. {
  1302. strcpy(pool->sockbuf, "");
  1303. }
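/* Drains any pending data from the stratum socket and empties sockbuf. */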
  1304. static void clear_sock(struct pool *pool)
  1305. {
  1306. ssize_t n;
  1307. mutex_lock(&pool->stratum_lock);
  1308. do {
  1309. if (pool->sock)
  1310. n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
  1311. else
  1312. n = 0;
  1313. } while (n > 0);
  1314. mutex_unlock(&pool->stratum_lock);
  1315. clear_sockbuf(pool);
  1316. }
  1317. /* Make sure the pool sockbuf is large enough to cope with any coinbase size
  1318. * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
  1319. * and zeroing the new memory */
  1320. static void recalloc_sock(struct pool *pool, size_t len)
  1321. {
  1322. size_t old, new;
  1323. old = strlen(pool->sockbuf);
  1324. new = old + len + 1;
  1325. if (new < pool->sockbuf_size)
  1326. return;
  1327. new = new + (RBUFSIZE - (new % RBUFSIZE));
  1328. // Avoid potentially recursive locking
  1329. // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new);
  1330. pool->sockbuf = realloc(pool->sockbuf, new);
  1331. if (!pool->sockbuf)
  1332. quithere(1, "Failed to realloc pool sockbuf");
  1333. memset(pool->sockbuf + old, 0, new - old);
  1334. pool->sockbuf_size = new;
  1335. }
  1336. /* Peeks at a socket to find the first end of line and then reads just that
  1337. * from the socket and returns that as a malloced char */
  1338. char *recv_line(struct pool *pool)
  1339. {
  1340. char *tok, *sret = NULL;
  1341. ssize_t len, buflen;
  1342. int waited = 0;
  1343. if (!strstr(pool->sockbuf, "\n")) {
  1344. struct timeval rstart, now;
  1345. cgtime(&rstart);
  1346. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1347. applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
  1348. goto out;
  1349. }
  1350. do {
  1351. char s[RBUFSIZE];
  1352. size_t slen;
  1353. ssize_t n;
  1354. memset(s, 0, RBUFSIZE);
  1355. n = recv(pool->sock, s, RECVSIZE, 0);
  1356. if (!n) {
  1357. applog(LOG_DEBUG, "Socket closed waiting in recv_line");
  1358. suspend_stratum(pool);
  1359. break;
  1360. }
  1361. cgtime(&now);
  1362. waited = tdiff(&now, &rstart);
  1363. if (n < 0) {
1364. // Save errno from being overwritten by socket_* calls
  1365. int socket_recv_errno;
  1366. socket_recv_errno = SOCKERR;
  1367. if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
  1368. applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET));
  1369. suspend_stratum(pool);
  1370. break;
  1371. }
  1372. } else {
  1373. slen = strlen(s);
  1374. recalloc_sock(pool, slen);
  1375. strcat(pool->sockbuf, s);
  1376. }
  1377. } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
  1378. }
  1379. buflen = strlen(pool->sockbuf);
  1380. tok = strtok(pool->sockbuf, "\n");
  1381. if (!tok) {
  1382. applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
  1383. goto out;
  1384. }
  1385. sret = strdup(tok);
  1386. len = strlen(sret);
  1387. /* Copy what's left in the buffer after the \n, including the
  1388. * terminating \0 */
  1389. if (buflen > len + 1)
  1390. memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
  1391. else
  1392. strcpy(pool->sockbuf, "");
  1393. pool->cgminer_pool_stats.times_received++;
  1394. pool->cgminer_pool_stats.bytes_received += len;
  1395. total_bytes_rcvd += len;
  1396. pool->cgminer_pool_stats.net_bytes_received += len;
  1397. out:
  1398. if (!sret)
  1399. clear_sock(pool);
  1400. else if (opt_protocol)
  1401. applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret);
  1402. return sret;
  1403. }
  1404. /* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY
  1405. * flag, but this is compatible with 2.0. */
  1406. char *json_dumps_ANY(json_t *json, size_t flags)
  1407. {
  1408. switch (json_typeof(json))
  1409. {
  1410. case JSON_ARRAY:
  1411. case JSON_OBJECT:
  1412. return json_dumps(json, flags);
  1413. default:
  1414. break;
  1415. }
  1416. char *rv;
  1417. #ifdef JSON_ENCODE_ANY
  1418. rv = json_dumps(json, JSON_ENCODE_ANY | flags);
  1419. if (rv)
  1420. return rv;
  1421. #endif
  1422. json_t *tmp = json_array();
  1423. char *s;
  1424. int i;
  1425. size_t len;
  1426. if (!tmp)
  1427. quithere(1, "Failed to allocate json array");
  1428. if (json_array_append(tmp, json))
  1429. quithere(1, "Failed to append temporary array");
  1430. s = json_dumps(tmp, flags);
  1431. if (!s)
  1432. return NULL;
  1433. for (i = 0; s[i] != '['; ++i)
  1434. if (unlikely(!(s[i] && isCspace(s[i]))))
  1435. quithere(1, "Failed to find opening bracket in array dump");
  1436. len = strlen(&s[++i]) - 1;
  1437. if (unlikely(s[i+len] != ']'))
  1438. quithere(1, "Failed to find closing bracket in array dump");
  1439. rv = malloc(len + 1);
  1440. memcpy(rv, &s[i], len);
  1441. rv[len] = '\0';
  1442. free(s);
  1443. json_decref(tmp);
  1444. return rv;
  1445. }
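/* Usage sketch (not compiled): json_dumps_ANY also handles scalar values,
 * which plain json_dumps() refuses to encode without jansson 2.1's
 * JSON_ENCODE_ANY flag. */
#if 0
	json_t *id = json_integer(42);
	char *s = json_dumps_ANY(id, 0);  /* yields the malloced string "42" */
	free(s);
	json_decref(id);
#endif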
  1446. /* Extracts a string value from a json array with error checking. To be used
  1447. * when the value of the string returned is only examined and not to be stored.
  1448. * See json_array_string below */
  1449. const char *__json_array_string(json_t *val, unsigned int entry)
  1450. {
  1451. json_t *arr_entry;
  1452. if (json_is_null(val))
  1453. return NULL;
  1454. if (!json_is_array(val))
  1455. return NULL;
1456. if (entry >= json_array_size(val))
  1457. return NULL;
  1458. arr_entry = json_array_get(val, entry);
  1459. if (!json_is_string(arr_entry))
  1460. return NULL;
  1461. return json_string_value(arr_entry);
  1462. }
  1463. /* Creates a freshly malloced dup of __json_array_string */
  1464. static char *json_array_string(json_t *val, unsigned int entry)
  1465. {
  1466. const char *buf = __json_array_string(val, entry);
  1467. if (buf)
  1468. return strdup(buf);
  1469. return NULL;
  1470. }
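/* Usage sketch (not compiled, given some json_t *val): __json_array_string
 * borrows jansson's storage, so its result is only valid while val is held;
 * json_array_string returns a copy the caller must free. */
#if 0
	const char *peek = __json_array_string(val, 0);  /* do not free */
	char *owned = json_array_string(val, 0);         /* free() when done */
	free(owned);
#endif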
  1471. void stratum_probe_transparency(struct pool *pool)
  1472. {
  1473. // Request transaction data to discourage pools from doing anything shady
  1474. char s[1024];
  1475. int sLen;
  1476. sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}",
  1477. pool->swork.job_id,
  1478. pool->swork.job_id);
  1479. stratum_send(pool, s, sLen);
  1480. if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
  1481. cgtime(&pool->swork.tv_transparency);
  1482. pool->swork.transparency_probed = true;
  1483. }
  1484. static bool parse_notify(struct pool *pool, json_t *val)
  1485. {
  1486. const char *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime;
  1487. char *job_id;
  1488. bool clean, ret = false;
  1489. int merkles, i;
  1490. size_t cb1_len, cb2_len;
  1491. json_t *arr;
  1492. arr = json_array_get(val, 4);
  1493. if (!arr || !json_is_array(arr))
  1494. goto out;
  1495. merkles = json_array_size(arr);
  1496. for (i = 0; i < merkles; i++)
  1497. if (!json_is_string(json_array_get(arr, i)))
  1498. goto out;
  1499. prev_hash = __json_array_string(val, 1);
  1500. coinbase1 = __json_array_string(val, 2);
  1501. coinbase2 = __json_array_string(val, 3);
  1502. bbversion = __json_array_string(val, 5);
  1503. nbit = __json_array_string(val, 6);
  1504. ntime = __json_array_string(val, 7);
  1505. clean = json_is_true(json_array_get(val, 8));
  1506. if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime)
  1507. goto out;
  1508. job_id = json_array_string(val, 0);
  1509. if (!job_id)
  1510. goto out;
  1511. cg_wlock(&pool->data_lock);
  1512. cgtime(&pool->swork.tv_received);
  1513. free(pool->swork.job_id);
  1514. pool->swork.job_id = job_id;
  1515. pool->submit_old = !clean;
  1516. pool->swork.clean = true;
  1517. hex2bin(&pool->swork.header1[0], bbversion, 4);
  1518. hex2bin(&pool->swork.header1[4], prev_hash, 32);
  1519. hex2bin((void*)&pool->swork.ntime, ntime, 4);
  1520. pool->swork.ntime = be32toh(pool->swork.ntime);
  1521. hex2bin(&pool->swork.diffbits[0], nbit, 4);
  1522. cb1_len = strlen(coinbase1) / 2;
  1523. pool->swork.nonce2_offset = cb1_len + pool->n1_len;
  1524. cb2_len = strlen(coinbase2) / 2;
  1525. bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len);
  1526. uint8_t *coinbase = bytes_buf(&pool->swork.coinbase);
  1527. hex2bin(coinbase, coinbase1, cb1_len);
  1528. hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len);
  1529. // NOTE: gap for nonce2, filled at work generation time
  1530. hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len);
  1531. bytes_resize(&pool->swork.merkle_bin, 32 * merkles);
  1532. for (i = 0; i < merkles; i++)
  1533. hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32);
  1534. pool->swork.merkles = merkles;
  1535. pool->nonce2 = 0;
  1536. cg_wunlock(&pool->data_lock);
  1537. applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s",
  1538. pool->pool_no, job_id);
  1539. if (opt_debug && opt_protocol)
  1540. {
  1541. applog(LOG_DEBUG, "job_id: %s", job_id);
  1542. applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
  1543. applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
  1544. applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
  1545. for (i = 0; i < merkles; i++)
  1546. applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i)));
  1547. applog(LOG_DEBUG, "bbversion: %s", bbversion);
  1548. applog(LOG_DEBUG, "nbit: %s", nbit);
  1549. applog(LOG_DEBUG, "ntime: %s", ntime);
  1550. applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
  1551. }
  1552. /* A notify message is the closest stratum gets to a getwork */
  1553. pool->getwork_requested++;
  1554. total_getworks++;
  1555. if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
  1556. if (pool->probed)
  1557. stratum_probe_transparency(pool);
  1558. ret = true;
  1559. out:
  1560. return ret;
  1561. }
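/* Layout sketch: how parse_notify above assembles the coinbase image. The
 * nonce2 bytes are deliberately left as a gap and only filled in at work
 * generation time.
 *
 *   offset 0        cb1_len       nonce2_offset   nonce2_offset + n2size
 *   | coinbase1 ... | nonce1 ... | nonce2 gap ... | coinbase2 ...        |
 */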
  1562. static bool parse_diff(struct pool *pool, json_t *val)
  1563. {
  1564. double diff;
  1565. diff = json_number_value(json_array_get(val, 0));
  1566. if (diff == 0)
  1567. return false;
  1568. cg_wlock(&pool->data_lock);
  1569. pool->swork.diff = diff;
  1570. cg_wunlock(&pool->data_lock);
  1571. applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff);
  1572. return true;
  1573. }
  1574. static bool parse_reconnect(struct pool *pool, json_t *val)
  1575. {
  1576. const char *url, *port;
  1577. char address[256];
  1578. url = __json_array_string(val, 0);
  1579. if (!url)
  1580. url = pool->sockaddr_url;
  1581. port = __json_array_string(val, 1);
  1582. if (!port)
  1583. port = pool->stratum_port;
  1584. snprintf(address, sizeof(address), "%s:%s", url, port);
  1585. if (!extract_sockaddr(address, &pool->sockaddr_url, &pool->stratum_port))
  1586. return false;
  1587. pool->stratum_url = pool->sockaddr_url;
  1588. applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address);
  1589. if (!restart_stratum(pool))
  1590. return false;
  1591. return true;
  1592. }
  1593. static bool send_version(struct pool *pool, json_t *val)
  1594. {
  1595. char s[RBUFSIZE], *idstr;
  1596. json_t *id = json_object_get(val, "id");
  1597. if (!(id && !json_is_null(id)))
  1598. return false;
  1599. idstr = json_dumps_ANY(id, 0);
  1600. sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr);
  1601. free(idstr);
  1602. if (!stratum_send(pool, s, strlen(s)))
  1603. return false;
  1604. return true;
  1605. }
  1606. static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params)
  1607. {
  1608. char *msg;
  1609. char s[RBUFSIZE], *idstr;
  1610. json_t *id = json_object_get(val, "id");
  1611. msg = json_array_string(params, 0);
  1612. if (likely(msg))
  1613. {
  1614. free(pool->admin_msg);
  1615. pool->admin_msg = msg;
  1616. applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg);
  1617. }
  1618. if (!(id && !json_is_null(id)))
  1619. return true;
  1620. idstr = json_dumps_ANY(id, 0);
  1621. if (likely(msg))
  1622. sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr);
  1623. else
  1624. sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr);
  1625. free(idstr);
  1626. if (!stratum_send(pool, s, strlen(s)))
  1627. return false;
  1628. return true;
  1629. }
  1630. bool parse_method(struct pool *pool, char *s)
  1631. {
  1632. json_t *val = NULL, *method, *err_val, *params;
  1633. json_error_t err;
  1634. bool ret = false;
  1635. const char *buf;
  1636. if (!s)
  1637. goto out;
  1638. val = JSON_LOADS(s, &err);
  1639. if (!val) {
  1640. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1641. goto out;
  1642. }
  1643. method = json_object_get(val, "method");
  1644. if (!method)
  1645. goto out;
  1646. err_val = json_object_get(val, "error");
  1647. params = json_object_get(val, "params");
  1648. if (err_val && !json_is_null(err_val)) {
  1649. char *ss;
  1650. if (err_val)
  1651. ss = json_dumps(err_val, JSON_INDENT(3));
  1652. else
  1653. ss = strdup("(unknown reason)");
  1654. applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);
  1655. free(ss);
  1656. goto out;
  1657. }
  1658. buf = json_string_value(method);
  1659. if (!buf)
  1660. goto out;
  1661. if (!strncasecmp(buf, "mining.notify", 13)) {
  1662. if (parse_notify(pool, params))
  1663. pool->stratum_notify = ret = true;
  1664. else
  1665. pool->stratum_notify = ret = false;
  1666. goto out;
  1667. }
  1668. if (!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) {
  1669. ret = true;
  1670. goto out;
  1671. }
  1672. if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) {
  1673. ret = true;
  1674. goto out;
  1675. }
  1676. if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) {
  1677. ret = true;
  1678. goto out;
  1679. }
  1680. if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) {
  1681. ret = true;
  1682. goto out;
  1683. }
  1684. out:
  1685. if (val)
  1686. json_decref(val);
  1687. return ret;
  1688. }
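/* Usage sketch (not compiled): parse_method is fed raw lines such as those
 * returned by recv_line; it returns true only when the line is a recognised
 * notification that was handled successfully. */
#if 0
	char *line = recv_line(pool);
	if (line && parse_method(pool, line))
		;  /* handled: mining.notify, mining.set_difficulty, client.reconnect, ... */
	free(line);
#endif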
  1689. extern bool parse_stratum_response(struct pool *, char *s);
  1690. bool auth_stratum(struct pool *pool)
  1691. {
  1692. json_t *val = NULL, *res_val, *err_val;
  1693. char s[RBUFSIZE], *sret = NULL;
  1694. json_error_t err;
  1695. bool ret = false;
  1696. sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
  1697. pool->rpc_user, pool->rpc_pass);
  1698. if (!stratum_send(pool, s, strlen(s)))
  1699. goto out;
  1700. /* Parse all data in the queue and anything left should be auth */
  1701. while (42) {
  1702. sret = recv_line(pool);
  1703. if (!sret)
  1704. goto out;
  1705. if (parse_method(pool, sret))
  1706. free(sret);
  1707. else
  1708. break;
  1709. }
  1710. val = JSON_LOADS(sret, &err);
  1711. free(sret);
  1712. res_val = json_object_get(val, "result");
  1713. err_val = json_object_get(val, "error");
  1714. if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
  1715. char *ss;
  1716. if (err_val)
  1717. ss = json_dumps(err_val, JSON_INDENT(3));
  1718. else
  1719. ss = strdup("(unknown reason)");
  1720. applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
  1721. free(ss);
  1722. goto out;
  1723. }
  1724. ret = true;
  1725. applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
  1726. pool->probed = true;
  1727. successful_connect = true;
  1728. out:
  1729. if (val)
  1730. json_decref(val);
  1731. if (pool->stratum_notify)
  1732. stratum_probe_transparency(pool);
  1733. return ret;
  1734. }
  1735. curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
  1736. {
  1737. struct pool *pool = clientp;
  1738. curl_socket_t sck = socket(addr->family, addr->socktype, addr->protocol);
  1739. pool->sock = sck;
  1740. return sck;
  1741. }
  1742. static bool setup_stratum_curl(struct pool *pool)
  1743. {
  1744. char curl_err_str[CURL_ERROR_SIZE];
  1745. CURL *curl = NULL;
  1746. char s[RBUFSIZE];
  1747. bool ret = false;
  1748. applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
  1749. mutex_lock(&pool->stratum_lock);
  1750. timer_unset(&pool->swork.tv_transparency);
  1751. pool->stratum_active = false;
  1752. pool->stratum_notify = false;
  1753. pool->swork.transparency_probed = false;
  1754. if (pool->stratum_curl)
  1755. curl_easy_cleanup(pool->stratum_curl);
  1756. pool->stratum_curl = curl_easy_init();
  1757. if (unlikely(!pool->stratum_curl))
  1758. quithere(1, "Failed to curl_easy_init");
  1759. if (pool->sockbuf)
  1760. pool->sockbuf[0] = '\0';
  1761. curl = pool->stratum_curl;
  1762. if (!pool->sockbuf) {
  1763. pool->sockbuf = calloc(RBUFSIZE, 1);
  1764. if (!pool->sockbuf)
  1765. quithere(1, "Failed to calloc pool sockbuf");
  1766. pool->sockbuf_size = RBUFSIZE;
  1767. }
  1768. /* Create a http url for use with curl */
  1769. sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port);
  1770. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  1771. curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30);
  1772. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
  1773. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  1774. curl_easy_setopt(curl, CURLOPT_URL, s);
  1775. if (!opt_delaynet)
  1776. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  1777. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  1778. * to enable it */
  1779. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  1780. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  1781. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  1782. // CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this hack for now
  1783. curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb);
  1784. curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
  1785. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  1786. if (pool->rpc_proxy) {
  1787. curl_easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1);
  1788. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  1789. } else if (opt_socks_proxy) {
  1790. curl_easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1);
  1791. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  1792. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
  1793. }
  1794. curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
  1795. pool->sock = INVSOCK;
  1796. if (curl_easy_perform(curl)) {
  1797. applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
  1798. errout:
  1799. curl_easy_cleanup(curl);
  1800. pool->stratum_curl = NULL;
  1801. goto out;
  1802. }
  1803. if (pool->sock == INVSOCK)
  1804. {
  1805. applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no);
  1806. goto errout;
  1807. }
  1808. keep_sockalive(pool->sock);
  1809. pool->cgminer_pool_stats.times_sent++;
  1810. pool->cgminer_pool_stats.times_received++;
  1811. ret = true;
  1812. out:
  1813. mutex_unlock(&pool->stratum_lock);
  1814. return ret;
  1815. }
  1816. static char *get_sessionid(json_t *val)
  1817. {
  1818. char *ret = NULL;
  1819. json_t *arr_val;
  1820. int arrsize, i;
  1821. arr_val = json_array_get(val, 0);
  1822. if (!arr_val || !json_is_array(arr_val))
  1823. goto out;
  1824. arrsize = json_array_size(arr_val);
  1825. for (i = 0; i < arrsize; i++) {
  1826. json_t *arr = json_array_get(arr_val, i);
  1827. const char *notify;
1828. if (!arr || !json_is_array(arr))
  1829. break;
  1830. notify = __json_array_string(arr, 0);
  1831. if (!notify)
  1832. continue;
  1833. if (!strncasecmp(notify, "mining.notify", 13)) {
  1834. ret = json_array_string(arr, 1);
  1835. break;
  1836. }
  1837. }
  1838. out:
  1839. return ret;
  1840. }
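/* Shape sketch: get_sessionid walks a mining.subscribe result that, by
 * stratum convention, looks roughly like
 *
 *   [ [ ["mining.set_difficulty", "subid"], ["mining.notify", "SESSIONID"] ],
 *     "extranonce1-hex", extranonce2_size ]
 *
 * and returns a malloced copy of the id paired with "mining.notify". */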
  1841. void suspend_stratum(struct pool *pool)
  1842. {
  1843. clear_sockbuf(pool);
  1844. applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);
  1845. mutex_lock(&pool->stratum_lock);
  1846. pool->stratum_active = pool->stratum_notify = false;
  1847. if (pool->stratum_curl) {
  1848. curl_easy_cleanup(pool->stratum_curl);
  1849. }
  1850. pool->stratum_curl = NULL;
  1851. pool->sock = INVSOCK;
  1852. mutex_unlock(&pool->stratum_lock);
  1853. }
  1854. bool initiate_stratum(struct pool *pool)
  1855. {
  1856. bool ret = false, recvd = false, noresume = false, sockd = false;
  1857. bool trysuggest = request_target_str;
  1858. char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid;
  1859. json_t *val = NULL, *res_val, *err_val;
  1860. json_error_t err;
  1861. int n2size;
  1862. resend:
  1863. if (!setup_stratum_curl(pool)) {
  1864. sockd = false;
  1865. goto out;
  1866. }
  1867. sockd = true;
  1868. clear_sock(pool);
  1869. if (trysuggest)
  1870. {
  1871. int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str);
  1872. if (!_stratum_send(pool, s, sz, true))
  1873. {
  1874. applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no);
  1875. goto out;
  1876. }
  1877. recvd = true;
  1878. }
  1879. if (noresume) {
  1880. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
  1881. } else {
  1882. if (pool->sessionid)
  1883. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
  1884. else
  1885. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
  1886. }
  1887. if (!_stratum_send(pool, s, strlen(s), true)) {
  1888. applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
  1889. goto out;
  1890. }
  1891. recvd = true;
  1892. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1893. applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
  1894. goto out;
  1895. }
  1896. sret = recv_line(pool);
  1897. if (!sret)
  1898. goto out;
  1899. val = JSON_LOADS(sret, &err);
  1900. free(sret);
  1901. if (!val) {
  1902. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1903. goto out;
  1904. }
  1905. res_val = json_object_get(val, "result");
  1906. err_val = json_object_get(val, "error");
  1907. if (!res_val || json_is_null(res_val) ||
  1908. (err_val && !json_is_null(err_val))) {
  1909. char *ss;
  1910. if (err_val)
  1911. ss = json_dumps(err_val, JSON_INDENT(3));
  1912. else
  1913. ss = strdup("(unknown reason)");
  1914. applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);
  1915. free(ss);
  1916. goto out;
  1917. }
  1918. sessionid = get_sessionid(res_val);
  1919. if (!sessionid)
  1920. applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
  1921. nonce1 = json_array_string(res_val, 1);
  1922. if (!nonce1) {
  1923. applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");
  1924. free(sessionid);
  1925. goto out;
  1926. }
  1927. n2size = json_integer_value(json_array_get(res_val, 2));
  1928. if (!n2size) {
  1929. applog(LOG_INFO, "Failed to get n2size in initiate_stratum");
  1930. free(sessionid);
  1931. free(nonce1);
  1932. goto out;
  1933. }
  1934. cg_wlock(&pool->data_lock);
  1935. free(pool->sessionid);
  1936. pool->sessionid = sessionid;
  1937. free(pool->nonce1);
  1938. pool->nonce1 = nonce1;
  1939. pool->n1_len = strlen(nonce1) / 2;
  1940. pool->n2size = n2size;
  1941. pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size;
  1942. #ifdef WORDS_BIGENDIAN
  1943. pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0;
  1944. #endif
  1945. cg_wunlock(&pool->data_lock);
  1946. if (sessionid)
  1947. applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);
  1948. ret = true;
  1949. out:
  1950. if (val)
  1951. {
  1952. json_decref(val);
  1953. val = NULL;
  1954. }
  1955. if (ret) {
  1956. if (!pool->stratum_url)
  1957. pool->stratum_url = pool->sockaddr_url;
  1958. pool->stratum_active = true;
  1959. pool->swork.diff = 1;
  1960. if (opt_protocol) {
  1961. applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
  1962. pool->pool_no, pool->nonce1, pool->n2size);
  1963. }
  1964. } else {
  1965. if (recvd)
  1966. {
  1967. if (trysuggest)
  1968. {
  1969. applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no);
  1970. trysuggest = false;
  1971. goto resend;
  1972. }
  1973. if (!noresume)
  1974. {
  1975. applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
  1976. noresume = true;
  1977. goto resend;
  1978. }
  1979. }
  1980. applog(LOG_DEBUG, "Initiate stratum failed");
  1981. if (sockd)
  1982. suspend_stratum(pool);
  1983. }
  1984. return ret;
  1985. }
  1986. bool restart_stratum(struct pool *pool)
  1987. {
  1988. if (pool->stratum_active)
  1989. suspend_stratum(pool);
  1990. if (!initiate_stratum(pool))
  1991. return false;
  1992. if (!auth_stratum(pool))
  1993. return false;
  1994. return true;
  1995. }
  1996. void dev_error_update(struct cgpu_info *dev, enum dev_reason reason)
  1997. {
  1998. dev->device_last_not_well = time(NULL);
  1999. cgtime(&dev->tv_device_last_not_well);
  2000. dev->device_not_well_reason = reason;
  2001. }
  2002. void dev_error(struct cgpu_info *dev, enum dev_reason reason)
  2003. {
  2004. dev_error_update(dev, reason);
  2005. switch (reason) {
  2006. case REASON_THREAD_FAIL_INIT:
  2007. dev->thread_fail_init_count++;
  2008. break;
  2009. case REASON_THREAD_ZERO_HASH:
  2010. dev->thread_zero_hash_count++;
  2011. break;
  2012. case REASON_THREAD_FAIL_QUEUE:
  2013. dev->thread_fail_queue_count++;
  2014. break;
  2015. case REASON_DEV_SICK_IDLE_60:
  2016. dev->dev_sick_idle_60_count++;
  2017. break;
  2018. case REASON_DEV_DEAD_IDLE_600:
  2019. dev->dev_dead_idle_600_count++;
  2020. break;
  2021. case REASON_DEV_NOSTART:
  2022. dev->dev_nostart_count++;
  2023. break;
  2024. case REASON_DEV_OVER_HEAT:
  2025. dev->dev_over_heat_count++;
  2026. break;
  2027. case REASON_DEV_THERMAL_CUTOFF:
  2028. dev->dev_thermal_cutoff_count++;
  2029. break;
  2030. case REASON_DEV_COMMS_ERROR:
  2031. dev->dev_comms_error_count++;
  2032. break;
  2033. case REASON_DEV_THROTTLE:
  2034. dev->dev_throttle_count++;
  2035. break;
  2036. }
  2037. }
2038. /* Append the string s to ptr in a freshly malloced buffer, freeing ptr and returning the new string. */
  2039. void *realloc_strcat(char *ptr, char *s)
  2040. {
  2041. size_t old = strlen(ptr), len = strlen(s);
  2042. char *ret;
  2043. if (!len)
  2044. return ptr;
  2045. len += old + 1;
  2046. align_len(&len);
  2047. ret = malloc(len);
  2048. if (unlikely(!ret))
  2049. quithere(1, "Failed to malloc");
  2050. sprintf(ret, "%s%s", ptr, s);
  2051. free(ptr);
  2052. return ret;
  2053. }
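/* Usage sketch (not compiled): callers must start from a malloced string and
 * keep reassigning the return value, since the old pointer is freed. */
#if 0
	char *buf = strdup("");
	buf = realloc_strcat(buf, "hello ");
	buf = realloc_strcat(buf, "world");
	free(buf);
#endif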
  2054. static
  2055. bool sanechars[] = {
  2056. false, false, false, false, false, false, false, false,
  2057. false, false, false, false, false, false, false, false,
  2058. false, false, false, false, false, false, false, false,
  2059. false, false, false, false, false, false, false, false,
  2060. false, false, false, false, false, false, false, false,
  2061. false, false, false, false, false, false, false, false,
  2062. true , true , true , true , true , true , true , true ,
  2063. true , true , false, false, false, false, false, false,
  2064. false, true , true , true , true , true , true , true ,
  2065. true , true , true , true , true , true , true , true ,
  2066. true , true , true , true , true , true , true , true ,
  2067. true , true , true , false, false, false, false, false,
  2068. false, true , true , true , true , true , true , true ,
  2069. true , true , true , true , true , true , true , true ,
  2070. true , true , true , true , true , true , true , true ,
  2071. true , true , true , false, false, false, false, false,
  2072. };
  2073. char *sanestr(char *o, char *s)
  2074. {
  2075. char *rv = o;
  2076. bool br = false;
  2077. for ( ; s[0]; ++s)
  2078. {
  2079. if (sanechars[s[0] & 0x7f])
  2080. {
  2081. if (br)
  2082. {
  2083. br = false;
  2084. if (s[0] >= '0' && s[0] <= '9')
  2085. (o++)[0] = '_';
  2086. }
  2087. (o++)[0] = s[0];
  2088. }
  2089. else
  2090. if (o != s && o[-1] >= '0' && o[-1] <= '9')
  2091. br = true;
  2092. }
  2093. o[0] = '\0';
  2094. return rv;
  2095. }
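/* Behaviour sketch (not compiled): sanestr keeps only [A-Za-z0-9] and inserts
 * an underscore where stripping a separator would otherwise fuse two digits. */
#if 0
	char out[16];
	sanestr(out, "BFL 1.2");  /* out becomes "BFL1_2" */
	sanestr(out, "foo-bar");  /* out becomes "foobar" */
#endif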
  2096. void RenameThread(const char* name)
  2097. {
  2098. #if defined(PR_SET_NAME)
  2099. // Only the first 15 characters are used (16 - NUL terminator)
  2100. prctl(PR_SET_NAME, name, 0, 0, 0);
  2101. #elif defined(__APPLE__)
  2102. pthread_setname_np(name);
  2103. #elif (defined(__FreeBSD__) || defined(__OpenBSD__))
  2104. pthread_set_name_np(pthread_self(), name);
  2105. #else
  2106. // Prevent warnings for unused parameters...
  2107. (void)name;
  2108. #endif
  2109. }
  2110. static pthread_key_t key_bfgtls;
  2111. struct bfgtls_data {
  2112. char *bfg_strerror_result;
  2113. size_t bfg_strerror_resultsz;
  2114. #ifdef WIN32
  2115. LPSTR bfg_strerror_socketresult;
  2116. #endif
  2117. #ifdef NEED_BFG_LOWL_VCOM
  2118. struct detectone_meta_info_t __detectone_meta_info;
  2119. #endif
  2120. };
  2121. static
  2122. struct bfgtls_data *get_bfgtls()
  2123. {
  2124. struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls);
  2125. if (bfgtls)
  2126. return bfgtls;
  2127. void *p;
  2128. bfgtls = malloc(sizeof(*bfgtls));
  2129. if (!bfgtls)
  2130. quithere(1, "malloc bfgtls failed");
  2131. p = malloc(64);
  2132. if (!p)
  2133. quithere(1, "malloc bfg_strerror_result failed");
  2134. *bfgtls = (struct bfgtls_data){
  2135. .bfg_strerror_resultsz = 64,
  2136. .bfg_strerror_result = p,
  2137. };
  2138. if (pthread_setspecific(key_bfgtls, bfgtls))
  2139. quithere(1, "pthread_setspecific failed");
  2140. return bfgtls;
  2141. }
  2142. #ifdef NEED_BFG_LOWL_VCOM
  2143. struct detectone_meta_info_t *_detectone_meta_info()
  2144. {
  2145. return &get_bfgtls()->__detectone_meta_info;
  2146. }
  2147. #endif
  2148. void bfg_init_threadlocal()
  2149. {
  2150. if (pthread_key_create(&key_bfgtls, NULL))
  2151. quithere(1, "pthread_key_create failed");
  2152. }
  2153. static
  2154. bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum)
  2155. {
  2156. if (minimum <= *bufszp)
  2157. return false;
  2158. while (minimum > *bufszp)
2159. *bufszp *= 2;
  2160. *bufp = realloc(*bufp, *bufszp);
  2161. if (unlikely(!*bufp))
  2162. quithere(1, "realloc failed");
  2163. return true;
  2164. }
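/* Usage sketch (not compiled): the buffer doubles until it can hold at least
 * `minimum` bytes; the return value reports whether a reallocation happened. */
#if 0
	char *buf = malloc(64);
	size_t bufsz = 64;
	if (bfg_grow_buffer(&buf, &bufsz, 200))
		;  /* bufsz is now 256 and buf points at the realloc'd block */
	free(buf);
#endif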
  2165. static
  2166. const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src)
  2167. {
  2168. if (!src)
  2169. return NULL;
  2170. const size_t srcsz = strlen(src) + 1;
  2171. bfg_grow_buffer(bufp, bufszp, srcsz);
  2172. memcpy(*bufp, src, srcsz);
  2173. return *bufp;
  2174. }
  2175. // Guaranteed to always return some string (or quit)
  2176. const char *bfg_strerror(int e, enum bfg_strerror_type type)
  2177. {
  2178. static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  2179. struct bfgtls_data *bfgtls = get_bfgtls();
  2180. size_t * const bufszp = &bfgtls->bfg_strerror_resultsz;
  2181. char ** const bufp = &bfgtls->bfg_strerror_result;
  2182. const char *have = NULL;
  2183. switch (type) {
  2184. case BST_LIBUSB:
  2185. // NOTE: Nested preprocessor checks since the latter isn't defined at all without the former
  2186. #ifdef HAVE_LIBUSB
  2187. # if HAVE_DECL_LIBUSB_ERROR_NAME
  2188. // libusb makes no guarantees for thread-safety or persistence
  2189. mutex_lock(&mutex);
  2190. have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e));
  2191. mutex_unlock(&mutex);
  2192. # endif
  2193. #endif
  2194. break;
  2195. case BST_SOCKET:
  2196. case BST_SYSTEM:
  2197. {
  2198. #ifdef WIN32
  2199. // Windows has a different namespace for system and socket errors
  2200. LPSTR *msg = &bfgtls->bfg_strerror_socketresult;
  2201. if (*msg)
  2202. LocalFree(*msg);
  2203. if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0))
  2204. return *msg;
  2205. *msg = NULL;
  2206. break;
  2207. #endif
  2208. }
  2209. // Fallthru on non-WIN32
  2210. case BST_ERRNO:
  2211. {
  2212. #ifdef __STRERROR_S_WORKS
  2213. // FIXME: Not sure how to get this on MingW64
  2214. retry:
  2215. if (likely(!strerror_s(*bufp, *bufszp, e)))
  2216. {
  2217. if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2))
  2218. goto retry;
  2219. return *bufp;
  2220. }
  2221. // TODO: XSI strerror_r
  2222. // TODO: GNU strerror_r
  2223. #else
  2224. mutex_lock(&mutex);
  2225. have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e));
  2226. mutex_unlock(&mutex);
  2227. #endif
  2228. }
  2229. }
  2230. if (have)
  2231. return *bufp;
2232. // Fallback: stringify the error number
  2233. static const char fmt[] = "%s error #%d", *typestr;
  2234. switch (type) {
  2235. case BST_ERRNO:
  2236. typestr = "System";
  2237. break;
  2238. case BST_SOCKET:
  2239. typestr = "Socket";
  2240. break;
  2241. case BST_LIBUSB:
  2242. typestr = "libusb";
  2243. break;
  2244. default:
  2245. typestr = "Unexpected";
  2246. }
  2247. int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1;
  2248. bfg_grow_buffer(bufp, bufszp, sz);
  2249. sprintf(*bufp, fmt, typestr, e);
  2250. return *bufp;
  2251. }
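/* Usage sketch (not compiled): the result lives in thread-local storage, so
 * the returned pointer stays valid until the same thread calls bfg_strerror
 * again; it must not be freed by the caller. */
#if 0
	applog(LOG_DEBUG, "recv failed: %s", bfg_strerror(SOCKERR, BST_SOCKET));
#endif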
  2252. void notifier_init(notifier_t pipefd)
  2253. {
  2254. #ifdef WIN32
  2255. #define WindowsErrorStr(e) bfg_strerror(e, BST_SOCKET)
  2256. SOCKET listener, connecter, acceptor;
  2257. listener = socket(AF_INET, SOCK_STREAM, 0);
  2258. if (listener == INVALID_SOCKET)
  2259. quit(1, "Failed to create listener socket"IN_FMT_FFL": %s",
  2260. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2261. connecter = socket(AF_INET, SOCK_STREAM, 0);
  2262. if (connecter == INVALID_SOCKET)
  2263. quit(1, "Failed to create connect socket"IN_FMT_FFL": %s",
  2264. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2265. struct sockaddr_in inaddr = {
  2266. .sin_family = AF_INET,
  2267. .sin_addr = {
  2268. .s_addr = htonl(INADDR_LOOPBACK),
  2269. },
  2270. .sin_port = 0,
  2271. };
  2272. {
  2273. static const int reuse = 1;
  2274. setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse));
  2275. }
  2276. if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR)
  2277. quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s",
  2278. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2279. socklen_t inaddr_sz = sizeof(inaddr);
  2280. if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR)
  2281. quit(1, "Failed to getsockname"IN_FMT_FFL": %s",
  2282. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2283. if (listen(listener, 1) == SOCKET_ERROR)
  2284. quit(1, "Failed to listen"IN_FMT_FFL": %s",
  2285. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2286. inaddr.sin_family = AF_INET;
  2287. inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  2288. if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR)
  2289. quit(1, "Failed to connect"IN_FMT_FFL": %s",
  2290. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2291. acceptor = accept(listener, NULL, NULL);
  2292. if (acceptor == INVALID_SOCKET)
  2293. quit(1, "Failed to accept"IN_FMT_FFL": %s",
  2294. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2295. closesocket(listener);
  2296. pipefd[0] = connecter;
  2297. pipefd[1] = acceptor;
  2298. #else
  2299. if (pipe(pipefd))
  2300. quithere(1, "Failed to create pipe");
  2301. #endif
  2302. }
  2303. void notifier_wake(notifier_t fd)
  2304. {
  2305. if (fd[1] == INVSOCK)
  2306. return;
  2307. if (1 !=
  2308. #ifdef WIN32
  2309. send(fd[1], "\0", 1, 0)
  2310. #else
  2311. write(fd[1], "\0", 1)
  2312. #endif
  2313. )
  2314. applog(LOG_WARNING, "Error trying to wake notifier");
  2315. }
  2316. void notifier_read(notifier_t fd)
  2317. {
  2318. char buf[0x10];
  2319. #ifdef WIN32
  2320. IGNORE_RETURN_VALUE(recv(fd[0], buf, sizeof(buf), 0));
  2321. #else
  2322. IGNORE_RETURN_VALUE(read(fd[0], buf, sizeof(buf)));
  2323. #endif
  2324. }
  2325. void notifier_init_invalid(notifier_t fd)
  2326. {
  2327. fd[0] = fd[1] = INVSOCK;
  2328. }
  2329. void notifier_destroy(notifier_t fd)
  2330. {
  2331. #ifdef WIN32
  2332. closesocket(fd[0]);
  2333. closesocket(fd[1]);
  2334. #else
  2335. close(fd[0]);
  2336. close(fd[1]);
  2337. #endif
  2338. fd[0] = fd[1] = INVSOCK;
  2339. }
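/* Usage sketch (not compiled), assuming notifier_t is the two-descriptor
 * array type used throughout this file: a notifier behaves like a pipe that
 * select()/poll() can wait on alongside real sockets; one thread wakes it,
 * another drains it. */
#if 0
	notifier_t waker;
	notifier_init(waker);
	/* waking thread:  */ notifier_wake(waker);
	/* waiting thread: select() on waker[0], then */ notifier_read(waker);
	notifier_destroy(waker);
#endif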
  2340. void _bytes_alloc_failure(size_t sz)
  2341. {
  2342. quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz);
  2343. }
  2344. void *cmd_thread(void *cmdp)
  2345. {
  2346. const char *cmd = cmdp;
  2347. applog(LOG_DEBUG, "Executing command: %s", cmd);
  2348. int rc = system(cmd);
  2349. if (rc)
  2350. applog(LOG_WARNING, "Command returned %d exit code: %s", rc, cmd);
  2351. return NULL;
  2352. }
  2353. void run_cmd(const char *cmd)
  2354. {
  2355. if (!cmd)
  2356. return;
  2357. pthread_t pth;
  2358. pthread_create(&pth, NULL, cmd_thread, (void*)cmd);
  2359. }