/*
 * Copyright 2011-2013 Con Kolivas
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2010 Jeff Garzik
 * Copyright 2012 Giel van Schijndel
 * Copyright 2012 Gavin Andresen
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <stdarg.h>
#include <string.h>
#include <pthread.h>
#include <jansson.h>
#include <curl/curl.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#ifdef HAVE_SYS_PRCTL_H
# include <sys/prctl.h>
#endif
#if defined(__FreeBSD__) || defined(__OpenBSD__)
# include <pthread_np.h>
#endif
#ifndef WIN32
#include <fcntl.h>
# ifdef __linux
#  include <sys/prctl.h>
# endif
# include <sys/socket.h>
# include <netinet/in.h>
# include <netinet/tcp.h>
# include <netdb.h>
#else
# include <windows.h>
# include <winsock2.h>
# include <mstcpip.h>
# include <ws2tcpip.h>
# include <mmsystem.h>
#endif

#include <utlist.h>

#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "compat.h"
#include "util.h"

#define DEFAULT_SOCKWAIT 60

bool successful_connect = false;
struct timeval nettime;

struct data_buffer {
	void *buf;
	size_t len;
	curl_socket_t *idlemarker;
};

struct upload_buffer {
	const void *buf;
	size_t len;
};

struct header_info {
	char *lp_path;
	int rolltime;
	char *reason;
	char *stratum_url;
	bool hadrolltime;
	bool canroll;
	bool hadexpire;
};

struct tq_ent {
	void *data;
	struct tq_ent *prev;
	struct tq_ent *next;
};

static void databuf_free(struct data_buffer *db)
{
	if (!db)
		return;

	free(db->buf);
#ifdef DEBUG_DATABUF
	applog(LOG_DEBUG, "databuf_free(%p)", db->buf);
#endif

	memset(db, 0, sizeof(*db));
}

// aka data_buffer_write
static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb,
			  void *user_data)
{
	struct data_buffer *db = user_data;
	size_t oldlen, newlen;

	oldlen = db->len;
	if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size))
		return 0;
	if (unlikely(nmemb > (SIZE_MAX - oldlen) / size))
		nmemb = (SIZE_MAX - oldlen) / size;

	size_t len = size * nmemb;
	void *newmem;
	static const unsigned char zero = 0;

	if (db->idlemarker) {
		const unsigned char *cptr = ptr;
		for (size_t i = 0; i < len; ++i)
			if (!(isCspace(cptr[i]) || cptr[i] == '{')) {
				*db->idlemarker = CURL_SOCKET_BAD;
				db->idlemarker = NULL;
				break;
			}
	}

	newlen = oldlen + len;
	newmem = realloc(db->buf, newlen + 1);
#ifdef DEBUG_DATABUF
	applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem);
#endif
	if (!newmem)
		return 0;

	db->buf = newmem;
	db->len = newlen;
	memcpy(db->buf + oldlen, ptr, len);
	memcpy(db->buf + newlen, &zero, 1);	/* null terminate */

	return nmemb;
}
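
/*
 * Usage sketch (illustrative, not from the original source; 'curl' and 'db'
 * are hypothetical locals): all_data_cb is meant to be wired up as curl's
 * write callback, accumulating the response into a NUL-terminated buffer.
 *
 *	struct data_buffer db = { .buf = NULL, .len = 0, .idlemarker = NULL };
 *	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
 *	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &db);
 *	// after curl_easy_perform(), db.buf holds the NUL-terminated body
 *	databuf_free(&db);
 */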

static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb,
			     void *user_data)
{
	struct upload_buffer *ub = user_data;
	unsigned int len = size * nmemb;

	if (len > ub->len)
		len = ub->len;

	if (len) {
		memcpy(ptr, ub->buf, len);
		ub->buf += len;
		ub->len -= len;
	}

	return len;
}

static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
{
	struct header_info *hi = user_data;
	size_t remlen, slen, ptrlen = size * nmemb;
	char *rem, *val = NULL, *key = NULL;
	void *tmp;

	val = calloc(1, ptrlen);
	key = calloc(1, ptrlen);
	if (!key || !val)
		goto out;

	tmp = memchr(ptr, ':', ptrlen);
	if (!tmp || (tmp == ptr))	/* skip empty keys / blanks */
		goto out;
	slen = tmp - ptr;
	if ((slen + 1) == ptrlen)	/* skip key w/ no value */
		goto out;
	memcpy(key, ptr, slen);		/* store & nul term key */
	key[slen] = 0;

	rem = ptr + slen + 1;		/* trim value's leading whitespace */
	remlen = ptrlen - slen - 1;
	while ((remlen > 0) && (isCspace(*rem))) {
		remlen--;
		rem++;
	}

	memcpy(val, rem, remlen);	/* store value, trim trailing ws */
	val[remlen] = 0;
	while ((*val) && (isCspace(val[strlen(val) - 1])))
		val[strlen(val) - 1] = 0;
	if (!*val)			/* skip blank value */
		goto out;

	if (opt_protocol)
		applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);

	if (!strcasecmp("X-Roll-Ntime", key)) {
		hi->hadrolltime = true;
		if (!strncasecmp("N", val, 1))
			applog(LOG_DEBUG, "X-Roll-Ntime: N found");
		else {
			hi->canroll = true;
			/* Check to see if expire= is supported and if not, set
			 * the rolltime to the default scantime */
			if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) {
				sscanf(val + 7, "%d", &hi->rolltime);
				hi->hadexpire = true;
			} else
				hi->rolltime = opt_scantime;
			applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
		}
	}

	if (!strcasecmp("X-Long-Polling", key)) {
		hi->lp_path = val;	/* steal memory reference */
		val = NULL;
	}

	if (!strcasecmp("X-Reject-Reason", key)) {
		hi->reason = val;	/* steal memory reference */
		val = NULL;
	}

	if (!strcasecmp("X-Stratum", key)) {
		hi->stratum_url = val;
		val = NULL;
	}

out:
	free(key);
	free(val);
	return ptrlen;
}

static int keep_sockalive(SOCKETTYPE fd)
{
	const int tcp_one = 1;
	const int tcp_keepidle = 45;
	const int tcp_keepintvl = 30;
	int ret = 0;

	if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one))))
		ret = 1;

#ifndef WIN32
	int flags = fcntl(fd, F_GETFL, 0);
	fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
	u_long flags = 1;
	ioctlsocket(fd, FIONBIO, &flags);
#endif

	if (!opt_delaynet)
#ifndef __linux
		if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
#else /* __linux */
		if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
#endif /* __linux */
			ret = 1;

#ifdef __linux
	if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one))))
		ret = 1;
	if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle))))
		ret = 1;
	if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl))))
		ret = 1;
#endif /* __linux */

#ifdef __APPLE_CC__
	if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl))))
		ret = 1;
#endif /* __APPLE_CC__ */

#ifdef WIN32
	const int zero = 0;
	struct tcp_keepalive vals;
	vals.onoff = 1;
	vals.keepalivetime = tcp_keepidle * 1000;
	vals.keepaliveinterval = tcp_keepintvl * 1000;
	DWORD outputBytes;

	if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL)))
		ret = 1;

	/* Windows happily submits indefinitely to the send buffer blissfully
	 * unaware nothing is getting there without gracefully failing unless
	 * we disable the send buffer */
	if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero))))
		ret = 1;
#endif /* WIN32 */

	return ret;
}
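
/*
 * keep_sockalive() returns 0 on success and 1 if any option could not be
 * applied.  SO_KEEPALIVE and non-blocking mode are set on every platform;
 * the #ifdef blocks then apply whichever keepalive timing knobs the OS
 * exposes (TCP_KEEPIDLE/TCP_KEEPINTVL on Linux, TCP_KEEPALIVE on Apple,
 * SIO_KEEPALIVE_VALS plus a zeroed SO_SNDBUF on Windows).
 */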

int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
			     curlsocktype __maybe_unused purpose)
{
	return keep_sockalive(fd);
}

static void last_nettime(struct timeval *last)
{
	rd_lock(&netacc_lock);
	last->tv_sec = nettime.tv_sec;
	last->tv_usec = nettime.tv_usec;
	rd_unlock(&netacc_lock);
}

static void set_nettime(void)
{
	wr_lock(&netacc_lock);
	cgtime(&nettime);
	wr_unlock(&netacc_lock);
}

static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
			 char *data, size_t size,
			 void *userdata)
{
	struct pool *pool = (struct pool *)userdata;

	switch(type) {
	case CURLINFO_HEADER_IN:
	case CURLINFO_DATA_IN:
	case CURLINFO_SSL_DATA_IN:
		pool->cgminer_pool_stats.bytes_received += size;
		total_bytes_rcvd += size;
		pool->cgminer_pool_stats.net_bytes_received += size;
		break;
	case CURLINFO_HEADER_OUT:
	case CURLINFO_DATA_OUT:
	case CURLINFO_SSL_DATA_OUT:
		pool->cgminer_pool_stats.bytes_sent += size;
		total_bytes_sent += size;
		pool->cgminer_pool_stats.net_bytes_sent += size;
		break;
	case CURLINFO_TEXT:
	{
		if (!opt_protocol)
			break;
		// data is not null-terminated, so we need to copy and terminate it for applog
		char datacp[size + 1];
		memcpy(datacp, data, size);
		while (likely(size) && unlikely(isCspace(datacp[size-1])))
			--size;
		if (unlikely(!size))
			break;
		datacp[size] = '\0';
		applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp);
		break;
	}
	default:
		break;
	}
	return 0;
}

struct json_rpc_call_state {
	struct data_buffer all_data;
	struct header_info hi;
	void *priv;
	char curl_err_str[CURL_ERROR_SIZE];
	struct curl_slist *headers;
	struct upload_buffer upload_data;
	struct pool *pool;
};

void json_rpc_call_async(CURL *curl, const char *url,
			 const char *userpass, const char *rpc_req,
			 bool longpoll,
			 struct pool *pool, bool share,
			 void *priv)
{
	struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state));
	*state = (struct json_rpc_call_state){
		.priv = priv,
		.pool = pool,
	};
	long timeout = longpoll ? (60 * 60) : 60;
	char len_hdr[64], user_agent_hdr[128];
	struct curl_slist *headers = NULL;

	if (longpoll)
		state->all_data.idlemarker = &pool->lp_socket;

	/* it is assumed that 'curl' is freshly [re]initialized at this pt */

	curl_easy_setopt(curl, CURLOPT_PRIVATE, state);
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);

	/* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
	 * to enable it */
	curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
	curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
	curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);

	curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_ENCODING, "");
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);

	/* Shares are staggered already and delays in submission can be costly
	 * so do not delay them */
	if (!opt_delaynet || share)
		curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);

	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data);
	curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
	curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data);
	curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]);
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
	curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
	curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi);
	curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
	if (pool->rpc_proxy) {
		curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
	} else if (opt_socks_proxy) {
		curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
		curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
	}
	if (userpass) {
		curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
		curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
	}
	if (longpoll)
		curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb);
	curl_easy_setopt(curl, CURLOPT_POST, 1);

	if (opt_protocol)
		applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req);

	state->upload_data.buf = rpc_req;
	state->upload_data.len = strlen(rpc_req);
	sprintf(len_hdr, "Content-Length: %lu",
		(unsigned long) state->upload_data.len);
	sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION);

	headers = curl_slist_append(headers,
		"Content-type: application/json");
	headers = curl_slist_append(headers,
		"X-Mining-Extensions: longpoll midstate rollntime submitold");

	if (longpoll)
		headers = curl_slist_append(headers,
			"X-Minimum-Wait: 0");

	if (likely(global_hashrate)) {
		char ghashrate[255];

		sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate);
		headers = curl_slist_append(headers, ghashrate);
	}

	headers = curl_slist_append(headers, len_hdr);
	headers = curl_slist_append(headers, user_agent_hdr);
	headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/

	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
	state->headers = headers;

	if (opt_delaynet) {
		/* Don't delay share submission, but still track the nettime */
		if (!share) {
			long long now_msecs, last_msecs;
			struct timeval now, last;

			cgtime(&now);
			last_nettime(&last);
			now_msecs = (long long)now.tv_sec * 1000;
			now_msecs += now.tv_usec / 1000;
			last_msecs = (long long)last.tv_sec * 1000;
			last_msecs += last.tv_usec / 1000;
			if (now_msecs > last_msecs && now_msecs - last_msecs < 250) {
				struct timespec rgtp;

				rgtp.tv_sec = 0;
				rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000;
				nanosleep(&rgtp, NULL);
			}
		}
		set_nettime();
	}
}
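
/*
 * Note: json_rpc_call_async() only prepares the handle; it does not perform
 * the transfer.  The malloc'd json_rpc_call_state is stashed in
 * CURLOPT_PRIVATE so json_rpc_call_completed() can recover it after
 * curl_easy_perform() (or a curl_multi driver) finishes, and that is also
 * where the state is freed.
 */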

json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int *rolltime, void *out_priv)
{
	struct json_rpc_call_state *state;
	if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (void*)&state) != CURLE_OK) {
		applog(LOG_ERR, "Failed to get private curl data");
		if (out_priv)
			*(void**)out_priv = NULL;
		goto err_out;
	}
	if (out_priv)
		*(void**)out_priv = state->priv;

	json_t *val, *err_val, *res_val;
	json_error_t err;
	struct pool *pool = state->pool;
	bool probing = probe && !pool->probed;

	if (rc) {
		applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str);
		goto err_out;
	}

	if (!state->all_data.buf) {
		applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
		goto err_out;
	}

	pool->cgminer_pool_stats.times_sent++;
	pool->cgminer_pool_stats.times_received++;

	if (probing) {
		pool->probed = true;
		/* If X-Long-Polling was found, activate long polling */
		if (state->hi.lp_path) {
			if (pool->hdr_path != NULL)
				free(pool->hdr_path);
			pool->hdr_path = state->hi.lp_path;
		} else
			pool->hdr_path = NULL;
		if (state->hi.stratum_url) {
			pool->stratum_url = state->hi.stratum_url;
			state->hi.stratum_url = NULL;
		}
	} else {
		if (state->hi.lp_path) {
			free(state->hi.lp_path);
			state->hi.lp_path = NULL;
		}
		if (state->hi.stratum_url) {
			free(state->hi.stratum_url);
			state->hi.stratum_url = NULL;
		}
	}

	if (pool->force_rollntime)
	{
		state->hi.canroll = true;
		state->hi.hadexpire = true;
		state->hi.rolltime = pool->force_rollntime;
	}

	if (rolltime)
		*rolltime = state->hi.rolltime;
	pool->cgminer_pool_stats.rolltime = state->hi.rolltime;
	pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime;
	pool->cgminer_pool_stats.canroll = state->hi.canroll;
	pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire;

	val = JSON_LOADS(state->all_data.buf, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);

		if (opt_protocol)
			applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf);

		goto err_out;
	}

	if (opt_protocol) {
		char *s = json_dumps(val, JSON_INDENT(3));

		applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
		free(s);
	}

	/* JSON-RPC valid response returns a non-null 'result',
	 * and a null 'error'.
	 */
	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val ||(err_val && !json_is_null(err_val))) {
		char *s;

		if (err_val)
			s = json_dumps(err_val, JSON_INDENT(3));
		else
			s = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC call failed: %s", s);

		free(s);
		json_decref(val);
		goto err_out;
	}

	if (state->hi.reason) {
		json_object_set_new(val, "reject-reason", json_string(state->hi.reason));
		free(state->hi.reason);
		state->hi.reason = NULL;
	}

	successful_connect = true;
	databuf_free(&state->all_data);
	curl_slist_free_all(state->headers);
	curl_easy_reset(curl);
	free(state);
	return val;

err_out:
	databuf_free(&state->all_data);
	curl_slist_free_all(state->headers);
	curl_easy_reset(curl);
	if (!successful_connect)
		applog(LOG_DEBUG, "Failed to connect in json_rpc_call");
	curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
	free(state);
	return NULL;
}

json_t *json_rpc_call(CURL *curl, const char *url,
		      const char *userpass, const char *rpc_req,
		      bool probe, bool longpoll, int *rolltime,
		      struct pool *pool, bool share)
{
	json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
	int rc = curl_easy_perform(curl);
	return json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
}
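
/*
 * Usage sketch (illustrative, not from the original source): a minimal
 * blocking getwork-style call.  The request string and the 'pool' fields
 * shown here are only examples.
 *
 *	CURL *curl = curl_easy_init();
 *	int rolltime;
 *	json_t *val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass,
 *	                            "{\"method\": \"getwork\", \"params\": [], \"id\": 0}",
 *	                            false, false, &rolltime, pool, false);
 *	if (val)
 *		json_decref(val);
 *	curl_easy_cleanup(curl);
 */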

bool our_curl_supports_proxy_uris()
{
	curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
	return data->age && data->version_num >= ((7 << 16) | (21 << 8) | 7);  // 7.21.7
}

// NOTE: This assumes reference URI is a root
char *absolute_uri(char *uri, const char *ref)
{
	if (strstr(uri, "://"))
		return strdup(uri);

	char *copy_start, *abs;
	bool need_slash = false;

	copy_start = (uri[0] == '/') ? &uri[1] : uri;
	if (ref[strlen(ref) - 1] != '/')
		need_slash = true;

	abs = malloc(strlen(ref) + strlen(copy_start) + 2);
	if (!abs) {
		applog(LOG_ERR, "Malloc failure in absolute_uri");
		return NULL;
	}

	sprintf(abs, "%s%s%s", ref, need_slash ? "/" : "", copy_start);

	return abs;
}

static const char _hexchars[0x10] = "0123456789abcdef";

void bin2hex(char *out, const void *in, size_t len)
{
	const unsigned char *p = in;

	while (len--)
	{
		(out++)[0] = _hexchars[p[0] >> 4];
		(out++)[0] = _hexchars[p[0] & 0xf];
		++p;
	}
	out[0] = '\0';
}

static inline
int _hex2bin_char(const char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return (c - 'a') + 10;
	if (c >= 'A' && c <= 'F')
		return (c - 'A') + 10;
	return -1;
}

/* Does the reverse of bin2hex but does not allocate any ram */
bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
{
	int n, o;

	while (len--)
	{
		n = _hex2bin_char((hexstr++)[0]);
		if (unlikely(n == -1))
		{
badchar:
			if (!hexstr[-1])
				applog(LOG_ERR, "hex2bin: str truncated");
			else
				applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]);
			return false;
		}
		o = _hex2bin_char((hexstr++)[0]);
		if (unlikely(o == -1))
			goto badchar;
		(p++)[0] = (n << 4) | o;
	}
	return likely(!hexstr[0]);
}
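
/*
 * Usage sketch (illustrative, not from the original source): round-tripping
 * a 32-byte value.  bin2hex needs 2*len+1 bytes of output space for the
 * terminating NUL; hex2bin's len is the *binary* length and it fails on any
 * non-hex character, a truncated string, or trailing extra characters.
 *
 *	unsigned char bin[32];
 *	char hex[(32 * 2) + 1];
 *	bin2hex(hex, bin, sizeof(bin));
 *	if (!hex2bin(bin, hex, sizeof(bin)))
 *		applog(LOG_ERR, "hex2bin round trip failed");
 */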

void ucs2tochar(char * const out, const uint16_t * const in, const size_t sz)
{
	for (int i = 0; i < sz; ++i)
		out[i] = in[i];
}

char *ucs2tochar_dup(uint16_t * const in, const size_t sz)
{
	char *out = malloc(sz + 1);
	ucs2tochar(out, in, sz);
	out[sz] = '\0';
	return out;
}

void hash_data(unsigned char *out_hash, const unsigned char *data)
{
	unsigned char blkheader[80];

	// data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header
	swap32yes(blkheader, data, 80 / 4);

	// double-SHA256 to get the block hash
	gen_hash(blkheader, out_hash, 80);
}

// Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1)
void real_block_target(unsigned char *target, const unsigned char *data)
{
	uint8_t targetshift;

	if (unlikely(data[72] < 3 || data[72] > 0x20))
	{
		// Invalid (out of bounds) target
		memset(target, 0xff, 32);
		return;
	}

	targetshift = data[72] - 3;
	memset(target, 0, targetshift);
	target[targetshift++] = data[75];
	target[targetshift++] = data[74];
	target[targetshift++] = data[73];
	memset(&target[targetshift], 0, 0x20 - targetshift);
}
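
/*
 * Note (assumes the usual getwork-style data layout): data[72..75] is the
 * block header's nBits field with data[72] as the exponent and data[73..75]
 * the mantissa, most significant byte first.  The function above writes the
 * mantissa, least significant byte first, at offset (exponent - 3) of a
 * little-endian 256-bit target; e.g. nBits 0x1d00ffff yields the
 * difficulty-1 target shown in the example comment.
 */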

bool hash_target_check(const unsigned char *hash, const unsigned char *target)
{
	const uint32_t *h32 = (uint32_t*)&hash[0];
	const uint32_t *t32 = (uint32_t*)&target[0];

	for (int i = 7; i >= 0; --i) {
		uint32_t h32i = le32toh(h32[i]);
		uint32_t t32i = le32toh(t32[i]);
		if (h32i > t32i)
			return false;
		if (h32i < t32i)
			return true;
	}
	return true;
}

bool hash_target_check_v(const unsigned char *hash, const unsigned char *target)
{
	bool rc;

	rc = hash_target_check(hash, target);

	if (opt_debug) {
		unsigned char hash_swap[32], target_swap[32];
		char hash_str[65];
		char target_str[65];

		for (int i = 0; i < 32; ++i) {
			hash_swap[i] = hash[31-i];
			target_swap[i] = target[31-i];
		}
		bin2hex(hash_str, hash_swap, 32);
		bin2hex(target_str, target_swap, 32);

		applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
		       hash_str,
		       target_str,
		       rc ? "YES (hash <= target)" :
		            "no (false positive; hash > target)");
	}

	return rc;
}

// This operates on a native-endian SHA256 state
// In other words, on little endian platforms, every 4 bytes are in reverse order
bool fulltest(const unsigned char *hash, const unsigned char *target)
{
	unsigned char hash2[32];
	swap32tobe(hash2, hash, 32 / 4);
	return hash_target_check_v(hash2, target);
}
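
/*
 * hash_target_check() compares hash and target as 256-bit numbers, 32 bits
 * at a time starting from the most significant word, and returns true when
 * hash <= target.  hash_target_check_v() is the same check plus the verbose
 * --debug dump; fulltest() first adapts a native-endian SHA-256 state into
 * the byte order these checks expect.
 */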

struct thread_q *tq_new(void)
{
	struct thread_q *tq;

	tq = calloc(1, sizeof(*tq));
	if (!tq)
		return NULL;

	pthread_mutex_init(&tq->mutex, NULL);
	pthread_cond_init(&tq->cond, NULL);

	return tq;
}

void tq_free(struct thread_q *tq)
{
	struct tq_ent *ent, *iter;

	if (!tq)
		return;

	DL_FOREACH_SAFE(tq->q, ent, iter) {
		DL_DELETE(tq->q, ent);
		free(ent);
	}

	pthread_cond_destroy(&tq->cond);
	pthread_mutex_destroy(&tq->mutex);

	memset(tq, 0, sizeof(*tq));	/* poison */
	free(tq);
}

static void tq_freezethaw(struct thread_q *tq, bool frozen)
{
	mutex_lock(&tq->mutex);
	tq->frozen = frozen;
	pthread_cond_signal(&tq->cond);
	mutex_unlock(&tq->mutex);
}

void tq_freeze(struct thread_q *tq)
{
	tq_freezethaw(tq, true);
}

void tq_thaw(struct thread_q *tq)
{
	tq_freezethaw(tq, false);
}

bool tq_push(struct thread_q *tq, void *data)
{
	struct tq_ent *ent;
	bool rc = true;

	ent = calloc(1, sizeof(*ent));
	if (!ent)
		return false;

	ent->data = data;

	mutex_lock(&tq->mutex);
	if (!tq->frozen) {
		DL_APPEND(tq->q, ent);
	} else {
		free(ent);
		rc = false;
	}
	pthread_cond_signal(&tq->cond);
	mutex_unlock(&tq->mutex);

	return rc;
}

void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
{
	struct tq_ent *ent;
	void *rval = NULL;
	int rc;

	mutex_lock(&tq->mutex);
	if (tq->q)
		goto pop;

	if (abstime)
		rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
	else
		rc = pthread_cond_wait(&tq->cond, &tq->mutex);
	if (rc)
		goto out;
	if (!tq->q)
		goto out;
pop:
	ent = tq->q;
	rval = ent->data;

	DL_DELETE(tq->q, ent);
	free(ent);
out:
	mutex_unlock(&tq->mutex);

	return rval;
}
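
/*
 * Usage sketch (illustrative, not from the original source; 'my_work' is a
 * hypothetical payload): thread_q is a small blocking producer/consumer
 * queue built on utlist plus a condition variable.
 *
 *	struct thread_q *tq = tq_new();
 *	tq_push(tq, my_work);               // producer; fails if the queue is frozen
 *	void *item = tq_pop(tq, NULL);      // consumer blocks until data or a signal
 *	tq_freeze(tq);                      // reject further pushes until tq_thaw()
 *	tq_free(tq);
 */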

int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
{
	int rv = pthread_create(&thr->pth, attr, start, arg);
	if (likely(!rv))
		thr->has_pth = true;
	return rv;
}

void thr_info_freeze(struct thr_info *thr)
{
	struct tq_ent *ent, *iter;
	struct thread_q *tq;

	if (!thr)
		return;

	tq = thr->q;
	if (!tq)
		return;

	mutex_lock(&tq->mutex);
	tq->frozen = true;
	DL_FOREACH_SAFE(tq->q, ent, iter) {
		DL_DELETE(tq->q, ent);
		free(ent);
	}
	mutex_unlock(&tq->mutex);
}

void thr_info_cancel(struct thr_info *thr)
{
	if (!thr)
		return;

	if (thr->has_pth) {
		pthread_cancel(thr->pth);
		thr->has_pth = false;
	}
}

#ifndef HAVE_PTHREAD_CANCEL

// Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill

enum pthread_cancel_workaround_mode {
	PCWM_DEFAULT = 0,
	PCWM_TERMINATE = 1,
	PCWM_ASYNC = 2,
	PCWM_DISABLED = 4,
	PCWM_CANCELLED = 8,
};

static pthread_key_t key_pcwm;
struct sigaction pcwm_orig_term_handler;

static
void do_pthread_cancel_exit(int flags)
{
	if (!(flags & PCWM_ASYNC))
		// NOTE: Logging disables cancel while mutex held, so this is safe
		applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW");
	pthread_exit(PTHREAD_CANCELED);
}

static
void sighandler_pthread_cancel(int sig)
{
	int flags = (int)pthread_getspecific(key_pcwm);
	if (flags & PCWM_TERMINATE)  // Main thread
	{
		// Restore original handler and call it
		if (sigaction(sig, &pcwm_orig_term_handler, NULL))
			quit(1, "pthread_cancel workaround: Failed to restore original handler");
		raise(SIGTERM);
		quit(1, "pthread_cancel workaround: Original handler returned");
	}
	if (flags & PCWM_CANCELLED)  // Already pending cancel
		return;
	if (flags & PCWM_DISABLED)
	{
		flags |= PCWM_CANCELLED;
		if (pthread_setspecific(key_pcwm, (void*)flags))
			quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)");
		return;
	}
	do_pthread_cancel_exit(flags);
}

void pthread_testcancel(void)
{
	int flags = (int)pthread_getspecific(key_pcwm);
	if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED))
		do_pthread_cancel_exit(flags);
}

int pthread_setcancelstate(int state, int *oldstate)
{
	int flags = (int)pthread_getspecific(key_pcwm);
	if (oldstate)
		*oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
	if (state == PTHREAD_CANCEL_DISABLE)
		flags |= PCWM_DISABLED;
	else
	{
		if (flags & PCWM_CANCELLED)
			do_pthread_cancel_exit(flags);
		flags &= ~PCWM_DISABLED;
	}
	if (pthread_setspecific(key_pcwm, (void*)flags))
		return -1;
	return 0;
}

int pthread_setcanceltype(int type, int *oldtype)
{
	int flags = (int)pthread_getspecific(key_pcwm);
	if (oldtype)
		*oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
	if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
		flags |= PCWM_ASYNC;
	else
		flags &= ~PCWM_ASYNC;
	if (pthread_setspecific(key_pcwm, (void*)flags))
		return -1;
	return 0;
}

void setup_pthread_cancel_workaround()
{
	if (pthread_key_create(&key_pcwm, NULL))
		quit(1, "pthread_cancel workaround: pthread_key_create failed");
	if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE))
		quit(1, "pthread_cancel workaround: pthread_setspecific failed");
	struct sigaction new_sigact = {
		.sa_handler = sighandler_pthread_cancel,
	};
	if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler))
		quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler");
}

#endif
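
/*
 * Note: on platforms without pthread_cancel (Bionic/Android), the
 * replacements above emulate deferred cancellation with a SIGTERM handler
 * plus a per-thread flag word kept in thread-specific data (key_pcwm).  The
 * thread that calls setup_pthread_cancel_workaround() keeps PCWM_TERMINATE
 * set, so a real SIGTERM is forwarded to whatever handler was installed
 * before the workaround took over.
 */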

static void _now_gettimeofday(struct timeval *);
static void _cgsleep_us_r_nanosleep(cgtimer_t *, int64_t);

#ifdef HAVE_POOR_GETTIMEOFDAY
static struct timeval tv_timeofday_offset;
static struct timeval _tv_timeofday_lastchecked;
static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER;

static
void bfg_calibrate_timeofday(struct timeval *expected, char *buf)
{
	struct timeval actual, delta;
	timeradd(expected, &tv_timeofday_offset, expected);
	_now_gettimeofday(&actual);
	if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1)
		// Within reason - no change necessary
		return;

	timersub(&actual, expected, &delta);
	timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset);
	sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec);
	*expected = actual;
}

void bfg_gettimeofday(struct timeval *out)
{
	char buf[64] = "";
	timer_set_now(out);
	mutex_lock(&_tv_timeofday_mutex);
	if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21)
		bfg_calibrate_timeofday(out, buf);
	else
		timeradd(out, &tv_timeofday_offset, out);
	mutex_unlock(&_tv_timeofday_mutex);
	if (unlikely(buf[0]))
		applog(LOG_WARNING, "%s", buf);
}
#endif

#ifdef WIN32
static LARGE_INTEGER _perffreq;

static
void _now_queryperformancecounter(struct timeval *tv)
{
	LARGE_INTEGER now;
	if (unlikely(!QueryPerformanceCounter(&now)))
		quit(1, "QueryPerformanceCounter failed");
	*tv = (struct timeval){
		.tv_sec = now.QuadPart / _perffreq.QuadPart,
		.tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart,
	};
}
#endif

static void bfg_init_time();

static
void _now_is_not_set(__maybe_unused struct timeval *tv)
{
	bfg_init_time();
	timer_set_now(tv);
}

void (*timer_set_now)(struct timeval *tv) = _now_is_not_set;
void (*cgsleep_us_r)(cgtimer_t *, int64_t) = _cgsleep_us_r_nanosleep;

#ifdef HAVE_CLOCK_GETTIME_MONOTONIC
static clockid_t bfg_timer_clk;

static
void _now_clock_gettime(struct timeval *tv)
{
	struct timespec ts;
	if (unlikely(clock_gettime(bfg_timer_clk, &ts)))
		quit(1, "clock_gettime failed");
	*tv = (struct timeval){
		.tv_sec = ts.tv_sec,
		.tv_usec = ts.tv_nsec / 1000,
	};
}

#ifdef HAVE_CLOCK_NANOSLEEP
static
void _cgsleep_us_r_monotonic(cgtimer_t *tv_start, int64_t us)
{
	struct timeval tv_end[1];
	struct timespec ts_end[1];
	int ret;
	timer_set_delay(tv_end, tv_start, us);
	timeval_to_spec(ts_end, tv_end);
	do {
		ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
	} while (ret == EINTR);
}
#endif

static
bool _bfg_try_clock_gettime(clockid_t clk)
{
	struct timespec ts;
	if (clock_gettime(clk, &ts))
		return false;
	bfg_timer_clk = clk;
	timer_set_now = _now_clock_gettime;
	return true;
}
#endif

static
void bfg_init_time()
{
	if (timer_set_now != _now_is_not_set)
		return;

#ifdef HAVE_CLOCK_GETTIME_MONOTONIC
#ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW
	if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW))
		applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)");
	else
#endif
	if (_bfg_try_clock_gettime(CLOCK_MONOTONIC))
	{
		applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)");
#ifdef HAVE_CLOCK_NANOSLEEP
		cgsleep_us_r = _cgsleep_us_r_monotonic;
#endif
	}
	else
#endif
#ifdef WIN32
	if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart)
	{
		timer_set_now = _now_queryperformancecounter;
		applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter");
	}
	else
#endif
	{
		timer_set_now = _now_gettimeofday;
		applog(LOG_DEBUG, "Timers: Using gettimeofday");
	}

#ifdef HAVE_POOR_GETTIMEOFDAY
	char buf[64] = "";
	struct timeval tv;
	timer_set_now(&tv);
	bfg_calibrate_timeofday(&tv, buf);
	applog(LOG_DEBUG, "%s", buf);
#endif
}
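
/*
 * Note: timer_set_now and cgsleep_us_r are function pointers so the clock
 * backend can be picked once at runtime.  timer_set_now starts at
 * _now_is_not_set(), which lazily runs bfg_init_time() on first use and then
 * retries; the preferred sources are CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC,
 * QueryPerformanceCounter, and finally plain gettimeofday().  cgsleep_us_r
 * defaults to the nanosleep loop and is upgraded to the clock_nanosleep
 * variant when CLOCK_MONOTONIC is usable.
 */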

void subtime(struct timeval *a, struct timeval *b)
{
	timersub(a, b, b);
}

void addtime(struct timeval *a, struct timeval *b)
{
	timeradd(a, b, b);
}

bool time_more(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, >);
}

bool time_less(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, <);
}

void copy_time(struct timeval *dest, const struct timeval *src)
{
	memcpy(dest, src, sizeof(struct timeval));
}

void timespec_to_val(struct timeval *val, const struct timespec *spec)
{
	val->tv_sec = spec->tv_sec;
	val->tv_usec = spec->tv_nsec / 1000;
}

void timeval_to_spec(struct timespec *spec, const struct timeval *val)
{
	spec->tv_sec = val->tv_sec;
	spec->tv_nsec = val->tv_usec * 1000;
}

void us_to_timeval(struct timeval *val, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem;
}

void us_to_timespec(struct timespec *spec, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000;
}

void ms_to_timespec(struct timespec *spec, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000000;
}

void timeraddspec(struct timespec *a, const struct timespec *b)
{
	a->tv_sec += b->tv_sec;
	a->tv_nsec += b->tv_nsec;
	if (a->tv_nsec >= 1000000000) {
		a->tv_nsec -= 1000000000;
		a->tv_sec++;
	}
}
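
/*
 * Example (illustrative, not from the original source): the conversion
 * helpers split a scalar count with lldiv().  us_to_timeval(&tv, 2500000)
 * yields tv_sec = 2, tv_usec = 500000; ms_to_timespec(&ts, 1250) yields
 * tv_sec = 1, tv_nsec = 250000000.
 */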

#ifndef WIN32
static
void _now_gettimeofday(struct timeval *tv)
{
	gettimeofday(tv, NULL);
}
#else
/* Windows start time is since 1601 lol so convert it to unix epoch 1970. */
#define EPOCHFILETIME (116444736000000000LL)

/* Return the system time as an lldiv_t in decimicroseconds. */
static void decius_time(lldiv_t *lidiv)
{
	FILETIME ft;
	LARGE_INTEGER li;

	GetSystemTimeAsFileTime(&ft);
	li.LowPart  = ft.dwLowDateTime;
	li.HighPart = ft.dwHighDateTime;
	li.QuadPart -= EPOCHFILETIME;

	/* SystemTime is in decimicroseconds so divide by an unusual number */
	*lidiv = lldiv(li.QuadPart, 10000000);
}

void _now_gettimeofday(struct timeval *tv)
{
	lldiv_t lidiv;

	decius_time(&lidiv);
	tv->tv_sec = lidiv.quot;
	tv->tv_usec = lidiv.rem / 10;
}
#endif

void cgsleep_ms_r(cgtimer_t *tv_start, int ms)
{
	cgsleep_us_r(tv_start, ((int64_t)ms) * 1000);
}

static
void _cgsleep_us_r_nanosleep(cgtimer_t *tv_start, int64_t us)
{
	struct timeval tv_timer[1], tv[1];
	struct timespec ts[1];

	timer_set_delay(tv_timer, tv_start, us);
	while (true)
	{
		timer_set_now(tv);
		if (!timercmp(tv_timer, tv, >))
			return;
		timersub(tv_timer, tv, tv);
		timeval_to_spec(ts, tv);
		nanosleep(ts, NULL);
	}
}

void cgsleep_ms(int ms)
{
	cgtimer_t ts_start;

	cgsleep_prepare_r(&ts_start);
	cgsleep_ms_r(&ts_start, ms);
}

void cgsleep_us(int64_t us)
{
	cgtimer_t ts_start;

	cgsleep_prepare_r(&ts_start);
	cgsleep_us_r(&ts_start, us);
}

/* Returns the microseconds difference between end and start times as a double */
double us_tdiff(struct timeval *end, struct timeval *start)
{
	return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec;
}

/* Returns the seconds difference between end and start times as a double */
double tdiff(struct timeval *end, struct timeval *start)
{
	return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
}
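
/*
 * Note: the cgsleep_*_r() variants measure the delay from a start time
 * captured earlier (cgsleep_prepare_r() in cgsleep_ms()/cgsleep_us()), not
 * from the moment the sleep call runs.  The nanosleep-based fallback
 * re-checks the deadline in a loop, so an early wakeup by a signal simply
 * results in another, shorter sleep.
 */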

bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
	char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
	char url_address[256], port[6];
	int url_len, port_len = 0;

	url_begin = strstr(url, "//");
	if (!url_begin)
		url_begin = url;
	else
		url_begin += 2;

	/* Look for numeric ipv6 entries */
	ipv6_begin = strstr(url_begin, "[");
	ipv6_end = strstr(url_begin, "]");
	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
		url_end = strstr(ipv6_end, ":");
	else
		url_end = strstr(url_begin, ":");
	if (url_end) {
		url_len = url_end - url_begin;
		port_len = strlen(url_begin) - url_len - 1;
		if (port_len < 1)
			return false;
		port_start = url_end + 1;
	} else
		url_len = strlen(url_begin);

	if (url_len < 1)
		return false;

	sprintf(url_address, "%.*s", url_len, url_begin);

	if (port_len) {
		char *slash;

		snprintf(port, 6, "%.*s", port_len, port_start);
		slash = strchr(port, '/');
		if (slash)
			*slash = '\0';
	} else
		strcpy(port, "80");

	free(*sockaddr_port);
	*sockaddr_port = strdup(port);
	free(*sockaddr_url);
	*sockaddr_url = strdup(url_address);

	return true;
}
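
/*
 * Usage sketch (illustrative, not from the original source): extract_sockaddr()
 * splits a pool URL into host and port strings, freeing and replacing any
 * previous values; without an explicit port it falls back to "80".
 *
 *	char *addr = NULL, *port = NULL;
 *	if (extract_sockaddr("stratum+tcp://example.org:3333", &addr, &port))
 *		applog(LOG_DEBUG, "host=%s port=%s", addr, port);  // example.org / 3333
 */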

enum send_ret {
	SEND_OK,
	SEND_SELECTFAIL,
	SEND_SENDFAIL,
	SEND_INACTIVE
};

/* Send a single command across a socket, appending \n to it. This should all
 * be done under stratum lock except when first establishing the socket */
static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
{
	SOCKETTYPE sock = pool->sock;
	ssize_t ssent = 0;

	strcat(s, "\n");
	len++;

	while (len > 0) {
		struct timeval timeout = {1, 0};
		ssize_t sent;
		fd_set wd;

		FD_ZERO(&wd);
		FD_SET(sock, &wd);
		if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
			return SEND_SELECTFAIL;
#ifdef __APPLE__
		sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
#elif WIN32
		sent = send(pool->sock, s + ssent, len, 0);
#else
		sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
#endif
		if (sent < 0) {
			if (!sock_blocks())
				return SEND_SENDFAIL;
			sent = 0;
		}
		ssent += sent;
		len -= sent;
	}

	pool->cgminer_pool_stats.times_sent++;
	pool->cgminer_pool_stats.bytes_sent += ssent;
	total_bytes_sent += ssent;
	pool->cgminer_pool_stats.net_bytes_sent += ssent;
	return SEND_OK;
}

bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force)
{
	enum send_ret ret = SEND_INACTIVE;

	if (opt_protocol)
		applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s);

	mutex_lock(&pool->stratum_lock);
	if (pool->stratum_active || force)
		ret = __stratum_send(pool, s, len);
	mutex_unlock(&pool->stratum_lock);

	/* This is to avoid doing applog under stratum_lock */
	switch (ret) {
	default:
	case SEND_OK:
		break;
	case SEND_SELECTFAIL:
		applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
		suspend_stratum(pool);
		break;
	case SEND_SENDFAIL:
		applog(LOG_DEBUG, "Failed to send in stratum_send");
		suspend_stratum(pool);
		break;
	case SEND_INACTIVE:
		applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
		break;
	}
	return (ret == SEND_OK);
}
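
/*
 * Note: __stratum_send() appends the trailing '\n' in place with strcat(),
 * so callers must pass a buffer with spare room for it.  _stratum_send()
 * wraps the send in the stratum lock and defers all failure logging until
 * after the lock is released.
 */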
  1276. static bool socket_full(struct pool *pool, int wait)
  1277. {
  1278. SOCKETTYPE sock = pool->sock;
  1279. struct timeval timeout;
  1280. fd_set rd;
  1281. if (sock == INVSOCK)
  1282. return true;
  1283. if (unlikely(wait < 0))
  1284. wait = 0;
  1285. FD_ZERO(&rd);
  1286. FD_SET(sock, &rd);
  1287. timeout.tv_usec = 0;
  1288. timeout.tv_sec = wait;
  1289. if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
  1290. return true;
  1291. return false;
  1292. }
  1293. /* Check to see if Santa's been good to you */
  1294. bool sock_full(struct pool *pool)
  1295. {
  1296. if (strlen(pool->sockbuf))
  1297. return true;
  1298. return (socket_full(pool, 0));
  1299. }
  1300. static void clear_sockbuf(struct pool *pool)
  1301. {
  1302. strcpy(pool->sockbuf, "");
  1303. }
  1304. static void clear_sock(struct pool *pool)
  1305. {
  1306. ssize_t n;
  1307. mutex_lock(&pool->stratum_lock);
  1308. do {
  1309. if (pool->sock)
  1310. n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
  1311. else
  1312. n = 0;
  1313. } while (n > 0);
  1314. mutex_unlock(&pool->stratum_lock);
  1315. clear_sockbuf(pool);
  1316. }
  1317. /* Make sure the pool sockbuf is large enough to cope with any coinbase size
  1318. * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
  1319. * and zeroing the new memory */
  1320. static void recalloc_sock(struct pool *pool, size_t len)
  1321. {
  1322. size_t old, new;
  1323. old = strlen(pool->sockbuf);
  1324. new = old + len + 1;
  1325. if (new < pool->sockbuf_size)
  1326. return;
  1327. new = new + (RBUFSIZE - (new % RBUFSIZE));
  1328. // Avoid potentially recursive locking
  1329. // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new);
  1330. pool->sockbuf = realloc(pool->sockbuf, new);
  1331. if (!pool->sockbuf)
  1332. quithere(1, "Failed to realloc pool sockbuf");
  1333. memset(pool->sockbuf + old, 0, new - old);
  1334. pool->sockbuf_size = new;
  1335. }
/* Peeks at a socket to find the first end of line and then reads just that
 * from the socket, returning it as a malloced string */
char *recv_line(struct pool *pool)
{
    char *tok, *sret = NULL;
    ssize_t len, buflen;
    int waited = 0;

    if (!strstr(pool->sockbuf, "\n")) {
        struct timeval rstart, now;

        cgtime(&rstart);
        if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
            applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
            goto out;
        }

        do {
            char s[RBUFSIZE];
            size_t slen;
            ssize_t n;

            memset(s, 0, RBUFSIZE);
            n = recv(pool->sock, s, RECVSIZE, 0);
            if (!n) {
                applog(LOG_DEBUG, "Socket closed waiting in recv_line");
                suspend_stratum(pool);
                break;
            }
            cgtime(&now);
            waited = tdiff(&now, &rstart);
            if (n < 0) {
                // Save errno from being overwritten by socket_ calls
                int socket_recv_errno;
                socket_recv_errno = SOCKERR;
                if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
                    applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET));
                    suspend_stratum(pool);
                    break;
                }
            } else {
                slen = strlen(s);
                recalloc_sock(pool, slen);
                strcat(pool->sockbuf, s);
            }
        } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
    }

    buflen = strlen(pool->sockbuf);
    tok = strtok(pool->sockbuf, "\n");
    if (!tok) {
        applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
        goto out;
    }
    sret = strdup(tok);
    len = strlen(sret);

    /* Copy what's left in the buffer after the \n, including the
     * terminating \0 */
    if (buflen > len + 1)
        memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
    else
        strcpy(pool->sockbuf, "");

    pool->cgminer_pool_stats.times_received++;
    pool->cgminer_pool_stats.bytes_received += len;
    total_bytes_rcvd += len;
    pool->cgminer_pool_stats.net_bytes_received += len;
out:
    if (!sret)
        clear_sock(pool);
    else if (opt_protocol)
        applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret);
    return sret;
}

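/* Hypothetical usage sketch (not part of the original source): callers own the
 * returned string and must free() it, e.g.
 *
 *   char *line = recv_line(pool);
 *   if (line) {
 *       if (!parse_method(pool, line))
 *           applog(LOG_DEBUG, "Unhandled stratum line: %s", line);
 *       free(line);
 *   }
 *
 * A NULL return means no complete line arrived within DEFAULT_SOCKWAIT, and
 * the socket buffer has been cleared via clear_sock above. */
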
/* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY
 * flag, but this is compatible with 2.0. */
char *json_dumps_ANY(json_t *json, size_t flags)
{
    switch (json_typeof(json))
    {
        case JSON_ARRAY:
        case JSON_OBJECT:
            return json_dumps(json, flags);
        default:
            break;
    }

    char *rv;
#ifdef JSON_ENCODE_ANY
    rv = json_dumps(json, JSON_ENCODE_ANY | flags);
    if (rv)
        return rv;
#endif

    json_t *tmp = json_array();
    char *s;
    int i;
    size_t len;

    if (!tmp)
        quithere(1, "Failed to allocate json array");
    if (json_array_append(tmp, json))
        quithere(1, "Failed to append to temporary array");
    s = json_dumps(tmp, flags);
    if (!s)
        return NULL;
    for (i = 0; s[i] != '['; ++i)
        if (unlikely(!(s[i] && isCspace(s[i]))))
            quithere(1, "Failed to find opening bracket in array dump");
    len = strlen(&s[++i]) - 1;
    if (unlikely(s[i+len] != ']'))
        quithere(1, "Failed to find closing bracket in array dump");
    rv = malloc(len + 1);
    memcpy(rv, &s[i], len);
    rv[len] = '\0';
    free(s);
    json_decref(tmp);
    return rv;
}

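/* Illustrative example (not from the original source): on jansson builds
 * without JSON_ENCODE_ANY, dumping the bare JSON string "abc" takes the
 * wrap-in-array path above; the temporary array dumps as ["abc"], and the
 * text between the outer brackets, "abc" (quotes included), is returned as a
 * freshly malloced string. */
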
/* Extracts a string value from a json array with error checking. To be used
 * when the value of the string returned is only examined and not to be stored.
 * See json_array_string below */
const char *__json_array_string(json_t *val, unsigned int entry)
{
    json_t *arr_entry;

    if (json_is_null(val))
        return NULL;
    if (!json_is_array(val))
        return NULL;
    if (entry >= json_array_size(val))
        return NULL;
    arr_entry = json_array_get(val, entry);
    if (!json_is_string(arr_entry))
        return NULL;
    return json_string_value(arr_entry);
}

/* Creates a freshly malloced dup of __json_array_string */
static char *json_array_string(json_t *val, unsigned int entry)
{
    const char *buf = __json_array_string(val, entry);

    if (buf)
        return strdup(buf);
    return NULL;
}

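/* Usage note (added for clarity, not in the original source): the pointer
 * returned by __json_array_string is borrowed from the json_t and becomes
 * invalid once that value is decref'd, whereas json_array_string returns a
 * strdup'd copy the caller must free(), e.g.
 *
 *   char *job_id = json_array_string(params, 0);        // owned, free() later
 *   const char *ntime = __json_array_string(params, 7); // borrowed
 */
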
void stratum_probe_transparency(struct pool *pool)
{
    // Request transaction data to discourage pools from doing anything shady
    char s[1024];
    int sLen;

    sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}",
                   pool->swork.job_id,
                   pool->swork.job_id);
    stratum_send(pool, s, sLen);
    if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
        cgtime(&pool->swork.tv_transparency);
    pool->swork.transparency_probed = true;
}

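/* For illustration only (the job id "1b6" is invented): the request written to
 * the stratum socket looks like
 *   {"params": ["1b6"], "id": "txlist1b6", "method": "mining.get_transactions"}
 * so the reply carries a "txlist<job_id>" id that identifies which job the
 * transaction list belongs to. */
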
static bool parse_notify(struct pool *pool, json_t *val)
{
    const char *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime;
    char *job_id;
    bool clean, ret = false;
    int merkles, i;
    size_t cb1_len, cb2_len;
    json_t *arr;

    arr = json_array_get(val, 4);
    if (!arr || !json_is_array(arr))
        goto out;
    merkles = json_array_size(arr);
    for (i = 0; i < merkles; i++)
        if (!json_is_string(json_array_get(arr, i)))
            goto out;

    prev_hash = __json_array_string(val, 1);
    coinbase1 = __json_array_string(val, 2);
    coinbase2 = __json_array_string(val, 3);
    bbversion = __json_array_string(val, 5);
    nbit = __json_array_string(val, 6);
    ntime = __json_array_string(val, 7);
    clean = json_is_true(json_array_get(val, 8));

    if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime)
        goto out;

    job_id = json_array_string(val, 0);
    if (!job_id)
        goto out;

    cg_wlock(&pool->data_lock);
    cgtime(&pool->swork.tv_received);
    free(pool->swork.job_id);
    pool->swork.job_id = job_id;
    pool->submit_old = !clean;
    pool->swork.clean = true;
    hex2bin(&pool->swork.header1[0], bbversion, 4);
    hex2bin(&pool->swork.header1[4], prev_hash, 32);
    hex2bin((void*)&pool->swork.ntime, ntime, 4);
    pool->swork.ntime = be32toh(pool->swork.ntime);
    hex2bin(&pool->swork.diffbits[0], nbit, 4);
    cb1_len = strlen(coinbase1) / 2;
    pool->swork.nonce2_offset = cb1_len + pool->n1_len;
    cb2_len = strlen(coinbase2) / 2;

    bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len);
    uint8_t *coinbase = bytes_buf(&pool->swork.coinbase);
    hex2bin(coinbase, coinbase1, cb1_len);
    hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len);
    // NOTE: gap for nonce2, filled at work generation time
    hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len);

    bytes_resize(&pool->swork.merkle_bin, 32 * merkles);
    for (i = 0; i < merkles; i++)
        hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32);
    pool->swork.merkles = merkles;
    pool->nonce2 = 0;
    cg_wunlock(&pool->data_lock);

    applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s",
           pool->pool_no, job_id);
    if (opt_debug && opt_protocol)
    {
        applog(LOG_DEBUG, "job_id: %s", job_id);
        applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
        applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
        applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
        for (i = 0; i < merkles; i++)
            applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i)));
        applog(LOG_DEBUG, "bbversion: %s", bbversion);
        applog(LOG_DEBUG, "nbit: %s", nbit);
        applog(LOG_DEBUG, "ntime: %s", ntime);
        applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
    }

    /* A notify message is the closest stratum gets to a getwork */
    pool->getwork_requested++;
    total_getworks++;

    if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
        if (pool->probed)
            stratum_probe_transparency(pool);

    ret = true;
out:
    return ret;
}

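/* Field layout of the mining.notify params array parsed above (derived
 * directly from the indices used in parse_notify):
 *   [0] job_id      [1] prev_hash   [2] coinbase1   [3] coinbase2
 *   [4] merkle branches (array of hex strings)
 *   [5] block version  [6] nbits  [7] ntime  [8] clean_jobs flag */
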
static bool parse_diff(struct pool *pool, json_t *val)
{
    double diff;

    diff = json_number_value(json_array_get(val, 0));
    if (diff == 0)
        return false;

    cg_wlock(&pool->data_lock);
    pool->swork.diff = diff;
    cg_wunlock(&pool->data_lock);

    applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff);
    return true;
}

static bool parse_reconnect(struct pool *pool, json_t *val)
{
    const char *url, *port;
    char address[256];

    url = __json_array_string(val, 0);
    if (!url)
        url = pool->sockaddr_url;

    port = __json_array_string(val, 1);
    if (!port)
        port = pool->stratum_port;

    snprintf(address, sizeof(address), "%s:%s", url, port);

    if (!extract_sockaddr(address, &pool->sockaddr_url, &pool->stratum_port))
        return false;

    pool->stratum_url = pool->sockaddr_url;

    applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address);

    if (!restart_stratum(pool))
        return false;

    return true;
}

static bool send_version(struct pool *pool, json_t *val)
{
    char s[RBUFSIZE], *idstr;
    json_t *id = json_object_get(val, "id");

    if (!(id && !json_is_null(id)))
        return false;

    idstr = json_dumps_ANY(id, 0);
    sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr);
    free(idstr);
    if (!stratum_send(pool, s, strlen(s)))
        return false;

    return true;
}

static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params)
{
    char *msg;
    char s[RBUFSIZE], *idstr;
    json_t *id = json_object_get(val, "id");

    msg = json_array_string(params, 0);
    if (likely(msg))
    {
        free(pool->admin_msg);
        pool->admin_msg = msg;
        applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg);
    }

    if (!(id && !json_is_null(id)))
        return true;

    idstr = json_dumps_ANY(id, 0);
    if (likely(msg))
        sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr);
    else
        sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr);
    free(idstr);
    if (!stratum_send(pool, s, strlen(s)))
        return false;

    return true;
}

bool parse_method(struct pool *pool, char *s)
{
    json_t *val = NULL, *method, *err_val, *params;
    json_error_t err;
    bool ret = false;
    const char *buf;

    if (!s)
        goto out;

    val = JSON_LOADS(s, &err);
    if (!val) {
        applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
        goto out;
    }

    method = json_object_get(val, "method");
    if (!method)
        goto out;
    err_val = json_object_get(val, "error");
    params = json_object_get(val, "params");

    if (err_val && !json_is_null(err_val)) {
        char *ss;

        if (err_val)
            ss = json_dumps(err_val, JSON_INDENT(3));
        else
            ss = strdup("(unknown reason)");

        applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);
        free(ss);
        goto out;
    }

    buf = json_string_value(method);
    if (!buf)
        goto out;

    if (!strncasecmp(buf, "mining.notify", 13)) {
        if (parse_notify(pool, params))
            pool->stratum_notify = ret = true;
        else
            pool->stratum_notify = ret = false;
        goto out;
    }

    if (!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) {
        ret = true;
        goto out;
    }

    if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) {
        ret = true;
        goto out;
    }

    if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) {
        ret = true;
        goto out;
    }

    if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) {
        ret = true;
        goto out;
    }
out:
    if (val)
        json_decref(val);

    return ret;
}

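/* Illustrative input (values invented for the example): parse_method handles
 * newline-terminated JSON-RPC notifications such as
 *   {"id": null, "method": "mining.set_difficulty", "params": [16]}
 *   {"id": null, "method": "client.show_message", "params": ["Maintenance at 00:00 UTC"]}
 * and returns false for lines it does not recognise as a method call, which
 * callers such as auth_stratum below then treat as the pending response. */
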
extern bool parse_stratum_response(struct pool *, char *s);

bool auth_stratum(struct pool *pool)
{
    json_t *val = NULL, *res_val, *err_val;
    char s[RBUFSIZE], *sret = NULL;
    json_error_t err;
    bool ret = false;

    sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
            pool->rpc_user, pool->rpc_pass);

    if (!stratum_send(pool, s, strlen(s)))
        goto out;

    /* Parse all data in the queue and anything left should be auth */
    while (42) {
        sret = recv_line(pool);
        if (!sret)
            goto out;
        if (parse_method(pool, sret))
            free(sret);
        else
            break;
    }

    val = JSON_LOADS(sret, &err);
    free(sret);
    res_val = json_object_get(val, "result");
    err_val = json_object_get(val, "error");

    if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
        char *ss;

        if (err_val)
            ss = json_dumps(err_val, JSON_INDENT(3));
        else
            ss = strdup("(unknown reason)");
        applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
        free(ss);

        goto out;
    }

    ret = true;
    applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
    pool->probed = true;
    successful_connect = true;
out:
    if (val)
        json_decref(val);

    if (pool->stratum_notify)
        stratum_probe_transparency(pool);

    return ret;
}

curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
{
    struct pool *pool = clientp;
    curl_socket_t sck = socket(addr->family, addr->socktype, addr->protocol);
    pool->sock = sck;
    return sck;
}

static bool setup_stratum_curl(struct pool *pool)
{
    char curl_err_str[CURL_ERROR_SIZE];
    CURL *curl = NULL;
    char s[RBUFSIZE];
    bool ret = false;

    applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
    mutex_lock(&pool->stratum_lock);
    timer_unset(&pool->swork.tv_transparency);
    pool->stratum_active = false;
    pool->stratum_notify = false;
    pool->swork.transparency_probed = false;
    if (pool->stratum_curl)
        curl_easy_cleanup(pool->stratum_curl);
    pool->stratum_curl = curl_easy_init();
    if (unlikely(!pool->stratum_curl))
        quithere(1, "Failed to curl_easy_init");
    if (pool->sockbuf)
        pool->sockbuf[0] = '\0';
    curl = pool->stratum_curl;

    if (!pool->sockbuf) {
        pool->sockbuf = calloc(RBUFSIZE, 1);
        if (!pool->sockbuf)
            quithere(1, "Failed to calloc pool sockbuf");
        pool->sockbuf_size = RBUFSIZE;
    }

    /* Create a http url for use with curl */
    sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port);

    curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
    curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30);
    curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
    curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
    curl_easy_setopt(curl, CURLOPT_URL, s);
    if (!opt_delaynet)
        curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
    /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
     * to enable it */
    curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
    curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
    // CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this hack for now
    curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb);
    curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
    curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
    if (pool->rpc_proxy) {
        curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
    } else if (opt_socks_proxy) {
        curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
        curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
    }
    curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
    pool->sock = INVSOCK;
    if (curl_easy_perform(curl)) {
        applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
errout:
        curl_easy_cleanup(curl);
        pool->stratum_curl = NULL;
        goto out;
    }
    if (pool->sock == INVSOCK)
    {
        applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no);
        goto errout;
    }
    keep_sockalive(pool->sock);

    pool->cgminer_pool_stats.times_sent++;
    pool->cgminer_pool_stats.times_received++;
    ret = true;

out:
    mutex_unlock(&pool->stratum_lock);
    return ret;
}

static char *get_sessionid(json_t *val)
{
    char *ret = NULL;
    json_t *arr_val;
    int arrsize, i;

    arr_val = json_array_get(val, 0);
    if (!arr_val || !json_is_array(arr_val))
        goto out;
    arrsize = json_array_size(arr_val);
    for (i = 0; i < arrsize; i++) {
        json_t *arr = json_array_get(arr_val, i);
        const char *notify;

        if (!arr || !json_is_array(arr))
            break;
        notify = __json_array_string(arr, 0);
        if (!notify)
            continue;
        if (!strncasecmp(notify, "mining.notify", 13)) {
            ret = json_array_string(arr, 1);
            break;
        }
    }
out:
    return ret;
}

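/* Example of the subscribe result this walks (the shape follows the indices
 * used here and in initiate_stratum; the literal values are invented):
 *   [[["mining.set_difficulty", "deadbeef1"], ["mining.notify", "deadbeef2"]],
 *    "08000002", 4]
 * get_sessionid returns a copy of "deadbeef2"; entries [1] and [2] are the
 * extranonce1 and extranonce2 size consumed by initiate_stratum below. */
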
void suspend_stratum(struct pool *pool)
{
    clear_sockbuf(pool);
    applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);

    mutex_lock(&pool->stratum_lock);
    pool->stratum_active = pool->stratum_notify = false;
    if (pool->stratum_curl) {
        curl_easy_cleanup(pool->stratum_curl);
    }
    pool->stratum_curl = NULL;
    pool->sock = INVSOCK;
    mutex_unlock(&pool->stratum_lock);
}

bool initiate_stratum(struct pool *pool)
{
    bool ret = false, recvd = false, noresume = false, sockd = false;
    bool trysuggest = request_target_str;
    char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid;
    json_t *val = NULL, *res_val, *err_val;
    json_error_t err;
    int n2size;

resend:
    if (!setup_stratum_curl(pool)) {
        sockd = false;
        goto out;
    }

    sockd = true;

    clear_sock(pool);

    if (trysuggest)
    {
        int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str);
        if (!_stratum_send(pool, s, sz, true))
        {
            applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no);
            goto out;
        }
        recvd = true;
    }

    if (noresume) {
        sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
    } else {
        if (pool->sessionid)
            sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
        else
            sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
    }

    if (!_stratum_send(pool, s, strlen(s), true)) {
        applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
        goto out;
    }

    recvd = true;

    if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
        applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
        goto out;
    }

    sret = recv_line(pool);
    if (!sret)
        goto out;

    val = JSON_LOADS(sret, &err);
    free(sret);
    if (!val) {
        applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
        goto out;
    }

    res_val = json_object_get(val, "result");
    err_val = json_object_get(val, "error");

    if (!res_val || json_is_null(res_val) ||
        (err_val && !json_is_null(err_val))) {
        char *ss;

        if (err_val)
            ss = json_dumps(err_val, JSON_INDENT(3));
        else
            ss = strdup("(unknown reason)");

        applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);
        free(ss);

        goto out;
    }

    sessionid = get_sessionid(res_val);
    if (!sessionid)
        applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
    nonce1 = json_array_string(res_val, 1);
    if (!nonce1) {
        applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");
        free(sessionid);
        goto out;
    }
    n2size = json_integer_value(json_array_get(res_val, 2));
    if (!n2size) {
        applog(LOG_INFO, "Failed to get n2size in initiate_stratum");
        free(sessionid);
        free(nonce1);
        goto out;
    }

    cg_wlock(&pool->data_lock);
    free(pool->sessionid);
    pool->sessionid = sessionid;
    free(pool->nonce1);
    pool->nonce1 = nonce1;
    pool->n1_len = strlen(nonce1) / 2;
    pool->n2size = n2size;
    pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size;
#ifdef WORDS_BIGENDIAN
    pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0;
#endif
    cg_wunlock(&pool->data_lock);

    if (sessionid)
        applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);

    ret = true;
out:
    if (val)
    {
        json_decref(val);
        val = NULL;
    }

    if (ret) {
        if (!pool->stratum_url)
            pool->stratum_url = pool->sockaddr_url;
        pool->stratum_active = true;
        pool->swork.diff = 1;
        if (opt_protocol) {
            applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
                   pool->pool_no, pool->nonce1, pool->n2size);
        }
    } else {
        if (recvd)
        {
            if (trysuggest)
            {
                applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no);
                trysuggest = false;
                goto resend;
            }
            if (!noresume)
            {
                applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
                noresume = true;
                goto resend;
            }
        }
        applog(LOG_DEBUG, "Initiate stratum failed");
        if (sockd)
            suspend_stratum(pool);
    }

    return ret;
}

bool restart_stratum(struct pool *pool)
{
    if (pool->stratum_active)
        suspend_stratum(pool);
    if (!initiate_stratum(pool))
        return false;
    if (!auth_stratum(pool))
        return false;
    return true;
}

void dev_error_update(struct cgpu_info *dev, enum dev_reason reason)
{
    dev->device_last_not_well = time(NULL);
    cgtime(&dev->tv_device_last_not_well);
    dev->device_not_well_reason = reason;
}

void dev_error(struct cgpu_info *dev, enum dev_reason reason)
{
    dev_error_update(dev, reason);

    switch (reason) {
        case REASON_THREAD_FAIL_INIT:
            dev->thread_fail_init_count++;
            break;
        case REASON_THREAD_ZERO_HASH:
            dev->thread_zero_hash_count++;
            break;
        case REASON_THREAD_FAIL_QUEUE:
            dev->thread_fail_queue_count++;
            break;
        case REASON_DEV_SICK_IDLE_60:
            dev->dev_sick_idle_60_count++;
            break;
        case REASON_DEV_DEAD_IDLE_600:
            dev->dev_dead_idle_600_count++;
            break;
        case REASON_DEV_NOSTART:
            dev->dev_nostart_count++;
            break;
        case REASON_DEV_OVER_HEAT:
            dev->dev_over_heat_count++;
            break;
        case REASON_DEV_THERMAL_CUTOFF:
            dev->dev_thermal_cutoff_count++;
            break;
        case REASON_DEV_COMMS_ERROR:
            dev->dev_comms_error_count++;
            break;
        case REASON_DEV_THROTTLE:
            dev->dev_throttle_count++;
            break;
    }
}

/* Appends the string s to the existing string ptr, returning a freshly
 * allocated result and freeing the original ptr. */
void *realloc_strcat(char *ptr, char *s)
{
    size_t old = strlen(ptr), len = strlen(s);
    char *ret;

    if (!len)
        return ptr;

    len += old + 1;
    align_len(&len);

    ret = malloc(len);
    if (unlikely(!ret))
        quithere(1, "Failed to malloc");

    sprintf(ret, "%s%s", ptr, s);
    free(ptr);
    return ret;
}

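/* Hypothetical usage (not in the original source): the caller must replace its
 * own pointer with the return value, since the old allocation may be freed:
 *
 *   char *msg = strdup("Pool ");
 *   msg = realloc_strcat(msg, "alive");   // msg now reads "Pool alive"
 *   free(msg);
 */
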
static
bool sanechars[] = {
    false, false, false, false, false, false, false, false,
    false, false, false, false, false, false, false, false,
    false, false, false, false, false, false, false, false,
    false, false, false, false, false, false, false, false,
    false, false, false, false, false, false, false, false,
    false, false, false, false, false, false, false, false,
    true , true , true , true , true , true , true , true ,
    true , true , false, false, false, false, false, false,
    false, true , true , true , true , true , true , true ,
    true , true , true , true , true , true , true , true ,
    true , true , true , true , true , true , true , true ,
    true , true , true , false, false, false, false, false,
    false, true , true , true , true , true , true , true ,
    true , true , true , true , true , true , true , true ,
    true , true , true , true , true , true , true , true ,
    true , true , true , false, false, false, false, false,
};

char *sanestr(char *o, char *s)
{
    char *rv = o;
    bool br = false;

    for ( ; s[0]; ++s)
    {
        if (sanechars[s[0] & 0x7f])
        {
            if (br)
            {
                br = false;
                if (s[0] >= '0' && s[0] <= '9')
                    (o++)[0] = '_';
            }
            (o++)[0] = s[0];
        }
        else if (o != s && o[-1] >= '0' && o[-1] <= '9')
            br = true;
    }
    o[0] = '\0';
    return rv;
}

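/* Worked example (added for clarity, not in the original source): only ASCII
 * letters and digits are "sane", and an underscore is inserted where stripping
 * junk would otherwise run two numbers together:
 *   sanestr(buf, "BFL 2.1")    ->  "BFL2_1"
 *   sanestr(buf, "3.5 V rail") ->  "3_5Vrail"
 */
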
void RenameThread(const char* name)
{
#if defined(PR_SET_NAME)
    // Only the first 15 characters are used (16 - NUL terminator)
    prctl(PR_SET_NAME, name, 0, 0, 0);
#elif defined(__APPLE__)
    pthread_setname_np(name);
#elif (defined(__FreeBSD__) || defined(__OpenBSD__))
    pthread_set_name_np(pthread_self(), name);
#else
    // Prevent warnings for unused parameters...
    (void)name;
#endif
}

static pthread_key_t key_bfgtls;

struct bfgtls_data {
    char *bfg_strerror_result;
    size_t bfg_strerror_resultsz;
#ifdef WIN32
    LPSTR bfg_strerror_socketresult;
#endif
#ifdef NEED_BFG_LOWL_VCOM
    struct detectone_meta_info_t __detectone_meta_info;
#endif
};

static
struct bfgtls_data *get_bfgtls()
{
    struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls);
    if (bfgtls)
        return bfgtls;

    void *p;
    bfgtls = malloc(sizeof(*bfgtls));
    if (!bfgtls)
        quithere(1, "malloc bfgtls failed");
    p = malloc(64);
    if (!p)
        quithere(1, "malloc bfg_strerror_result failed");
    *bfgtls = (struct bfgtls_data){
        .bfg_strerror_resultsz = 64,
        .bfg_strerror_result = p,
    };
    if (pthread_setspecific(key_bfgtls, bfgtls))
        quithere(1, "pthread_setspecific failed");
    return bfgtls;
}

#ifdef NEED_BFG_LOWL_VCOM
struct detectone_meta_info_t *_detectone_meta_info()
{
    return &get_bfgtls()->__detectone_meta_info;
}
#endif

void bfg_init_threadlocal()
{
    if (pthread_key_create(&key_bfgtls, NULL))
        quithere(1, "pthread_key_create failed");
}

static
bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum)
{
    if (minimum <= *bufszp)
        return false;
    while (minimum > *bufszp)
        *bufszp *= 2;
    *bufp = realloc(*bufp, *bufszp);
    if (unlikely(!*bufp))
        quithere(1, "realloc failed");
    return true;
}

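/* Growth example (illustrative only): with the 64-byte buffer allocated in
 * get_bfgtls, a request for 200 bytes doubles 64 -> 128 -> 256 and reallocs to
 * 256 bytes; a request for 48 bytes returns false and leaves the buffer alone. */
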
static
const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src)
{
    if (!src)
        return NULL;
    const size_t srcsz = strlen(src) + 1;
    bfg_grow_buffer(bufp, bufszp, srcsz);
    memcpy(*bufp, src, srcsz);
    return *bufp;
}

// Guaranteed to always return some string (or quit)
const char *bfg_strerror(int e, enum bfg_strerror_type type)
{
    static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    struct bfgtls_data *bfgtls = get_bfgtls();
    size_t * const bufszp = &bfgtls->bfg_strerror_resultsz;
    char ** const bufp = &bfgtls->bfg_strerror_result;
    const char *have = NULL;

    switch (type) {
        case BST_LIBUSB:
            // NOTE: Nested preprocessor checks since the latter isn't defined at all without the former
#ifdef HAVE_LIBUSB
# if HAVE_DECL_LIBUSB_ERROR_NAME
            // libusb makes no guarantees for thread-safety or persistence
            mutex_lock(&mutex);
            have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e));
            mutex_unlock(&mutex);
# endif
#endif
            break;
        case BST_SOCKET:
        case BST_SYSTEM:
        {
#ifdef WIN32
            // Windows has a different namespace for system and socket errors
            LPSTR *msg = &bfgtls->bfg_strerror_socketresult;
            if (*msg)
                LocalFree(*msg);
            if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0))
                return *msg;
            *msg = NULL;
            break;
#endif
        }
        // Fallthru on non-WIN32
        case BST_ERRNO:
        {
#ifdef __STRERROR_S_WORKS
            // FIXME: Not sure how to get this on MingW64
retry:
            if (likely(!strerror_s(*bufp, *bufszp, e)))
            {
                if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2))
                    goto retry;
                return *bufp;
            }
            // TODO: XSI strerror_r
            // TODO: GNU strerror_r
#else
            mutex_lock(&mutex);
            have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e));
            mutex_unlock(&mutex);
#endif
        }
    }
    if (have)
        return *bufp;

    // Fallback: stringify the number
    static const char fmt[] = "%s error #%d", *typestr;
    switch (type) {
        case BST_ERRNO:
            typestr = "System";
            break;
        case BST_SOCKET:
            typestr = "Socket";
            break;
        case BST_LIBUSB:
            typestr = "libusb";
            break;
        default:
            typestr = "Unexpected";
    }
    int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1;
    bfg_grow_buffer(bufp, bufszp, sz);
    sprintf(*bufp, fmt, typestr, e);
    return *bufp;
}

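/* Hypothetical usage (not part of the original source): the returned pointer
 * refers to per-thread storage that is reused by the next bfg_strerror call on
 * the same thread, so consume it immediately, e.g.
 *
 *   applog(LOG_ERR, "recv failed: %s", bfg_strerror(SOCKERR, BST_SOCKET));
 */
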
void notifier_init(notifier_t pipefd)
{
#ifdef WIN32
#define WindowsErrorStr(e) bfg_strerror(e, BST_SOCKET)
    SOCKET listener, connecter, acceptor;
    listener = socket(AF_INET, SOCK_STREAM, 0);
    if (listener == INVALID_SOCKET)
        quit(1, "Failed to create listener socket"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    connecter = socket(AF_INET, SOCK_STREAM, 0);
    if (connecter == INVALID_SOCKET)
        quit(1, "Failed to create connect socket"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    struct sockaddr_in inaddr = {
        .sin_family = AF_INET,
        .sin_addr = {
            .s_addr = htonl(INADDR_LOOPBACK),
        },
        .sin_port = 0,
    };
    {
        static const int reuse = 1;
        setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse));
    }
    if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR)
        quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    socklen_t inaddr_sz = sizeof(inaddr);
    if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR)
        quit(1, "Failed to getsockname"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    if (listen(listener, 1) == SOCKET_ERROR)
        quit(1, "Failed to listen"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    inaddr.sin_family = AF_INET;
    inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR)
        quit(1, "Failed to connect"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    acceptor = accept(listener, NULL, NULL);
    if (acceptor == INVALID_SOCKET)
        quit(1, "Failed to accept"IN_FMT_FFL": %s",
             __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
    closesocket(listener);
    pipefd[0] = connecter;
    pipefd[1] = acceptor;
#else
    if (pipe(pipefd))
        quithere(1, "Failed to create pipe");
#endif
}

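/* Sketch of the intended pattern (illustrative, not from the original source):
 * a waiting thread includes fd[0] in its select()/poll() read set; any other
 * thread calls notifier_wake(fd) to make that select() return, and the waiter
 * calls notifier_read(fd) to drain the wakeup byte(s) before waiting again.
 * notifier_destroy(fd) closes both ends once the notifier is no longer needed. */
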
void notifier_wake(notifier_t fd)
{
    if (fd[1] == INVSOCK)
        return;
    if (1 !=
#ifdef WIN32
        send(fd[1], "\0", 1, 0)
#else
        write(fd[1], "\0", 1)
#endif
    )
        applog(LOG_WARNING, "Error trying to wake notifier");
}

void notifier_read(notifier_t fd)
{
    char buf[0x10];
#ifdef WIN32
    IGNORE_RETURN_VALUE(recv(fd[0], buf, sizeof(buf), 0));
#else
    IGNORE_RETURN_VALUE(read(fd[0], buf, sizeof(buf)));
#endif
}

void notifier_init_invalid(notifier_t fd)
{
    fd[0] = fd[1] = INVSOCK;
}

void notifier_destroy(notifier_t fd)
{
#ifdef WIN32
    closesocket(fd[0]);
    closesocket(fd[1]);
#else
    close(fd[0]);
    close(fd[1]);
#endif
    fd[0] = fd[1] = INVSOCK;
}

void _bytes_alloc_failure(size_t sz)
{
    quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz);
}

void *cmd_thread(void *cmdp)
{
    const char *cmd = cmdp;
    applog(LOG_DEBUG, "Executing command: %s", cmd);
    int rc = system(cmd);
    if (rc)
        applog(LOG_WARNING, "Command returned %d exit code: %s", rc, cmd);
    return NULL;
}

void run_cmd(const char *cmd)
{
    if (!cmd)
        return;
    pthread_t pth;
    pthread_create(&pth, NULL, cmd_thread, (void*)cmd);
}