util.c

  1. /*
  2. * Copyright 2011-2013 Con Kolivas
  3. * Copyright 2011-2013 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. * Copyright 2012 Giel van Schijndel
  6. * Copyright 2012 Gavin Andresen
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 3 of the License, or (at your option)
  11. * any later version. See COPYING for more details.
  12. */
  13. #include "config.h"
  14. #include <stdio.h>
  15. #include <stdlib.h>
  16. #include <ctype.h>
  17. #include <stdarg.h>
  18. #include <string.h>
  19. #include <pthread.h>
  20. #include <jansson.h>
  21. #include <curl/curl.h>
  22. #include <time.h>
  23. #include <errno.h>
  24. #include <unistd.h>
  25. #include <sys/types.h>
  26. #ifdef HAVE_SYS_PRCTL_H
  27. # include <sys/prctl.h>
  28. #endif
  29. #if defined(__FreeBSD__) || defined(__OpenBSD__)
  30. # include <pthread_np.h>
  31. #endif
  32. #ifndef WIN32
  33. #include <fcntl.h>
  34. # ifdef __linux
  35. # include <sys/prctl.h>
  36. # endif
  37. # include <sys/socket.h>
  38. # include <netinet/in.h>
  39. # include <netinet/tcp.h>
  40. # include <netdb.h>
  41. #else
  42. # include <windows.h>
  43. # include <winsock2.h>
  44. # include <mstcpip.h>
  45. # include <ws2tcpip.h>
  46. # include <mmsystem.h>
  47. #endif
  48. #include <utlist.h>
  49. #include "miner.h"
  50. #include "compat.h"
  51. #include "util.h"
  52. #define DEFAULT_SOCKWAIT 60
  53. bool successful_connect = false;
  54. struct timeval nettime;
  55. struct data_buffer {
  56. void *buf;
  57. size_t len;
  58. curl_socket_t *idlemarker;
  59. };
  60. struct upload_buffer {
  61. const void *buf;
  62. size_t len;
  63. };
  64. struct header_info {
  65. char *lp_path;
  66. int rolltime;
  67. char *reason;
  68. char *stratum_url;
  69. bool hadrolltime;
  70. bool canroll;
  71. bool hadexpire;
  72. };
  73. struct tq_ent {
  74. void *data;
  75. struct tq_ent *prev;
  76. struct tq_ent *next;
  77. };
  78. static void databuf_free(struct data_buffer *db)
  79. {
  80. if (!db)
  81. return;
  82. free(db->buf);
  83. #ifdef DEBUG_DATABUF
  84. applog(LOG_DEBUG, "databuf_free(%p)", db->buf);
  85. #endif
  86. memset(db, 0, sizeof(*db));
  87. }
  88. // aka data_buffer_write
  89. static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb,
  90. void *user_data)
  91. {
  92. struct data_buffer *db = user_data;
  93. size_t oldlen, newlen;
  94. oldlen = db->len;
  95. if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size))
  96. return 0;
  97. if (unlikely(nmemb > (SIZE_MAX - oldlen) / size))
  98. nmemb = (SIZE_MAX - oldlen) / size;
  99. size_t len = size * nmemb;
  100. void *newmem;
  101. static const unsigned char zero = 0;
  102. if (db->idlemarker) {
  103. const unsigned char *cptr = ptr;
  104. for (size_t i = 0; i < len; ++i)
  105. if (!(isCspace(cptr[i]) || cptr[i] == '{')) {
  106. *db->idlemarker = CURL_SOCKET_BAD;
  107. db->idlemarker = NULL;
  108. break;
  109. }
  110. }
  111. newlen = oldlen + len;
  112. newmem = realloc(db->buf, newlen + 1);
  113. #ifdef DEBUG_DATABUF
  114. applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem);
  115. #endif
  116. if (!newmem)
  117. return 0;
  118. db->buf = newmem;
  119. db->len = newlen;
  120. memcpy(db->buf + oldlen, ptr, len);
  121. memcpy(db->buf + newlen, &zero, 1); /* null terminate */
  122. return nmemb;
  123. }
  124. static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb,
  125. void *user_data)
  126. {
  127. struct upload_buffer *ub = user_data;
  128. unsigned int len = size * nmemb;
  129. if (len > ub->len)
  130. len = ub->len;
  131. if (len) {
  132. memcpy(ptr, ub->buf, len);
  133. ub->buf += len;
  134. ub->len -= len;
  135. }
  136. return len;
  137. }
  138. static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
  139. {
  140. struct header_info *hi = user_data;
  141. size_t remlen, slen, ptrlen = size * nmemb;
  142. char *rem, *val = NULL, *key = NULL;
  143. void *tmp;
  144. val = calloc(1, ptrlen);
  145. key = calloc(1, ptrlen);
  146. if (!key || !val)
  147. goto out;
  148. tmp = memchr(ptr, ':', ptrlen);
  149. if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */
  150. goto out;
  151. slen = tmp - ptr;
  152. if ((slen + 1) == ptrlen) /* skip key w/ no value */
  153. goto out;
  154. memcpy(key, ptr, slen); /* store & nul term key */
  155. key[slen] = 0;
  156. rem = ptr + slen + 1; /* trim value's leading whitespace */
  157. remlen = ptrlen - slen - 1;
  158. while ((remlen > 0) && (isCspace(*rem))) {
  159. remlen--;
  160. rem++;
  161. }
  162. memcpy(val, rem, remlen); /* store value, trim trailing ws */
  163. val[remlen] = 0;
  164. while ((*val) && (isCspace(val[strlen(val) - 1])))
  165. val[strlen(val) - 1] = 0;
  166. if (!*val) /* skip blank value */
  167. goto out;
  168. if (opt_protocol)
  169. applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);
  170. if (!strcasecmp("X-Roll-Ntime", key)) {
  171. hi->hadrolltime = true;
  172. if (!strncasecmp("N", val, 1))
  173. applog(LOG_DEBUG, "X-Roll-Ntime: N found");
  174. else {
  175. hi->canroll = true;
  176. /* Check to see if expire= is supported and if not, set
  177. * the rolltime to the default scantime */
  178. if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) {
  179. sscanf(val + 7, "%d", &hi->rolltime);
  180. hi->hadexpire = true;
  181. } else
  182. hi->rolltime = opt_scantime;
  183. applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
  184. }
  185. }
  186. if (!strcasecmp("X-Long-Polling", key)) {
  187. hi->lp_path = val; /* steal memory reference */
  188. val = NULL;
  189. }
  190. if (!strcasecmp("X-Reject-Reason", key)) {
  191. hi->reason = val; /* steal memory reference */
  192. val = NULL;
  193. }
  194. if (!strcasecmp("X-Stratum", key)) {
  195. hi->stratum_url = val;
  196. val = NULL;
  197. }
  198. out:
  199. free(key);
  200. free(val);
  201. return ptrlen;
  202. }
  203. static int keep_sockalive(SOCKETTYPE fd)
  204. {
  205. const int tcp_one = 1;
  206. const int tcp_keepidle = 45;
  207. const int tcp_keepintvl = 30;
  208. int ret = 0;
  209. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one))))
  210. ret = 1;
  211. #ifndef WIN32
  212. int flags = fcntl(fd, F_GETFL, 0);
  213. fcntl(fd, F_SETFL, O_NONBLOCK | flags);
  214. #else
  215. u_long flags = 1;
  216. ioctlsocket(fd, FIONBIO, &flags);
  217. #endif
  218. if (!opt_delaynet)
  219. #ifndef __linux
  220. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  221. #else /* __linux */
  222. if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  223. #endif /* __linux */
  224. ret = 1;
  225. #ifdef __linux
  226. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one))))
  227. ret = 1;
  228. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle))))
  229. ret = 1;
  230. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  231. ret = 1;
  232. #endif /* __linux */
  233. #ifdef __APPLE_CC__
  234. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  235. ret = 1;
  236. #endif /* __APPLE_CC__ */
  237. #ifdef WIN32
  238. const int zero = 0;
  239. struct tcp_keepalive vals;
  240. vals.onoff = 1;
  241. vals.keepalivetime = tcp_keepidle * 1000;
  242. vals.keepaliveinterval = tcp_keepintvl * 1000;
  243. DWORD outputBytes;
  244. if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL)))
  245. ret = 1;
  246. /* Windows happily submits indefinitely to the send buffer blissfully
  247. * unaware nothing is getting there without gracefully failing unless
  248. * we disable the send buffer */
  249. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero))))
  250. ret = 1;
  251. #endif /* WIN32 */
  252. return ret;
  253. }
  254. int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
  255. curlsocktype __maybe_unused purpose)
  256. {
  257. return keep_sockalive(fd);
  258. }
  259. static void last_nettime(struct timeval *last)
  260. {
  261. rd_lock(&netacc_lock);
  262. last->tv_sec = nettime.tv_sec;
  263. last->tv_usec = nettime.tv_usec;
  264. rd_unlock(&netacc_lock);
  265. }
  266. static void set_nettime(void)
  267. {
  268. wr_lock(&netacc_lock);
  269. cgtime(&nettime);
  270. wr_unlock(&netacc_lock);
  271. }
  272. static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
  273. char *data, size_t size,
  274. void *userdata)
  275. {
  276. struct pool *pool = (struct pool *)userdata;
  277. switch(type) {
  278. case CURLINFO_HEADER_IN:
  279. case CURLINFO_DATA_IN:
  280. case CURLINFO_SSL_DATA_IN:
  281. pool->cgminer_pool_stats.bytes_received += size;
  282. total_bytes_rcvd += size;
  283. pool->cgminer_pool_stats.net_bytes_received += size;
  284. break;
  285. case CURLINFO_HEADER_OUT:
  286. case CURLINFO_DATA_OUT:
  287. case CURLINFO_SSL_DATA_OUT:
  288. pool->cgminer_pool_stats.bytes_sent += size;
  289. total_bytes_sent += size;
  290. pool->cgminer_pool_stats.net_bytes_sent += size;
  291. break;
  292. case CURLINFO_TEXT:
  293. {
  294. if (!opt_protocol)
  295. break;
  296. // data is not null-terminated, so we need to copy and terminate it for applog
  297. char datacp[size + 1];
  298. memcpy(datacp, data, size);
  299. while (likely(size) && unlikely(isCspace(datacp[size-1])))
  300. --size;
  301. if (unlikely(!size))
  302. break;
  303. datacp[size] = '\0';
  304. applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp);
  305. break;
  306. }
  307. default:
  308. break;
  309. }
  310. return 0;
  311. }
  312. struct json_rpc_call_state {
  313. struct data_buffer all_data;
  314. struct header_info hi;
  315. void *priv;
  316. char curl_err_str[CURL_ERROR_SIZE];
  317. struct curl_slist *headers;
  318. struct upload_buffer upload_data;
  319. struct pool *pool;
  320. };
  321. void json_rpc_call_async(CURL *curl, const char *url,
  322. const char *userpass, const char *rpc_req,
  323. bool longpoll,
  324. struct pool *pool, bool share,
  325. void *priv)
  326. {
  327. struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state));
  328. *state = (struct json_rpc_call_state){
  329. .priv = priv,
  330. .pool = pool,
  331. };
  332. long timeout = longpoll ? (60 * 60) : 60;
  333. char len_hdr[64], user_agent_hdr[128];
  334. struct curl_slist *headers = NULL;
  335. if (longpoll)
  336. state->all_data.idlemarker = &pool->lp_socket;
  337. /* it is assumed that 'curl' is freshly [re]initialized at this pt */
  338. curl_easy_setopt(curl, CURLOPT_PRIVATE, state);
  339. curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
  340. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  341. * to enable it */
  342. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  343. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  344. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  345. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  346. curl_easy_setopt(curl, CURLOPT_URL, url);
  347. curl_easy_setopt(curl, CURLOPT_ENCODING, "");
  348. curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
  349. /* Shares are staggered already and delays in submission can be costly
  350. * so do not delay them */
  351. if (!opt_delaynet || share)
  352. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  353. curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
  354. curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data);
  355. curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
  356. curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data);
  357. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]);
  358. curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
  359. curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
  360. curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi);
  361. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  362. if (pool->rpc_proxy) {
  363. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  364. } else if (opt_socks_proxy) {
  365. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  366. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4);
  367. }
  368. if (userpass) {
  369. curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
  370. curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
  371. }
  372. if (longpoll)
  373. curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb);
  374. curl_easy_setopt(curl, CURLOPT_POST, 1);
  375. if (opt_protocol)
  376. applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req);
  377. state->upload_data.buf = rpc_req;
  378. state->upload_data.len = strlen(rpc_req);
  379. sprintf(len_hdr, "Content-Length: %lu",
  380. (unsigned long) state->upload_data.len);
  381. sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION);
  382. headers = curl_slist_append(headers,
  383. "Content-type: application/json");
  384. headers = curl_slist_append(headers,
  385. "X-Mining-Extensions: longpoll midstate rollntime submitold");
  386. if (longpoll)
  387. headers = curl_slist_append(headers,
  388. "X-Minimum-Wait: 0");
  389. if (likely(global_hashrate)) {
  390. char ghashrate[255];
  391. sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate);
  392. headers = curl_slist_append(headers, ghashrate);
  393. }
  394. headers = curl_slist_append(headers, len_hdr);
  395. headers = curl_slist_append(headers, user_agent_hdr);
  396. headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/
  397. curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  398. state->headers = headers;
  399. if (opt_delaynet) {
  400. /* Don't delay share submission, but still track the nettime */
  401. if (!share) {
  402. long long now_msecs, last_msecs;
  403. struct timeval now, last;
  404. cgtime(&now);
  405. last_nettime(&last);
  406. now_msecs = (long long)now.tv_sec * 1000;
  407. now_msecs += now.tv_usec / 1000;
  408. last_msecs = (long long)last.tv_sec * 1000;
  409. last_msecs += last.tv_usec / 1000;
  410. if (now_msecs > last_msecs && now_msecs - last_msecs < 250) {
  411. struct timespec rgtp;
  412. rgtp.tv_sec = 0;
  413. rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000;
  414. nanosleep(&rgtp, NULL);
  415. }
  416. }
  417. set_nettime();
  418. }
  419. }
  420. json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int *rolltime, void *out_priv)
  421. {
  422. struct json_rpc_call_state *state;
  423. if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &state) != CURLE_OK) {
  424. applog(LOG_ERR, "Failed to get private curl data");
  425. if (out_priv)
  426. *(void**)out_priv = NULL;
  427. goto err_out;
  428. }
  429. if (out_priv)
  430. *(void**)out_priv = state->priv;
  431. json_t *val, *err_val, *res_val;
  432. json_error_t err;
  433. struct pool *pool = state->pool;
  434. bool probing = probe && !pool->probed;
  435. if (rc) {
  436. applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str);
  437. goto err_out;
  438. }
  439. if (!state->all_data.buf) {
  440. applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
  441. goto err_out;
  442. }
  443. pool->cgminer_pool_stats.times_sent++;
  444. pool->cgminer_pool_stats.times_received++;
  445. if (probing) {
  446. pool->probed = true;
  447. /* If X-Long-Polling was found, activate long polling */
  448. if (state->hi.lp_path) {
  449. if (pool->hdr_path != NULL)
  450. free(pool->hdr_path);
  451. pool->hdr_path = state->hi.lp_path;
  452. } else
  453. pool->hdr_path = NULL;
  454. if (state->hi.stratum_url) {
  455. pool->stratum_url = state->hi.stratum_url;
  456. state->hi.stratum_url = NULL;
  457. }
  458. } else {
  459. if (state->hi.lp_path) {
  460. free(state->hi.lp_path);
  461. state->hi.lp_path = NULL;
  462. }
  463. if (state->hi.stratum_url) {
  464. free(state->hi.stratum_url);
  465. state->hi.stratum_url = NULL;
  466. }
  467. }
  468. if (pool->force_rollntime)
  469. {
  470. state->hi.canroll = true;
  471. state->hi.hadexpire = true;
  472. state->hi.rolltime = pool->force_rollntime;
  473. }
  474. if (rolltime)
  475. *rolltime = state->hi.rolltime;
  476. pool->cgminer_pool_stats.rolltime = state->hi.rolltime;
  477. pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime;
  478. pool->cgminer_pool_stats.canroll = state->hi.canroll;
  479. pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire;
  480. val = JSON_LOADS(state->all_data.buf, &err);
  481. if (!val) {
  482. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  483. if (opt_protocol)
  484. applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf);
  485. goto err_out;
  486. }
  487. if (opt_protocol) {
  488. char *s = json_dumps(val, JSON_INDENT(3));
  489. applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
  490. free(s);
  491. }
  492. /* JSON-RPC valid response returns a non-null 'result',
  493. * and a null 'error'.
  494. */
  495. res_val = json_object_get(val, "result");
  496. err_val = json_object_get(val, "error");
  497. if (!res_val ||(err_val && !json_is_null(err_val))) {
  498. char *s;
  499. if (err_val)
  500. s = json_dumps(err_val, JSON_INDENT(3));
  501. else
  502. s = strdup("(unknown reason)");
  503. applog(LOG_INFO, "JSON-RPC call failed: %s", s);
  504. free(s);
  505. json_decref(val);
  506. goto err_out;
  507. }
  508. if (state->hi.reason) {
  509. json_object_set_new(val, "reject-reason", json_string(state->hi.reason));
  510. free(state->hi.reason);
  511. state->hi.reason = NULL;
  512. }
  513. successful_connect = true;
  514. databuf_free(&state->all_data);
  515. curl_slist_free_all(state->headers);
  516. curl_easy_reset(curl);
  517. free(state);
  518. return val;
  519. err_out:
  520. databuf_free(&state->all_data);
  521. curl_slist_free_all(state->headers);
  522. curl_easy_reset(curl);
  523. if (!successful_connect)
  524. applog(LOG_DEBUG, "Failed to connect in json_rpc_call");
  525. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  526. free(state);
  527. return NULL;
  528. }
  529. json_t *json_rpc_call(CURL *curl, const char *url,
  530. const char *userpass, const char *rpc_req,
  531. bool probe, bool longpoll, int *rolltime,
  532. struct pool *pool, bool share)
  533. {
  534. json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
  535. int rc = curl_easy_perform(curl);
  536. return json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
  537. }
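/* json_rpc_call() above is the synchronous wrapper around the async pair:
 * json_rpc_call_async() configures the handle, the caller performs the
 * transfer, and json_rpc_call_completed() parses the response and frees the
 * per-call state.  A minimal sketch of driving the same pair from a
 * curl_multi loop (illustrative only; not necessarily how this file's
 * callers are structured):
 *
 *   json_rpc_call_async(curl, url, userpass, req, false, pool, false, NULL);
 *   curl_multi_add_handle(multi, curl);
 *   // run curl_multi_perform() until curl_multi_info_read() reports a
 *   // CURLMSG_DONE message for 'curl', then:
 *   curl_multi_remove_handle(multi, curl);
 *   json_t *val = json_rpc_call_completed(curl, msg->data.result, false,
 *                                         &rolltime, NULL);
 */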
  538. bool our_curl_supports_proxy_uris()
  539. {
  540. curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
  541. return data->age && data->version_num >= ((7 << 16) | (21 << 8) | 7); // 7.21.7
  542. }
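/* libcurl packs its runtime version into version_num as 0xXXYYZZ
 * (major, minor, patch), so 7.21.7 encodes to (7 << 16) | (21 << 8) | 7
 * == 0x071507; the check above simply compares against that constant. */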
  543. // NOTE: This assumes reference URI is a root
  544. char *absolute_uri(char *uri, const char *ref)
  545. {
  546. if (strstr(uri, "://"))
  547. return strdup(uri);
  548. char *copy_start, *abs;
  549. bool need_slash = false;
  550. copy_start = (uri[0] == '/') ? &uri[1] : uri;
  551. if (ref[strlen(ref) - 1] != '/')
  552. need_slash = true;
  553. abs = malloc(strlen(ref) + strlen(copy_start) + 2);
  554. if (!abs) {
  555. applog(LOG_ERR, "Malloc failure in absolute_uri");
  556. return NULL;
  557. }
  558. sprintf(abs, "%s%s%s", ref, need_slash ? "/" : "", copy_start);
  559. return abs;
  560. }
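/* Illustrative only (hypothetical values): with ref = "http://pool.example.com"
 * and uri = "/LP", absolute_uri() returns "http://pool.example.com/LP"; if uri
 * already contains "://" it is returned as a plain copy.  The caller owns the
 * returned string and must free() it. */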
  561. static const char _hexchars[0x10] = "0123456789abcdef";
  562. void bin2hex(char *out, const void *in, size_t len)
  563. {
  564. const unsigned char *p = in;
  565. while (len--)
  566. {
  567. (out++)[0] = _hexchars[p[0] >> 4];
  568. (out++)[0] = _hexchars[p[0] & 0xf];
  569. ++p;
  570. }
  571. out[0] = '\0';
  572. }
  573. static inline
  574. int _hex2bin_char(const char c)
  575. {
  576. if (c >= '0' && c <= '9')
  577. return c - '0';
  578. if (c >= 'a' && c <= 'f')
  579. return (c - 'a') + 10;
  580. if (c >= 'A' && c <= 'F')
  581. return (c - 'A') + 10;
  582. return -1;
  583. }
  584. /* Does the reverse of bin2hex but does not allocate any ram */
  585. bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
  586. {
  587. int n, o;
  588. while (len--)
  589. {
  590. n = _hex2bin_char((hexstr++)[0]);
  591. if (unlikely(n == -1))
  592. {
  593. badchar:
  594. if (!hexstr[-1])
  595. applog(LOG_ERR, "hex2bin: str truncated");
  596. else
  597. applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]);
  598. return false;
  599. }
  600. o = _hex2bin_char((hexstr++)[0]);
  601. if (unlikely(o == -1))
  602. goto badchar;
  603. (p++)[0] = (n << 4) | o;
  604. }
  605. return likely(!hexstr[0]);
  606. }
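/* A minimal round-trip sketch of the two helpers above (buffer sizing is the
 * caller's responsibility: bin2hex writes 2*len hex digits plus a NUL):
 *
 *   unsigned char bin[4] = {0xde, 0xad, 0xbe, 0xef};
 *   char hex[9];
 *   bin2hex(hex, bin, 4);     // hex == "deadbeef"
 *   unsigned char back[4];
 *   hex2bin(back, hex, 4);    // back == {0xde, 0xad, 0xbe, 0xef}
 */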
  607. void hash_data(unsigned char *out_hash, const unsigned char *data)
  608. {
  609. unsigned char blkheader[80];
  610. // data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header
  611. swap32yes(blkheader, data, 80 / 4);
  612. // double-SHA256 to get the block hash
  613. gen_hash(blkheader, out_hash, 80);
  614. }
  615. // Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1)
  616. void real_block_target(unsigned char *target, const unsigned char *data)
  617. {
  618. uint8_t targetshift;
  619. if (unlikely(data[72] < 3 || data[72] > 0x20))
  620. {
  621. // Invalid (out of bounds) target
  622. memset(target, 0xff, 32);
  623. return;
  624. }
  625. targetshift = data[72] - 3;
  626. memset(target, 0, targetshift);
  627. target[targetshift++] = data[75];
  628. target[targetshift++] = data[74];
  629. target[targetshift++] = data[73];
  630. memset(&target[targetshift], 0, 0x20 - targetshift);
  631. }
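/* Worked example of the expansion above, matching the bdiff-1 output shown
 * before real_block_target(): the compact target 0x1d00ffff appears in this
 * byte-flipped header representation as data[72..75] == {0x1d, 0x00, 0xff, 0xff}.
 * targetshift = 0x1d - 3 = 26, so target[26] = 0xff, target[27] = 0xff,
 * target[28] = 0x00, and every other byte is zeroed, producing the 32-byte
 * target printed in the comment above (...0000ffff00000000). */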
  632. bool hash_target_check(const unsigned char *hash, const unsigned char *target)
  633. {
  634. const uint32_t *h32 = (uint32_t*)&hash[0];
  635. const uint32_t *t32 = (uint32_t*)&target[0];
  636. for (int i = 7; i >= 0; --i) {
  637. uint32_t h32i = le32toh(h32[i]);
  638. uint32_t t32i = le32toh(t32[i]);
  639. if (h32i > t32i)
  640. return false;
  641. if (h32i < t32i)
  642. return true;
  643. }
  644. return true;
  645. }
  646. bool hash_target_check_v(const unsigned char *hash, const unsigned char *target)
  647. {
  648. bool rc;
  649. rc = hash_target_check(hash, target);
  650. if (opt_debug) {
  651. unsigned char hash_swap[32], target_swap[32];
  652. char hash_str[65];
  653. char target_str[65];
  654. for (int i = 0; i < 32; ++i) {
  655. hash_swap[i] = hash[31-i];
  656. target_swap[i] = target[31-i];
  657. }
  658. bin2hex(hash_str, hash_swap, 32);
  659. bin2hex(target_str, target_swap, 32);
  660. applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
  661. hash_str,
  662. target_str,
  663. rc ? "YES (hash <= target)" :
  664. "no (false positive; hash > target)");
  665. }
  666. return rc;
  667. }
  668. // This operates on a native-endian SHA256 state
  669. // In other words, on little endian platforms, every 4 bytes are in reverse order
  670. bool fulltest(const unsigned char *hash, const unsigned char *target)
  671. {
  672. unsigned char hash2[32];
  673. swap32tobe(hash2, hash, 32 / 4);
  674. return hash_target_check_v(hash2, target);
  675. }
  676. struct thread_q *tq_new(void)
  677. {
  678. struct thread_q *tq;
  679. tq = calloc(1, sizeof(*tq));
  680. if (!tq)
  681. return NULL;
  682. pthread_mutex_init(&tq->mutex, NULL);
  683. pthread_cond_init(&tq->cond, NULL);
  684. return tq;
  685. }
  686. void tq_free(struct thread_q *tq)
  687. {
  688. struct tq_ent *ent, *iter;
  689. if (!tq)
  690. return;
  691. DL_FOREACH_SAFE(tq->q, ent, iter) {
  692. DL_DELETE(tq->q, ent);
  693. free(ent);
  694. }
  695. pthread_cond_destroy(&tq->cond);
  696. pthread_mutex_destroy(&tq->mutex);
  697. memset(tq, 0, sizeof(*tq)); /* poison */
  698. free(tq);
  699. }
  700. static void tq_freezethaw(struct thread_q *tq, bool frozen)
  701. {
  702. mutex_lock(&tq->mutex);
  703. tq->frozen = frozen;
  704. pthread_cond_signal(&tq->cond);
  705. mutex_unlock(&tq->mutex);
  706. }
  707. void tq_freeze(struct thread_q *tq)
  708. {
  709. tq_freezethaw(tq, true);
  710. }
  711. void tq_thaw(struct thread_q *tq)
  712. {
  713. tq_freezethaw(tq, false);
  714. }
  715. bool tq_push(struct thread_q *tq, void *data)
  716. {
  717. struct tq_ent *ent;
  718. bool rc = true;
  719. ent = calloc(1, sizeof(*ent));
  720. if (!ent)
  721. return false;
  722. ent->data = data;
  723. mutex_lock(&tq->mutex);
  724. if (!tq->frozen) {
  725. DL_APPEND(tq->q, ent);
  726. } else {
  727. free(ent);
  728. rc = false;
  729. }
  730. pthread_cond_signal(&tq->cond);
  731. mutex_unlock(&tq->mutex);
  732. return rc;
  733. }
  734. void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
  735. {
  736. struct tq_ent *ent;
  737. void *rval = NULL;
  738. int rc;
  739. mutex_lock(&tq->mutex);
  740. if (tq->q)
  741. goto pop;
  742. if (abstime)
  743. rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
  744. else
  745. rc = pthread_cond_wait(&tq->cond, &tq->mutex);
  746. if (rc)
  747. goto out;
  748. if (!tq->q)
  749. goto out;
  750. pop:
  751. ent = tq->q;
  752. rval = ent->data;
  753. DL_DELETE(tq->q, ent);
  754. free(ent);
  755. out:
  756. mutex_unlock(&tq->mutex);
  757. return rval;
  758. }
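/* A minimal producer/consumer sketch for the thread queue above (names are
 * illustrative, not taken from this file):
 *
 *   struct thread_q *q = tq_new();
 *   // producer thread:
 *   tq_push(q, some_work_pointer);
 *   // consumer thread (blocks until data arrives or abstime expires):
 *   void *work = tq_pop(q, NULL);
 *   ...
 *   tq_free(q);
 *
 * tq_push() refuses new entries while the queue is frozen (tq_freeze()), and
 * tq_pop() returns NULL on timeout or when woken without queued data. */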
  759. int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
  760. {
  761. int rv = pthread_create(&thr->pth, attr, start, arg);
  762. if (likely(!rv))
  763. thr->has_pth = true;
  764. return rv;
  765. }
  766. void thr_info_freeze(struct thr_info *thr)
  767. {
  768. struct tq_ent *ent, *iter;
  769. struct thread_q *tq;
  770. if (!thr)
  771. return;
  772. tq = thr->q;
  773. if (!tq)
  774. return;
  775. mutex_lock(&tq->mutex);
  776. tq->frozen = true;
  777. DL_FOREACH_SAFE(tq->q, ent, iter) {
  778. DL_DELETE(tq->q, ent);
  779. free(ent);
  780. }
  781. mutex_unlock(&tq->mutex);
  782. }
  783. void thr_info_cancel(struct thr_info *thr)
  784. {
  785. if (!thr)
  786. return;
  787. if (thr->has_pth) {
  788. pthread_cancel(thr->pth);
  789. thr->has_pth = false;
  790. }
  791. }
  792. #ifndef HAVE_PTHREAD_CANCEL
  793. // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill
  794. enum pthread_cancel_workaround_mode {
  795. PCWM_DEFAULT = 0,
  796. PCWM_TERMINATE = 1,
  797. PCWM_ASYNC = 2,
  798. PCWM_DISABLED = 4,
  799. PCWM_CANCELLED = 8,
  800. };
  801. static pthread_key_t key_pcwm;
  802. struct sigaction pcwm_orig_term_handler;
  803. static
  804. void do_pthread_cancel_exit(int flags)
  805. {
  806. if (!(flags & PCWM_ASYNC))
  807. // NOTE: Logging disables cancel while mutex held, so this is safe
  808. applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW");
  809. pthread_exit(PTHREAD_CANCELED);
  810. }
  811. static
  812. void sighandler_pthread_cancel(int sig)
  813. {
  814. int flags = (int)pthread_getspecific(key_pcwm);
  815. if (flags & PCWM_TERMINATE) // Main thread
  816. {
  817. // Restore original handler and call it
  818. if (sigaction(sig, &pcwm_orig_term_handler, NULL))
  819. quit(1, "pthread_cancel workaround: Failed to restore original handler");
  820. raise(SIGTERM);
  821. quit(1, "pthread_cancel workaround: Original handler returned");
  822. }
  823. if (flags & PCWM_CANCELLED) // Already pending cancel
  824. return;
  825. if (flags & PCWM_DISABLED)
  826. {
  827. flags |= PCWM_CANCELLED;
  828. if (pthread_setspecific(key_pcwm, (void*)flags))
  829. quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)");
  830. return;
  831. }
  832. do_pthread_cancel_exit(flags);
  833. }
  834. void pthread_testcancel(void)
  835. {
  836. int flags = (int)pthread_getspecific(key_pcwm);
  837. if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED))
  838. do_pthread_cancel_exit(flags);
  839. }
  840. int pthread_setcancelstate(int state, int *oldstate)
  841. {
  842. int flags = (int)pthread_getspecific(key_pcwm);
  843. if (oldstate)
  844. *oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
  845. if (state == PTHREAD_CANCEL_DISABLE)
  846. flags |= PCWM_DISABLED;
  847. else
  848. {
  849. if (flags & PCWM_CANCELLED)
  850. do_pthread_cancel_exit(flags);
  851. flags &= ~PCWM_DISABLED;
  852. }
  853. if (pthread_setspecific(key_pcwm, (void*)flags))
  854. return -1;
  855. return 0;
  856. }
  857. int pthread_setcanceltype(int type, int *oldtype)
  858. {
  859. int flags = (int)pthread_getspecific(key_pcwm);
  860. if (oldtype)
  861. *oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
  862. if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
  863. flags |= PCWM_ASYNC;
  864. else
  865. flags &= ~PCWM_ASYNC;
  866. if (pthread_setspecific(key_pcwm, (void*)flags))
  867. return -1;
  868. return 0;
  869. }
  870. void setup_pthread_cancel_workaround()
  871. {
  872. if (pthread_key_create(&key_pcwm, NULL))
  873. quit(1, "pthread_cancel workaround: pthread_key_create failed");
  874. if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE))
  875. quit(1, "pthread_cancel workaround: pthread_setspecific failed");
  876. struct sigaction new_sigact = {
  877. .sa_handler = sighandler_pthread_cancel,
  878. };
  879. if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler))
  880. quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler");
  881. }
  882. #endif
  883. static void _now_gettimeofday(struct timeval *);
  884. #ifdef HAVE_POOR_GETTIMEOFDAY
  885. static struct timeval tv_timeofday_offset;
  886. static struct timeval _tv_timeofday_lastchecked;
  887. static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER;
  888. static
  889. void bfg_calibrate_timeofday(struct timeval *expected, char *buf)
  890. {
  891. struct timeval actual, delta;
  892. timeradd(expected, &tv_timeofday_offset, expected);
  893. _now_gettimeofday(&actual);
  894. if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1)
  895. // Within reason - no change necessary
  896. return;
  897. timersub(&actual, expected, &delta);
  898. timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset);
  899. sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec);
  900. *expected = actual;
  901. }
  902. void bfg_gettimeofday(struct timeval *out)
  903. {
  904. char buf[64] = "";
  905. timer_set_now(out);
  906. mutex_lock(&_tv_timeofday_mutex);
  907. if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21)
  908. bfg_calibrate_timeofday(out, buf);
  909. else
  910. timeradd(out, &tv_timeofday_offset, out);
  911. mutex_unlock(&_tv_timeofday_mutex);
  912. if (unlikely(buf[0]))
  913. applog(LOG_WARNING, "%s", buf);
  914. }
  915. #endif
  916. #ifdef WIN32
  917. static LARGE_INTEGER _perffreq;
  918. static
  919. void _now_queryperformancecounter(struct timeval *tv)
  920. {
  921. LARGE_INTEGER now;
  922. if (unlikely(!QueryPerformanceCounter(&now)))
  923. quit(1, "QueryPerformanceCounter failed");
  924. *tv = (struct timeval){
  925. .tv_sec = now.QuadPart / _perffreq.QuadPart,
  926. .tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart,
  927. };
  928. }
  929. #endif
  930. static
  931. void _now_is_not_set(__maybe_unused struct timeval *tv)
  932. {
  933. // Might be unclean to swap algorithms after getting a timer
  934. quit(1, "timer_set_now called before bfg_init_time");
  935. }
  936. void (*timer_set_now)(struct timeval *tv) = _now_is_not_set;
  937. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  938. static clockid_t bfg_timer_clk;
  939. static
  940. void _now_clock_gettime(struct timeval *tv)
  941. {
  942. struct timespec ts;
  943. if (unlikely(clock_gettime(bfg_timer_clk, &ts)))
  944. quit(1, "clock_gettime failed");
  945. *tv = (struct timeval){
  946. .tv_sec = ts.tv_sec,
  947. .tv_usec = ts.tv_nsec / 1000,
  948. };
  949. }
  950. static
  951. bool _bfg_try_clock_gettime(clockid_t clk)
  952. {
  953. struct timespec ts;
  954. if (clock_gettime(clk, &ts))
  955. return false;
  956. bfg_timer_clk = clk;
  957. timer_set_now = _now_clock_gettime;
  958. return true;
  959. }
  960. #endif
  961. void bfg_init_time()
  962. {
  963. if (timer_set_now != _now_is_not_set)
  964. return;
  965. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  966. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW
  967. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW))
  968. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)");
  969. else
  970. #endif
  971. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC))
  972. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)");
  973. else
  974. #endif
  975. #ifdef WIN32
  976. if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart)
  977. {
  978. timer_set_now = _now_queryperformancecounter;
  979. applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter");
  980. }
  981. else
  982. #endif
  983. {
  984. timer_set_now = _now_gettimeofday;
  985. applog(LOG_DEBUG, "Timers: Using gettimeofday");
  986. }
  987. #ifdef HAVE_POOR_GETTIMEOFDAY
  988. char buf[64] = "";
  989. struct timeval tv;
  990. timer_set_now(&tv);
  991. bfg_calibrate_timeofday(&tv, buf);
  992. applog(LOG_DEBUG, "%s", buf);
  993. #endif
  994. }
  995. void subtime(struct timeval *a, struct timeval *b)
  996. {
  997. timersub(a, b, b);
  998. }
  999. void addtime(struct timeval *a, struct timeval *b)
  1000. {
  1001. timeradd(a, b, b);
  1002. }
  1003. bool time_more(struct timeval *a, struct timeval *b)
  1004. {
  1005. return timercmp(a, b, >);
  1006. }
  1007. bool time_less(struct timeval *a, struct timeval *b)
  1008. {
  1009. return timercmp(a, b, <);
  1010. }
  1011. void copy_time(struct timeval *dest, const struct timeval *src)
  1012. {
  1013. memcpy(dest, src, sizeof(struct timeval));
  1014. }
  1015. void timespec_to_val(struct timeval *val, const struct timespec *spec)
  1016. {
  1017. val->tv_sec = spec->tv_sec;
  1018. val->tv_usec = spec->tv_nsec / 1000;
  1019. }
  1020. void timeval_to_spec(struct timespec *spec, const struct timeval *val)
  1021. {
  1022. spec->tv_sec = val->tv_sec;
  1023. spec->tv_nsec = val->tv_usec * 1000;
  1024. }
  1025. void us_to_timeval(struct timeval *val, int64_t us)
  1026. {
  1027. lldiv_t tvdiv = lldiv(us, 1000000);
  1028. val->tv_sec = tvdiv.quot;
  1029. val->tv_usec = tvdiv.rem;
  1030. }
  1031. void us_to_timespec(struct timespec *spec, int64_t us)
  1032. {
  1033. lldiv_t tvdiv = lldiv(us, 1000000);
  1034. spec->tv_sec = tvdiv.quot;
  1035. spec->tv_nsec = tvdiv.rem * 1000;
  1036. }
  1037. void ms_to_timespec(struct timespec *spec, int64_t ms)
  1038. {
  1039. lldiv_t tvdiv = lldiv(ms, 1000);
  1040. spec->tv_sec = tvdiv.quot;
  1041. spec->tv_nsec = tvdiv.rem * 1000000;
  1042. }
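/* Example of the conversions above: us_to_timeval(&tv, 2500000) yields
 * {.tv_sec = 2, .tv_usec = 500000}, and ms_to_timespec(&ts, 2500) yields
 * {.tv_sec = 2, .tv_nsec = 500000000}.  lldiv() keeps the quotient/remainder
 * split exact for 64-bit inputs. */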
  1043. void timeraddspec(struct timespec *a, const struct timespec *b)
  1044. {
  1045. a->tv_sec += b->tv_sec;
  1046. a->tv_nsec += b->tv_nsec;
  1047. if (a->tv_nsec >= 1000000000) {
  1048. a->tv_nsec -= 1000000000;
  1049. a->tv_sec++;
  1050. }
  1051. }
  1052. static int timespec_to_ms(struct timespec *ts)
  1053. {
  1054. return ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
  1055. }
  1056. /* These are cgminer specific sleep functions that use an absolute nanosecond
  1057. * resolution timer to avoid poor usleep accuracy and overruns. */
  1058. #ifndef WIN32
  1059. void cgtimer_time(cgtimer_t *ts_start)
  1060. {
  1061. clock_gettime(CLOCK_MONOTONIC, ts_start);
  1062. }
  1063. static void nanosleep_abstime(struct timespec *ts_end)
  1064. {
  1065. int ret;
  1066. do {
  1067. ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
  1068. } while (ret == EINTR);
  1069. }
  1070. /* Reentrant version of cgsleep functions allow start time to be set separately
  1071. * from the beginning of the actual sleep, allowing scheduling delays to be
  1072. * counted in the sleep. */
  1073. void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
  1074. {
  1075. struct timespec ts_end;
  1076. ms_to_timespec(&ts_end, ms);
  1077. timeraddspec(&ts_end, ts_start);
  1078. nanosleep_abstime(&ts_end);
  1079. }
  1080. void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
  1081. {
  1082. struct timespec ts_end;
  1083. us_to_timespec(&ts_end, us);
  1084. timeraddspec(&ts_end, ts_start);
  1085. nanosleep_abstime(&ts_end);
  1086. }
  1087. int cgtimer_to_ms(cgtimer_t *cgt)
  1088. {
  1089. return timespec_to_ms(cgt);
  1090. }
  1091. /* Subtracts b from a and stores it in res. */
  1092. void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res)
  1093. {
  1094. res->tv_sec = a->tv_sec - b->tv_sec;
  1095. res->tv_nsec = a->tv_nsec - b->tv_nsec;
  1096. if (res->tv_nsec < 0) {
  1097. res->tv_nsec += 1000000000;
  1098. res->tv_sec--;
  1099. }
  1100. }
  1101. static
  1102. void _now_gettimeofday(struct timeval *tv)
  1103. {
  1104. gettimeofday(tv, NULL);
  1105. }
  1106. #else
  1107. /* Windows start time is since 1601 lol so convert it to unix epoch 1970. */
  1108. #define EPOCHFILETIME (116444736000000000LL)
  1109. /* Return the system time as an lldiv_t in decimicroseconds. */
  1110. static void decius_time(lldiv_t *lidiv)
  1111. {
  1112. FILETIME ft;
  1113. LARGE_INTEGER li;
  1114. GetSystemTimeAsFileTime(&ft);
  1115. li.LowPart = ft.dwLowDateTime;
  1116. li.HighPart = ft.dwHighDateTime;
  1117. li.QuadPart -= EPOCHFILETIME;
  1118. /* SystemTime is in decimicroseconds so divide by an unusual number */
  1119. *lidiv = lldiv(li.QuadPart, 10000000);
  1120. }
  1121. void _now_gettimeofday(struct timeval *tv)
  1122. {
  1123. lldiv_t lidiv;
  1124. decius_time(&lidiv);
  1125. tv->tv_sec = lidiv.quot;
  1126. tv->tv_usec = lidiv.rem / 10;
  1127. }
  1128. void cgtimer_time(cgtimer_t *ts_start)
  1129. {
  1130. lldiv_t lidiv;
  1131. decius_time(&lidiv);
  1132. ts_start->tv_sec = lidiv.quot;
  1133. ts_start->tv_nsec = lidiv.rem * 100; /* rem is in 100ns (FILETIME) units */
  1134. }
  1135. /* Subtract b from a */
  1136. static void timersubspec(struct timespec *a, const struct timespec *b)
  1137. {
  1138. a->tv_sec -= b->tv_sec;
  1139. a->tv_nsec -= b->tv_nsec;
  1140. if (a->tv_nsec < 0) {
  1141. a->tv_nsec += 1000000000;
  1142. a->tv_sec--;
  1143. }
  1144. }
  1145. static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start)
  1146. {
  1147. struct timespec now;
  1148. timeraddspec(ts_diff, ts_start);
  1149. cgtimer_time(&now);
  1150. timersubspec(ts_diff, &now);
  1151. if (unlikely(ts_diff->tv_sec < 0))
  1152. return;
  1153. nanosleep(ts_diff, NULL);
  1154. }
  1155. void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
  1156. {
  1157. struct timespec ts_diff;
  1158. ms_to_timespec(&ts_diff, ms);
  1159. cgsleep_spec(&ts_diff, ts_start);
  1160. }
  1161. void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
  1162. {
  1163. struct timespec ts_diff;
  1164. us_to_timespec(&ts_diff, us);
  1165. cgsleep_spec(&ts_diff, ts_start);
  1166. }
  1167. int cgtimer_to_ms(cgtimer_t *cgt)
  1168. {
  1169. return timespec_to_ms(cgt);
  1170. }
  1171. void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res)
  1172. {
  1173. res->tv_sec = a->tv_sec - b->tv_sec;
  1174. res->tv_nsec = a->tv_nsec - b->tv_nsec;
  1175. if (res->tv_nsec < 0) {
  1176. res->tv_nsec += 1000000000;
  1177. res->tv_sec--;
  1178. }
  1179. }
  1180. #endif
  1181. void cgsleep_ms(int ms)
  1182. {
  1183. cgtimer_t ts_start;
  1184. cgsleep_prepare_r(&ts_start);
  1185. cgsleep_ms_r(&ts_start, ms);
  1186. }
  1187. void cgsleep_us(int64_t us)
  1188. {
  1189. cgtimer_t ts_start;
  1190. cgsleep_prepare_r(&ts_start);
  1191. cgsleep_us_r(&ts_start, us);
  1192. }
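/* Sketch of the reentrant sleep pattern described above, useful for a loop
 * that should tick at a fixed period regardless of how long the body takes
 * (loop condition and body are hypothetical):
 *
 *   cgtimer_t ts_start;
 *   cgsleep_prepare_r(&ts_start);
 *   while (running) {
 *       do_periodic_work();           // hypothetical work
 *       cgsleep_ms_r(&ts_start, 100); // sleep until 100ms after ts_start
 *       cgsleep_prepare_r(&ts_start); // re-arm for the next period
 *   }
 */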
  1193. /* Returns the microseconds difference between end and start times as a double */
  1194. double us_tdiff(struct timeval *end, struct timeval *start)
  1195. {
  1196. return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec;
  1197. }
  1198. /* Returns the seconds difference between end and start times as a double */
  1199. double tdiff(struct timeval *end, struct timeval *start)
  1200. {
  1201. return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
  1202. }
  1203. bool extract_sockaddr(struct pool *pool, char *url)
  1204. {
  1205. char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
  1206. char url_address[256], port[6];
  1207. int url_len, port_len = 0;
  1208. url_begin = strstr(url, "//");
  1209. if (!url_begin)
  1210. url_begin = url;
  1211. else
  1212. url_begin += 2;
  1213. /* Look for numeric ipv6 entries */
  1214. ipv6_begin = strstr(url_begin, "[");
  1215. ipv6_end = strstr(url_begin, "]");
  1216. if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
  1217. url_end = strstr(ipv6_end, ":");
  1218. else
  1219. url_end = strstr(url_begin, ":");
  1220. if (url_end) {
  1221. url_len = url_end - url_begin;
  1222. port_len = strlen(url_begin) - url_len - 1;
  1223. if (port_len < 1)
  1224. return false;
  1225. port_start = url_end + 1;
  1226. } else
  1227. url_len = strlen(url_begin);
  1228. if (url_len < 1)
  1229. return false;
  1230. snprintf(url_address, sizeof(url_address), "%.*s", url_len, url_begin);
  1231. if (port_len)
  1232. snprintf(port, 6, "%.*s", port_len, port_start);
  1233. else
  1234. strcpy(port, "80");
  1235. free(pool->stratum_port);
  1236. pool->stratum_port = strdup(port);
  1237. free(pool->sockaddr_url);
  1238. pool->sockaddr_url = strdup(url_address);
  1239. return true;
  1240. }
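/* Illustrative parse (hypothetical URL): "stratum+tcp://pool.example.com:3333"
 * yields sockaddr_url == "pool.example.com" and stratum_port == "3333"; a URL
 * with no port falls back to "80".  Bracketed IPv6 literals such as
 * "http://[::1]:8332" keep the brackets in sockaddr_url and still split the
 * port correctly. */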
  1241. enum send_ret {
  1242. SEND_OK,
  1243. SEND_SELECTFAIL,
  1244. SEND_SENDFAIL,
  1245. SEND_INACTIVE
  1246. };
  1247. /* Send a single command across a socket, appending \n to it. This should all
  1248. * be done under stratum lock except when first establishing the socket */
  1249. static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
  1250. {
  1251. SOCKETTYPE sock = pool->sock;
  1252. ssize_t ssent = 0;
  1253. strcat(s, "\n");
  1254. len++;
  1255. while (len > 0) {
  1256. struct timeval timeout = {1, 0};
  1257. ssize_t sent;
  1258. fd_set wd;
  1259. FD_ZERO(&wd);
  1260. FD_SET(sock, &wd);
  1261. if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
  1262. return SEND_SELECTFAIL;
  1263. #ifdef __APPLE__
  1264. sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
  1265. #elif WIN32
  1266. sent = send(pool->sock, s + ssent, len, 0);
  1267. #else
  1268. sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
  1269. #endif
  1270. if (sent < 0) {
  1271. if (!sock_blocks())
  1272. return SEND_SENDFAIL;
  1273. sent = 0;
  1274. }
  1275. ssent += sent;
  1276. len -= sent;
  1277. }
  1278. pool->cgminer_pool_stats.times_sent++;
  1279. pool->cgminer_pool_stats.bytes_sent += ssent;
  1280. total_bytes_sent += ssent;
  1281. pool->cgminer_pool_stats.net_bytes_sent += ssent;
  1282. return SEND_OK;
  1283. }
  1284. bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force)
  1285. {
  1286. enum send_ret ret = SEND_INACTIVE;
  1287. if (opt_protocol)
  1288. applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s);
  1289. mutex_lock(&pool->stratum_lock);
  1290. if (pool->stratum_active || force)
  1291. ret = __stratum_send(pool, s, len);
  1292. mutex_unlock(&pool->stratum_lock);
  1293. /* This is to avoid doing applog under stratum_lock */
  1294. switch (ret) {
  1295. default:
  1296. case SEND_OK:
  1297. break;
  1298. case SEND_SELECTFAIL:
  1299. applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
  1300. suspend_stratum(pool);
  1301. break;
  1302. case SEND_SENDFAIL:
  1303. applog(LOG_DEBUG, "Failed to send in stratum_send");
  1304. suspend_stratum(pool);
  1305. break;
  1306. case SEND_INACTIVE:
  1307. applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
  1308. break;
  1309. }
  1310. return (ret == SEND_OK);
  1311. }
  1312. static bool socket_full(struct pool *pool, int wait)
  1313. {
  1314. SOCKETTYPE sock = pool->sock;
  1315. struct timeval timeout;
  1316. fd_set rd;
  1317. if (unlikely(wait < 0))
  1318. wait = 0;
  1319. FD_ZERO(&rd);
  1320. FD_SET(sock, &rd);
  1321. timeout.tv_usec = 0;
  1322. timeout.tv_sec = wait;
  1323. if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
  1324. return true;
  1325. return false;
  1326. }
  1327. /* Check to see if Santa's been good to you */
  1328. bool sock_full(struct pool *pool)
  1329. {
  1330. if (strlen(pool->sockbuf))
  1331. return true;
  1332. return (socket_full(pool, 0));
  1333. }
  1334. static void clear_sockbuf(struct pool *pool)
  1335. {
  1336. strcpy(pool->sockbuf, "");
  1337. }
  1338. static void clear_sock(struct pool *pool)
  1339. {
  1340. ssize_t n;
  1341. mutex_lock(&pool->stratum_lock);
  1342. do {
  1343. if (pool->sock)
  1344. n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
  1345. else
  1346. n = 0;
  1347. } while (n > 0);
  1348. mutex_unlock(&pool->stratum_lock);
  1349. clear_sockbuf(pool);
  1350. }
  1351. /* Make sure the pool sockbuf is large enough to cope with any coinbase size
  1352. * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
  1353. * and zeroing the new memory */
  1354. static void recalloc_sock(struct pool *pool, size_t len)
  1355. {
  1356. size_t old, new;
  1357. old = strlen(pool->sockbuf);
  1358. new = old + len + 1;
  1359. if (new < pool->sockbuf_size)
  1360. return;
  1361. new = new + (RBUFSIZE - (new % RBUFSIZE));
  1362. // Avoid potentially recursive locking
  1363. // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new);
  1364. pool->sockbuf = realloc(pool->sockbuf, new);
  1365. if (!pool->sockbuf)
  1366. quithere(1, "Failed to realloc pool sockbuf");
  1367. memset(pool->sockbuf + old, 0, new - old);
  1368. pool->sockbuf_size = new;
  1369. }
  1370. /* Peeks at a socket to find the first end of line and then reads just that
  1371. * from the socket and returns that as a malloced char */
  1372. char *recv_line(struct pool *pool)
  1373. {
  1374. char *tok, *sret = NULL;
  1375. ssize_t len, buflen;
  1376. int waited = 0;
  1377. if (!strstr(pool->sockbuf, "\n")) {
  1378. struct timeval rstart, now;
  1379. cgtime(&rstart);
  1380. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1381. applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
  1382. goto out;
  1383. }
  1384. do {
  1385. char s[RBUFSIZE];
  1386. size_t slen;
  1387. ssize_t n;
  1388. memset(s, 0, RBUFSIZE);
  1389. n = recv(pool->sock, s, RECVSIZE, 0);
  1390. if (!n) {
  1391. applog(LOG_DEBUG, "Socket closed waiting in recv_line");
  1392. suspend_stratum(pool);
  1393. break;
  1394. }
  1395. cgtime(&now);
  1396. waited = tdiff(&now, &rstart);
  1397. if (n < 0) {
1398. // Save errno from being overwritten by the socket_* calls below
  1399. int socket_recv_errno;
  1400. socket_recv_errno = SOCKERR;
  1401. if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
  1402. applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET));
  1403. suspend_stratum(pool);
  1404. break;
  1405. }
  1406. } else {
  1407. slen = strlen(s);
  1408. recalloc_sock(pool, slen);
  1409. strcat(pool->sockbuf, s);
  1410. }
  1411. } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
  1412. }
  1413. buflen = strlen(pool->sockbuf);
  1414. tok = strtok(pool->sockbuf, "\n");
  1415. if (!tok) {
  1416. applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
  1417. goto out;
  1418. }
  1419. sret = strdup(tok);
  1420. len = strlen(sret);
  1421. /* Copy what's left in the buffer after the \n, including the
  1422. * terminating \0 */
  1423. if (buflen > len + 1)
  1424. memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
  1425. else
  1426. strcpy(pool->sockbuf, "");
  1427. pool->cgminer_pool_stats.times_received++;
  1428. pool->cgminer_pool_stats.bytes_received += len;
  1429. total_bytes_rcvd += len;
  1430. pool->cgminer_pool_stats.net_bytes_received += len;
  1431. out:
  1432. if (!sret)
  1433. clear_sock(pool);
  1434. else if (opt_protocol)
  1435. applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret);
  1436. return sret;
  1437. }
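/* Illustrative sketch (hypothetical helper): the usual consumer pattern for
 * recv_line, mirroring auth_stratum below -- drain complete lines while data
 * is pending, hand method calls to parse_method, and free each string. */
static void __maybe_unused example_drain_stratum_lines(struct pool *pool)
{
	char *line;
	while (sock_full(pool) && (line = recv_line(pool))) {
		if (!parse_method(pool, line))
			applog(LOG_DEBUG, "Example: line was not a method call: %s", line);
		free(line);
	}
}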
  1438. /* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY
  1439. * flag, but this is compatible with 2.0. */
  1440. char *json_dumps_ANY(json_t *json, size_t flags)
  1441. {
  1442. switch (json_typeof(json))
  1443. {
  1444. case JSON_ARRAY:
  1445. case JSON_OBJECT:
  1446. return json_dumps(json, flags);
  1447. default:
  1448. break;
  1449. }
  1450. char *rv;
  1451. #ifdef JSON_ENCODE_ANY
  1452. rv = json_dumps(json, JSON_ENCODE_ANY | flags);
  1453. if (rv)
  1454. return rv;
  1455. #endif
  1456. json_t *tmp = json_array();
  1457. char *s;
  1458. int i;
  1459. size_t len;
  1460. if (!tmp)
  1461. quithere(1, "Failed to allocate json array");
  1462. if (json_array_append(tmp, json))
  1463. quithere(1, "Failed to append temporary array");
  1464. s = json_dumps(tmp, flags);
  1465. if (!s)
  1466. return NULL;
  1467. for (i = 0; s[i] != '['; ++i)
  1468. if (unlikely(!(s[i] && isCspace(s[i]))))
  1469. quithere(1, "Failed to find opening bracket in array dump");
  1470. len = strlen(&s[++i]) - 1;
  1471. if (unlikely(s[i+len] != ']'))
  1472. quithere(1, "Failed to find closing bracket in array dump");
  1473. rv = malloc(len + 1);
  1474. memcpy(rv, &s[i], len);
  1475. rv[len] = '\0';
  1476. free(s);
  1477. json_decref(tmp);
  1478. return rv;
  1479. }
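/* Illustrative sketch (hypothetical helper): json_dumps_ANY is used below in
 * send_version and stratum_show_message to serialise a bare "id" value that
 * may be a string or number, which plain json_dumps on jansson 2.0 refuses
 * to encode outside an array or object. */
static void __maybe_unused example_dump_bare_id(void)
{
	json_t *id = json_string("auth");  /* a bare value, not an array/object */
	char *s = json_dumps_ANY(id, 0);   /* yields "\"auth\"" */
	if (s)
		free(s);
	json_decref(id);
}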
  1480. /* Extracts a string value from a json array with error checking. To be used
  1481. * when the value of the string returned is only examined and not to be stored.
  1482. * See json_array_string below */
  1483. char *__json_array_string(json_t *val, unsigned int entry)
  1484. {
  1485. json_t *arr_entry;
  1486. if (json_is_null(val))
  1487. return NULL;
  1488. if (!json_is_array(val))
  1489. return NULL;
  1490. if (entry > json_array_size(val))
  1491. return NULL;
  1492. arr_entry = json_array_get(val, entry);
  1493. if (!json_is_string(arr_entry))
  1494. return NULL;
  1495. return (char *)json_string_value(arr_entry);
  1496. }
  1497. /* Creates a freshly malloced dup of __json_array_string */
  1498. static char *json_array_string(json_t *val, unsigned int entry)
  1499. {
  1500. char *buf = __json_array_string(val, entry);
  1501. if (buf)
  1502. return strdup(buf);
  1503. return NULL;
  1504. }
  1505. void stratum_probe_transparency(struct pool *pool)
  1506. {
  1507. // Request transaction data to discourage pools from doing anything shady
  1508. char s[1024];
  1509. int sLen;
  1510. sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}",
  1511. pool->swork.job_id,
  1512. pool->swork.job_id);
  1513. stratum_send(pool, s, sLen);
  1514. if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
  1515. cgtime(&pool->swork.tv_transparency);
  1516. pool->swork.transparency_probed = true;
  1517. }
  1518. static bool parse_notify(struct pool *pool, json_t *val)
  1519. {
  1520. char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime;
  1521. bool clean, ret = false;
  1522. int merkles, i;
  1523. size_t cb1_len, cb2_len;
  1524. json_t *arr;
  1525. arr = json_array_get(val, 4);
  1526. if (!arr || !json_is_array(arr))
  1527. goto out;
  1528. merkles = json_array_size(arr);
  1529. for (i = 0; i < merkles; i++)
  1530. if (!json_is_string(json_array_get(arr, i)))
  1531. goto out;
  1532. prev_hash = __json_array_string(val, 1);
  1533. coinbase1 = __json_array_string(val, 2);
  1534. coinbase2 = __json_array_string(val, 3);
  1535. bbversion = __json_array_string(val, 5);
  1536. nbit = __json_array_string(val, 6);
  1537. ntime = __json_array_string(val, 7);
  1538. clean = json_is_true(json_array_get(val, 8));
  1539. if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime)
  1540. goto out;
  1541. job_id = json_array_string(val, 0);
  1542. if (!job_id)
  1543. goto out;
  1544. cg_wlock(&pool->data_lock);
  1545. cgtime(&pool->swork.tv_received);
  1546. free(pool->swork.job_id);
  1547. pool->swork.job_id = job_id;
  1548. pool->submit_old = !clean;
  1549. pool->swork.clean = true;
  1550. hex2bin(&pool->swork.header1[0], bbversion, 4);
  1551. hex2bin(&pool->swork.header1[4], prev_hash, 32);
  1552. hex2bin((void*)&pool->swork.ntime, ntime, 4);
  1553. pool->swork.ntime = be32toh(pool->swork.ntime);
  1554. hex2bin(&pool->swork.diffbits[0], nbit, 4);
  1555. cb1_len = strlen(coinbase1) / 2;
  1556. pool->swork.nonce2_offset = cb1_len + pool->n1_len;
  1557. cb2_len = strlen(coinbase2) / 2;
  1558. bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len);
  1559. uint8_t *coinbase = bytes_buf(&pool->swork.coinbase);
  1560. hex2bin(coinbase, coinbase1, cb1_len);
  1561. hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len);
  1562. // NOTE: gap for nonce2, filled at work generation time
  1563. hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len);
  1564. bytes_resize(&pool->swork.merkle_bin, 32 * merkles);
  1565. for (i = 0; i < merkles; i++)
  1566. hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32);
  1567. pool->swork.merkles = merkles;
  1568. if (clean)
  1569. pool->nonce2 = 0;
  1570. cg_wunlock(&pool->data_lock);
  1571. applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s",
  1572. pool->pool_no, job_id);
  1573. if (opt_debug && opt_protocol)
  1574. {
  1575. applog(LOG_DEBUG, "job_id: %s", job_id);
  1576. applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
  1577. applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
  1578. applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
  1579. for (i = 0; i < merkles; i++)
  1580. applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i)));
  1581. applog(LOG_DEBUG, "bbversion: %s", bbversion);
  1582. applog(LOG_DEBUG, "nbit: %s", nbit);
  1583. applog(LOG_DEBUG, "ntime: %s", ntime);
  1584. applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
  1585. }
  1586. /* A notify message is the closest stratum gets to a getwork */
  1587. pool->getwork_requested++;
  1588. total_getworks++;
  1589. if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
  1590. if (pool->probed)
  1591. stratum_probe_transparency(pool);
  1592. ret = true;
  1593. out:
  1594. return ret;
  1595. }
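/*
 * For reference, the mining.notify params array parsed above has the layout
 * (indices as used in parse_notify):
 *   [0] job_id        [1] prev_hash      [2] coinbase1   [3] coinbase2
 *   [4] merkle_branch [5] block version  [6] nbits       [7] ntime
 *   [8] clean_jobs
 * and the assembled coinbase buffer is laid out as
 *   coinbase1 | extranonce1 (n1_len) | extranonce2 gap (n2size) | coinbase2
 * with the extranonce2 gap filled in at work generation time.
 */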
  1596. static bool parse_diff(struct pool *pool, json_t *val)
  1597. {
  1598. double diff;
  1599. diff = json_number_value(json_array_get(val, 0));
  1600. if (diff == 0)
  1601. return false;
  1602. cg_wlock(&pool->data_lock);
  1603. pool->swork.diff = diff;
  1604. cg_wunlock(&pool->data_lock);
  1605. applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff);
  1606. return true;
  1607. }
  1608. static bool parse_reconnect(struct pool *pool, json_t *val)
  1609. {
  1610. char *url, *port, address[256];
1611. memset(address, 0, sizeof(address));
  1612. url = (char *)json_string_value(json_array_get(val, 0));
  1613. if (!url)
  1614. url = pool->sockaddr_url;
  1615. port = (char *)json_string_value(json_array_get(val, 1));
  1616. if (!port)
  1617. port = pool->stratum_port;
  1618. sprintf(address, "%s:%s", url, port);
  1619. if (!extract_sockaddr(pool, address))
  1620. return false;
  1621. pool->stratum_url = pool->sockaddr_url;
  1622. applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address);
  1623. if (!restart_stratum(pool))
  1624. return false;
  1625. return true;
  1626. }
  1627. static bool send_version(struct pool *pool, json_t *val)
  1628. {
  1629. char s[RBUFSIZE], *idstr;
  1630. json_t *id = json_object_get(val, "id");
  1631. if (!(id && !json_is_null(id)))
  1632. return false;
  1633. idstr = json_dumps_ANY(id, 0);
  1634. sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr);
  1635. free(idstr);
  1636. if (!stratum_send(pool, s, strlen(s)))
  1637. return false;
  1638. return true;
  1639. }
  1640. static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params)
  1641. {
  1642. char *msg;
  1643. char s[RBUFSIZE], *idstr;
  1644. json_t *id = json_object_get(val, "id");
  1645. msg = json_array_string(params, 0);
  1646. if (likely(msg))
  1647. {
  1648. free(pool->admin_msg);
  1649. pool->admin_msg = msg;
  1650. applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg);
  1651. }
  1652. if (!(id && !json_is_null(id)))
  1653. return true;
  1654. idstr = json_dumps_ANY(id, 0);
  1655. if (likely(msg))
  1656. sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr);
  1657. else
  1658. sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr);
  1659. free(idstr);
  1660. if (!stratum_send(pool, s, strlen(s)))
  1661. return false;
  1662. return true;
  1663. }
  1664. bool parse_method(struct pool *pool, char *s)
  1665. {
  1666. json_t *val = NULL, *method, *err_val, *params;
  1667. json_error_t err;
  1668. bool ret = false;
  1669. char *buf;
  1670. if (!s)
  1671. goto out;
  1672. val = JSON_LOADS(s, &err);
  1673. if (!val) {
  1674. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1675. goto out;
  1676. }
  1677. method = json_object_get(val, "method");
  1678. if (!method)
  1679. goto out;
  1680. err_val = json_object_get(val, "error");
  1681. params = json_object_get(val, "params");
  1682. if (err_val && !json_is_null(err_val)) {
  1683. char *ss;
  1684. if (err_val)
  1685. ss = json_dumps(err_val, JSON_INDENT(3));
  1686. else
  1687. ss = strdup("(unknown reason)");
  1688. applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);
  1689. free(ss);
  1690. goto out;
  1691. }
  1692. buf = (char *)json_string_value(method);
  1693. if (!buf)
  1694. goto out;
  1695. if (!strncasecmp(buf, "mining.notify", 13)) {
  1696. if (parse_notify(pool, params))
  1697. pool->stratum_notify = ret = true;
  1698. else
  1699. pool->stratum_notify = ret = false;
  1700. goto out;
  1701. }
  1702. if (!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) {
  1703. ret = true;
  1704. goto out;
  1705. }
  1706. if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) {
  1707. ret = true;
  1708. goto out;
  1709. }
  1710. if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) {
  1711. ret = true;
  1712. goto out;
  1713. }
  1714. if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) {
  1715. ret = true;
  1716. goto out;
  1717. }
  1718. out:
  1719. if (val)
  1720. json_decref(val);
  1721. return ret;
  1722. }
  1723. extern bool parse_stratum_response(struct pool *, char *s);
  1724. bool auth_stratum(struct pool *pool)
  1725. {
  1726. json_t *val = NULL, *res_val, *err_val;
  1727. char s[RBUFSIZE], *sret = NULL;
  1728. json_error_t err;
  1729. bool ret = false;
  1730. sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
  1731. pool->rpc_user, pool->rpc_pass);
  1732. if (!stratum_send(pool, s, strlen(s)))
  1733. goto out;
  1734. /* Parse all data in the queue and anything left should be auth */
  1735. while (42) {
  1736. sret = recv_line(pool);
  1737. if (!sret)
  1738. goto out;
  1739. if (parse_method(pool, sret))
  1740. free(sret);
  1741. else
  1742. break;
  1743. }
  1744. val = JSON_LOADS(sret, &err);
  1745. free(sret);
  1746. res_val = json_object_get(val, "result");
  1747. err_val = json_object_get(val, "error");
  1748. if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
  1749. char *ss;
  1750. if (err_val)
  1751. ss = json_dumps(err_val, JSON_INDENT(3));
  1752. else
  1753. ss = strdup("(unknown reason)");
  1754. applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
  1755. free(ss);
  1756. goto out;
  1757. }
  1758. ret = true;
  1759. applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
  1760. pool->probed = true;
  1761. successful_connect = true;
  1762. out:
  1763. if (val)
  1764. json_decref(val);
  1765. if (pool->stratum_notify)
  1766. stratum_probe_transparency(pool);
  1767. return ret;
  1768. }
  1769. curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
  1770. {
  1771. struct pool *pool = clientp;
  1772. curl_socket_t sck = socket(addr->family, addr->socktype, addr->protocol);
  1773. pool->sock = sck;
  1774. return sck;
  1775. }
  1776. static bool setup_stratum_curl(struct pool *pool)
  1777. {
  1778. char curl_err_str[CURL_ERROR_SIZE];
  1779. CURL *curl = NULL;
  1780. char s[RBUFSIZE];
  1781. bool ret = false;
  1782. applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
  1783. mutex_lock(&pool->stratum_lock);
  1784. timer_unset(&pool->swork.tv_transparency);
  1785. pool->stratum_active = false;
  1786. pool->stratum_notify = false;
  1787. pool->swork.transparency_probed = false;
  1788. if (pool->stratum_curl)
  1789. curl_easy_cleanup(pool->stratum_curl);
  1790. pool->stratum_curl = curl_easy_init();
  1791. if (unlikely(!pool->stratum_curl))
  1792. quithere(1, "Failed to curl_easy_init");
  1793. if (pool->sockbuf)
  1794. pool->sockbuf[0] = '\0';
  1795. curl = pool->stratum_curl;
  1796. if (!pool->sockbuf) {
  1797. pool->sockbuf = calloc(RBUFSIZE, 1);
  1798. if (!pool->sockbuf)
  1799. quithere(1, "Failed to calloc pool sockbuf");
  1800. pool->sockbuf_size = RBUFSIZE;
  1801. }
  1802. /* Create a http url for use with curl */
  1803. sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port);
  1804. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  1805. curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30);
  1806. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
  1807. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  1808. curl_easy_setopt(curl, CURLOPT_URL, s);
  1809. if (!opt_delaynet)
  1810. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  1811. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  1812. * to enable it */
  1813. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  1814. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  1815. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  1816. // CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this hack for now
  1817. curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb);
  1818. curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
  1819. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  1820. if (pool->rpc_proxy) {
  1821. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  1822. } else if (opt_socks_proxy) {
  1823. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  1824. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4);
  1825. }
  1826. curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
  1827. pool->sock = INVSOCK;
  1828. if (curl_easy_perform(curl)) {
  1829. applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
  1830. errout:
  1831. curl_easy_cleanup(curl);
  1832. pool->stratum_curl = NULL;
  1833. goto out;
  1834. }
  1835. if (pool->sock == INVSOCK)
  1836. {
  1837. applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no);
  1838. goto errout;
  1839. }
  1840. keep_sockalive(pool->sock);
  1841. pool->cgminer_pool_stats.times_sent++;
  1842. pool->cgminer_pool_stats.times_received++;
  1843. ret = true;
  1844. out:
  1845. mutex_unlock(&pool->stratum_lock);
  1846. return ret;
  1847. }
  1848. static char *get_sessionid(json_t *val)
  1849. {
  1850. char *ret = NULL;
  1851. json_t *arr_val;
  1852. int arrsize, i;
  1853. arr_val = json_array_get(val, 0);
  1854. if (!arr_val || !json_is_array(arr_val))
  1855. goto out;
  1856. arrsize = json_array_size(arr_val);
  1857. for (i = 0; i < arrsize; i++) {
  1858. json_t *arr = json_array_get(arr_val, i);
  1859. char *notify;
1860. if (!arr || !json_is_array(arr))
  1861. break;
  1862. notify = __json_array_string(arr, 0);
  1863. if (!notify)
  1864. continue;
  1865. if (!strncasecmp(notify, "mining.notify", 13)) {
  1866. ret = json_array_string(arr, 1);
  1867. break;
  1868. }
  1869. }
  1870. out:
  1871. return ret;
  1872. }
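/*
 * Illustrative sketch of the mining.subscribe result that get_sessionid and
 * initiate_stratum (below) pick apart; the set_difficulty entry is only an
 * example of what pools commonly include:
 *   [
 *     [["mining.set_difficulty", "..."], ["mining.notify", "<sessionid>"]],
 *     "<extranonce1 hex>",
 *     <extranonce2 size>
 *   ]
 * get_sessionid scans the subscription list for the "mining.notify" pair and
 * returns a copy of its second element.
 */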
  1873. void suspend_stratum(struct pool *pool)
  1874. {
  1875. clear_sockbuf(pool);
  1876. applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);
  1877. mutex_lock(&pool->stratum_lock);
  1878. pool->stratum_active = pool->stratum_notify = false;
  1879. if (pool->stratum_curl) {
  1880. curl_easy_cleanup(pool->stratum_curl);
  1881. }
  1882. pool->stratum_curl = NULL;
  1883. pool->sock = INVSOCK;
  1884. mutex_unlock(&pool->stratum_lock);
  1885. }
  1886. bool initiate_stratum(struct pool *pool)
  1887. {
  1888. bool ret = false, recvd = false, noresume = false, sockd = false;
  1889. bool trysuggest = request_target_str;
  1890. char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid;
  1891. json_t *val = NULL, *res_val, *err_val;
  1892. json_error_t err;
  1893. int n2size;
  1894. resend:
  1895. if (!setup_stratum_curl(pool)) {
  1896. sockd = false;
  1897. goto out;
  1898. }
  1899. sockd = true;
  1900. clear_sock(pool);
  1901. if (trysuggest)
  1902. {
  1903. int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str);
  1904. if (!_stratum_send(pool, s, sz, true))
  1905. {
  1906. applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no);
  1907. goto out;
  1908. }
  1909. recvd = true;
  1910. }
  1911. if (noresume) {
  1912. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
  1913. } else {
  1914. if (pool->sessionid)
  1915. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
  1916. else
  1917. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
  1918. }
  1919. if (!_stratum_send(pool, s, strlen(s), true)) {
  1920. applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
  1921. goto out;
  1922. }
  1923. recvd = true;
  1924. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1925. applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
  1926. goto out;
  1927. }
  1928. sret = recv_line(pool);
  1929. if (!sret)
  1930. goto out;
  1931. val = JSON_LOADS(sret, &err);
  1932. free(sret);
  1933. if (!val) {
  1934. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1935. goto out;
  1936. }
  1937. res_val = json_object_get(val, "result");
  1938. err_val = json_object_get(val, "error");
  1939. if (!res_val || json_is_null(res_val) ||
  1940. (err_val && !json_is_null(err_val))) {
  1941. char *ss;
  1942. if (err_val)
  1943. ss = json_dumps(err_val, JSON_INDENT(3));
  1944. else
  1945. ss = strdup("(unknown reason)");
  1946. applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);
  1947. free(ss);
  1948. goto out;
  1949. }
  1950. sessionid = get_sessionid(res_val);
  1951. if (!sessionid)
  1952. applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
  1953. nonce1 = json_array_string(res_val, 1);
  1954. if (!nonce1) {
  1955. applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");
  1956. free(sessionid);
  1957. goto out;
  1958. }
  1959. n2size = json_integer_value(json_array_get(res_val, 2));
  1960. if (!n2size) {
  1961. applog(LOG_INFO, "Failed to get n2size in initiate_stratum");
  1962. free(sessionid);
  1963. free(nonce1);
  1964. goto out;
  1965. }
  1966. cg_wlock(&pool->data_lock);
  1967. free(pool->sessionid);
  1968. pool->sessionid = sessionid;
  1969. free(pool->nonce1);
  1970. pool->nonce1 = nonce1;
  1971. pool->n1_len = strlen(nonce1) / 2;
  1972. pool->n2size = n2size;
  1973. pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size;
  1974. #ifdef WORDS_BIGENDIAN
  1975. pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0;
  1976. #endif
  1977. cg_wunlock(&pool->data_lock);
  1978. if (sessionid)
  1979. applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);
  1980. ret = true;
  1981. out:
  1982. if (val)
  1983. json_decref(val);
  1984. if (ret) {
  1985. if (!pool->stratum_url)
  1986. pool->stratum_url = pool->sockaddr_url;
  1987. pool->stratum_active = true;
  1988. pool->swork.diff = 1;
  1989. if (opt_protocol) {
  1990. applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
  1991. pool->pool_no, pool->nonce1, pool->n2size);
  1992. }
  1993. } else {
  1994. if (recvd)
  1995. {
  1996. if (trysuggest)
  1997. {
  1998. applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no);
  1999. trysuggest = false;
  2000. goto resend;
  2001. }
  2002. if (!noresume)
  2003. {
  2004. applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
  2005. noresume = true;
  2006. goto resend;
  2007. }
  2008. }
  2009. applog(LOG_DEBUG, "Initiate stratum failed");
  2010. if (sockd)
  2011. suspend_stratum(pool);
  2012. }
  2013. return ret;
  2014. }
  2015. bool restart_stratum(struct pool *pool)
  2016. {
  2017. if (pool->stratum_active)
  2018. suspend_stratum(pool);
  2019. if (!initiate_stratum(pool))
  2020. return false;
  2021. if (!auth_stratum(pool))
  2022. return false;
  2023. return true;
  2024. }
  2025. void dev_error_update(struct cgpu_info *dev, enum dev_reason reason)
  2026. {
  2027. dev->device_last_not_well = time(NULL);
  2028. cgtime(&dev->tv_device_last_not_well);
  2029. dev->device_not_well_reason = reason;
  2030. }
  2031. void dev_error(struct cgpu_info *dev, enum dev_reason reason)
  2032. {
  2033. dev_error_update(dev, reason);
  2034. switch (reason) {
  2035. case REASON_THREAD_FAIL_INIT:
  2036. dev->thread_fail_init_count++;
  2037. break;
  2038. case REASON_THREAD_ZERO_HASH:
  2039. dev->thread_zero_hash_count++;
  2040. break;
  2041. case REASON_THREAD_FAIL_QUEUE:
  2042. dev->thread_fail_queue_count++;
  2043. break;
  2044. case REASON_DEV_SICK_IDLE_60:
  2045. dev->dev_sick_idle_60_count++;
  2046. break;
  2047. case REASON_DEV_DEAD_IDLE_600:
  2048. dev->dev_dead_idle_600_count++;
  2049. break;
  2050. case REASON_DEV_NOSTART:
  2051. dev->dev_nostart_count++;
  2052. break;
  2053. case REASON_DEV_OVER_HEAT:
  2054. dev->dev_over_heat_count++;
  2055. break;
  2056. case REASON_DEV_THERMAL_CUTOFF:
  2057. dev->dev_thermal_cutoff_count++;
  2058. break;
  2059. case REASON_DEV_COMMS_ERROR:
  2060. dev->dev_comms_error_count++;
  2061. break;
  2062. case REASON_DEV_THROTTLE:
  2063. dev->dev_throttle_count++;
  2064. break;
  2065. }
  2066. }
  2067. /* Realloc an existing string to fit an extra string s, appending s to it. */
  2068. void *realloc_strcat(char *ptr, char *s)
  2069. {
  2070. size_t old = strlen(ptr), len = strlen(s);
  2071. char *ret;
  2072. if (!len)
  2073. return ptr;
  2074. len += old + 1;
  2075. align_len(&len);
  2076. ret = malloc(len);
  2077. if (unlikely(!ret))
  2078. quithere(1, "Failed to malloc");
  2079. sprintf(ret, "%s%s", ptr, s);
  2080. free(ptr);
  2081. return ret;
  2082. }
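/* Illustrative sketch (hypothetical helper): realloc_strcat frees the buffer
 * passed in (when s is non-empty) and returns a freshly allocated
 * concatenation, so the usual pattern is to reassign the result. */
static void __maybe_unused example_realloc_strcat(void)
{
	char *buf = strdup("rpc_user=");
	buf = realloc_strcat(buf, "miner1");  /* buf now holds "rpc_user=miner1" */
	free(buf);
}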
  2083. static
  2084. bool sanechars[] = {
  2085. false, false, false, false, false, false, false, false,
  2086. false, false, false, false, false, false, false, false,
  2087. false, false, false, false, false, false, false, false,
  2088. false, false, false, false, false, false, false, false,
  2089. false, false, false, false, false, false, false, false,
  2090. false, false, false, false, false, false, false, false,
  2091. true , true , true , true , true , true , true , true ,
  2092. true , true , false, false, false, false, false, false,
  2093. false, true , true , true , true , true , true , true ,
  2094. true , true , true , true , true , true , true , true ,
  2095. true , true , true , true , true , true , true , true ,
  2096. true , true , true , false, false, false, false, false,
  2097. false, true , true , true , true , true , true , true ,
  2098. true , true , true , true , true , true , true , true ,
  2099. true , true , true , true , true , true , true , true ,
  2100. true , true , true , false, false, false, false, false,
  2101. };
  2102. char *sanestr(char *o, char *s)
  2103. {
  2104. char *rv = o;
  2105. bool br = false;
  2106. for ( ; s[0]; ++s)
  2107. {
  2108. if (sanechars[s[0] & 0x7f])
  2109. {
  2110. if (br)
  2111. {
  2112. br = false;
  2113. if (s[0] >= '0' && s[0] <= '9')
  2114. (o++)[0] = '_';
  2115. }
  2116. (o++)[0] = s[0];
  2117. }
  2118. else
  2119. if (o != s && o[-1] >= '0' && o[-1] <= '9')
  2120. br = true;
  2121. }
  2122. o[0] = '\0';
  2123. return rv;
  2124. }
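/* Illustrative example (hypothetical input): sanestr keeps only [A-Za-z0-9]
 * and inserts '_' where a stripped run separated two digit groups. */
static void __maybe_unused example_sanestr(void)
{
	char buf[16];
	sanestr(buf, "BFL 2.5 dev");  /* buf now contains "BFL2_5dev" */
}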
  2125. void RenameThread(const char* name)
  2126. {
  2127. #if defined(PR_SET_NAME)
  2128. // Only the first 15 characters are used (16 - NUL terminator)
  2129. prctl(PR_SET_NAME, name, 0, 0, 0);
  2130. #elif defined(__APPLE__)
  2131. pthread_setname_np(name);
  2132. #elif (defined(__FreeBSD__) || defined(__OpenBSD__))
  2133. pthread_set_name_np(pthread_self(), name);
  2134. #else
  2135. // Prevent warnings for unused parameters...
  2136. (void)name;
  2137. #endif
  2138. }
  2139. static pthread_key_t key_bfgtls;
  2140. struct bfgtls_data {
  2141. char *bfg_strerror_result;
  2142. size_t bfg_strerror_resultsz;
  2143. #ifdef WIN32
  2144. LPSTR bfg_strerror_socketresult;
  2145. #endif
  2146. };
  2147. static
  2148. struct bfgtls_data *get_bfgtls()
  2149. {
  2150. struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls);
  2151. if (bfgtls)
  2152. return bfgtls;
  2153. void *p;
  2154. bfgtls = malloc(sizeof(*bfgtls));
  2155. if (!bfgtls)
  2156. quithere(1, "malloc bfgtls failed");
  2157. p = malloc(64);
  2158. if (!p)
  2159. quithere(1, "malloc bfg_strerror_result failed");
  2160. *bfgtls = (struct bfgtls_data){
  2161. .bfg_strerror_resultsz = 64,
  2162. .bfg_strerror_result = p,
  2163. };
  2164. if (pthread_setspecific(key_bfgtls, bfgtls))
  2165. quithere(1, "pthread_setspecific failed");
  2166. return bfgtls;
  2167. }
  2168. void bfg_init_threadlocal()
  2169. {
  2170. if (pthread_key_create(&key_bfgtls, NULL))
  2171. quithere(1, "pthread_key_create failed");
  2172. }
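/* Usage note: bfg_init_threadlocal() is expected to run once at startup,
 * before any thread calls bfg_strerror() (and hence get_bfgtls() above), so
 * that key_bfgtls is valid when the per-thread buffers are first created. */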
  2173. static
  2174. bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum)
  2175. {
  2176. if (minimum <= *bufszp)
  2177. return false;
  2178. while (minimum > *bufszp)
2179. *bufszp *= 2;
  2180. *bufp = realloc(*bufp, *bufszp);
  2181. if (unlikely(!*bufp))
  2182. quithere(1, "realloc failed");
  2183. return true;
  2184. }
  2185. static
  2186. const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src)
  2187. {
  2188. if (!src)
  2189. return NULL;
  2190. const size_t srcsz = strlen(src) + 1;
  2191. bfg_grow_buffer(bufp, bufszp, srcsz);
  2192. memcpy(*bufp, src, srcsz);
  2193. return *bufp;
  2194. }
  2195. // Guaranteed to always return some string (or quit)
  2196. const char *bfg_strerror(int e, enum bfg_strerror_type type)
  2197. {
  2198. static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  2199. struct bfgtls_data *bfgtls = get_bfgtls();
  2200. size_t * const bufszp = &bfgtls->bfg_strerror_resultsz;
  2201. char ** const bufp = &bfgtls->bfg_strerror_result;
  2202. const char *have = NULL;
  2203. switch (type) {
  2204. case BST_LIBUSB:
  2205. // NOTE: Nested preprocessor checks since the latter isn't defined at all without the former
  2206. #ifdef HAVE_LIBUSB
  2207. # if HAVE_DECL_LIBUSB_ERROR_NAME
  2208. // libusb makes no guarantees for thread-safety or persistence
  2209. mutex_lock(&mutex);
  2210. have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e));
  2211. mutex_unlock(&mutex);
  2212. # endif
  2213. #endif
  2214. break;
  2215. case BST_SOCKET:
  2216. {
  2217. #ifdef WIN32
  2218. // Windows has a different namespace for socket errors
  2219. LPSTR *msg = &bfgtls->bfg_strerror_socketresult;
  2220. if (*msg)
  2221. LocalFree(*msg);
  2222. if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0))
  2223. return *msg;
  2224. *msg = NULL;
  2225. break;
  2226. #endif
  2227. }
  2228. // Fallthru on non-WIN32
  2229. case BST_ERRNO:
  2230. {
  2231. #ifdef __STRERROR_S_WORKS
  2232. // FIXME: Not sure how to get this on MingW64
  2233. retry:
  2234. if (likely(!strerror_s(*bufp, *bufszp, e)))
  2235. {
  2236. if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2))
  2237. goto retry;
  2238. return *bufp;
  2239. }
  2240. // TODO: XSI strerror_r
  2241. // TODO: GNU strerror_r
  2242. #else
  2243. mutex_lock(&mutex);
  2244. have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e));
  2245. mutex_unlock(&mutex);
  2246. #endif
  2247. }
  2248. }
  2249. if (have)
  2250. return *bufp;
2251. // Fallback: Stringify the number
  2252. static const char fmt[] = "%s error #%d", *typestr;
  2253. switch (type) {
  2254. case BST_ERRNO:
  2255. typestr = "System";
  2256. break;
  2257. case BST_SOCKET:
  2258. typestr = "Socket";
  2259. break;
  2260. case BST_LIBUSB:
  2261. typestr = "libusb";
  2262. break;
  2263. default:
  2264. typestr = "Unexpected";
  2265. }
  2266. int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1;
  2267. bfg_grow_buffer(bufp, bufszp, sz);
  2268. sprintf(*bufp, fmt, typestr, e);
  2269. return *bufp;
  2270. }
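/* Illustrative sketch (hypothetical helper): typical use mirrors recv_line
 * above -- capture the error code immediately, then format it for logging.
 * The returned buffer is per-thread and stays valid until the same thread
 * calls bfg_strerror() again. */
static void __maybe_unused example_bfg_strerror(void)
{
	const int e = SOCKERR;  /* or errno with BST_ERRNO */
	applog(LOG_DEBUG, "Example socket error: %s", bfg_strerror(e, BST_SOCKET));
}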
  2271. void notifier_init(notifier_t pipefd)
  2272. {
  2273. #ifdef WIN32
  2274. #define WindowsErrorStr(e) bfg_strerror(e, BST_SOCKET)
  2275. SOCKET listener, connecter, acceptor;
  2276. listener = socket(AF_INET, SOCK_STREAM, 0);
  2277. if (listener == INVALID_SOCKET)
  2278. quit(1, "Failed to create listener socket"IN_FMT_FFL": %s",
  2279. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2280. connecter = socket(AF_INET, SOCK_STREAM, 0);
  2281. if (connecter == INVALID_SOCKET)
  2282. quit(1, "Failed to create connect socket"IN_FMT_FFL": %s",
  2283. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2284. struct sockaddr_in inaddr = {
  2285. .sin_family = AF_INET,
  2286. .sin_addr = {
  2287. .s_addr = htonl(INADDR_LOOPBACK),
  2288. },
  2289. .sin_port = 0,
  2290. };
  2291. {
  2292. static const int reuse = 1;
  2293. setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse));
  2294. }
  2295. if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR)
  2296. quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s",
  2297. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2298. socklen_t inaddr_sz = sizeof(inaddr);
  2299. if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR)
  2300. quit(1, "Failed to getsockname"IN_FMT_FFL": %s",
  2301. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2302. if (listen(listener, 1) == SOCKET_ERROR)
  2303. quit(1, "Failed to listen"IN_FMT_FFL": %s",
  2304. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2305. inaddr.sin_family = AF_INET;
  2306. inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  2307. if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR)
  2308. quit(1, "Failed to connect"IN_FMT_FFL": %s",
  2309. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2310. acceptor = accept(listener, NULL, NULL);
  2311. if (acceptor == INVALID_SOCKET)
  2312. quit(1, "Failed to accept"IN_FMT_FFL": %s",
  2313. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2314. closesocket(listener);
  2315. pipefd[0] = connecter;
  2316. pipefd[1] = acceptor;
  2317. #else
  2318. if (pipe(pipefd))
  2319. quithere(1, "Failed to create pipe");
  2320. #endif
  2321. }
  2322. void notifier_wake(notifier_t fd)
  2323. {
  2324. if (fd[1] == INVSOCK)
  2325. return;
  2326. #ifdef WIN32
  2327. (void)send(fd[1], "\0", 1, 0);
  2328. #else
  2329. (void)write(fd[1], "\0", 1);
  2330. #endif
  2331. }
  2332. void notifier_read(notifier_t fd)
  2333. {
  2334. char buf[0x10];
  2335. #ifdef WIN32
  2336. (void)recv(fd[0], buf, sizeof(buf), 0);
  2337. #else
  2338. (void)read(fd[0], buf, sizeof(buf));
  2339. #endif
  2340. }
  2341. void notifier_destroy(notifier_t fd)
  2342. {
  2343. #ifdef WIN32
  2344. closesocket(fd[0]);
  2345. closesocket(fd[1]);
  2346. #else
  2347. close(fd[0]);
  2348. close(fd[1]);
  2349. #endif
  2350. fd[0] = fd[1] = INVSOCK;
  2351. }
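/* Illustrative sketch (assumed usage, hypothetical helper): one thread waits
 * on fd[0] via select() alongside its other descriptors; another thread
 * calls notifier_wake() to make that select() return, and the woken thread
 * drains the byte with notifier_read(). */
static void __maybe_unused example_notifier_wait(notifier_t n)
{
	fd_set rd;
	FD_ZERO(&rd);
	FD_SET(n[0], &rd);
	if (select(n[0] + 1, &rd, NULL, NULL, NULL) > 0 && FD_ISSET(n[0], &rd))
		notifier_read(n);  /* consume the wakeup byte(s) */
}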
  2352. void _bytes_alloc_failure(size_t sz)
  2353. {
  2354. quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz);
  2355. }
  2356. void *cmd_thread(void *cmdp)
  2357. {
  2358. const char *cmd = cmdp;
  2359. applog(LOG_DEBUG, "Executing command: %s", cmd);
  2360. system(cmd);
  2361. return NULL;
  2362. }
  2363. void run_cmd(const char *cmd)
  2364. {
  2365. if (!cmd)
  2366. return;
  2367. pthread_t pth;
  2368. pthread_create(&pth, NULL, cmd_thread, (void*)cmd);
  2369. }
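/* Illustrative sketch (hypothetical command): run_cmd spawns a thread that
 * runs the command via system() and returns immediately, so the caller must
 * keep the string alive; a literal or other long-lived buffer is typical. */
static void __maybe_unused example_run_cmd(void)
{
	run_cmd("logger 'example: device overheated'");
}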