util.c

  1. /*
  2. * Copyright 2011-2013 Con Kolivas
  3. * Copyright 2011-2013 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. * Copyright 2012 Giel van Schijndel
  6. * Copyright 2012 Gavin Andresen
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 3 of the License, or (at your option)
  11. * any later version. See COPYING for more details.
  12. */
  13. #include "config.h"
  14. #include <stdbool.h>
  15. #include <stdint.h>
  16. #include <stdio.h>
  17. #include <stdlib.h>
  18. #include <ctype.h>
  19. #include <stdarg.h>
  20. #include <string.h>
  21. #include <pthread.h>
  22. #include <jansson.h>
  23. #include <curl/curl.h>
  24. #include <time.h>
  25. #include <errno.h>
  26. #include <unistd.h>
  27. #include <sys/types.h>
  28. #ifdef HAVE_SYS_PRCTL_H
  29. # include <sys/prctl.h>
  30. #endif
  31. #if defined(__FreeBSD__) || defined(__OpenBSD__)
  32. # include <pthread_np.h>
  33. #endif
  34. #ifndef WIN32
  35. #include <fcntl.h>
  36. # ifdef __linux
  37. # include <sys/prctl.h>
  38. # endif
  39. # include <sys/socket.h>
  40. # include <netinet/in.h>
  41. # include <netinet/tcp.h>
  42. # include <netdb.h>
  43. #else
  44. # include <windows.h>
  45. # include <winsock2.h>
  46. # include <mstcpip.h>
  47. # include <ws2tcpip.h>
  48. # include <mmsystem.h>
  49. #endif
  50. #include <utlist.h>
  51. #include "miner.h"
  52. #include "compat.h"
  53. #include "util.h"
  54. #define DEFAULT_SOCKWAIT 60
  55. bool successful_connect = false;
  56. struct timeval nettime;
  57. struct data_buffer {
  58. void *buf;
  59. size_t len;
  60. curl_socket_t *idlemarker;
  61. };
  62. struct upload_buffer {
  63. const void *buf;
  64. size_t len;
  65. };
  66. struct header_info {
  67. char *lp_path;
  68. int rolltime;
  69. char *reason;
  70. char *stratum_url;
  71. bool hadrolltime;
  72. bool canroll;
  73. bool hadexpire;
  74. };
  75. struct tq_ent {
  76. void *data;
  77. struct tq_ent *prev;
  78. struct tq_ent *next;
  79. };
  80. static void databuf_free(struct data_buffer *db)
  81. {
  82. if (!db)
  83. return;
  84. free(db->buf);
  85. #ifdef DEBUG_DATABUF
  86. applog(LOG_DEBUG, "databuf_free(%p)", db->buf);
  87. #endif
  88. memset(db, 0, sizeof(*db));
  89. }
  90. // aka data_buffer_write
  91. static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb,
  92. void *user_data)
  93. {
  94. struct data_buffer *db = user_data;
  95. size_t oldlen, newlen;
  96. oldlen = db->len;
  97. if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size))
  98. return 0;
  99. if (unlikely(nmemb > (SIZE_MAX - oldlen) / size))
  100. nmemb = (SIZE_MAX - oldlen) / size;
  101. size_t len = size * nmemb;
  102. void *newmem;
  103. static const unsigned char zero = 0;
  104. if (db->idlemarker) {
  105. const unsigned char *cptr = ptr;
  106. for (size_t i = 0; i < len; ++i)
  107. if (!(isCspace(cptr[i]) || cptr[i] == '{')) {
  108. *db->idlemarker = CURL_SOCKET_BAD;
  109. db->idlemarker = NULL;
  110. break;
  111. }
  112. }
  113. newlen = oldlen + len;
  114. newmem = realloc(db->buf, newlen + 1);
  115. #ifdef DEBUG_DATABUF
  116. applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem);
  117. #endif
  118. if (!newmem)
  119. return 0;
  120. db->buf = newmem;
  121. db->len = newlen;
  122. memcpy(db->buf + oldlen, ptr, len);
  123. memcpy(db->buf + newlen, &zero, 1); /* null terminate */
  124. return nmemb;
  125. }
  126. static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb,
  127. void *user_data)
  128. {
  129. struct upload_buffer *ub = user_data;
  130. unsigned int len = size * nmemb;
  131. if (len > ub->len)
  132. len = ub->len;
  133. if (len) {
  134. memcpy(ptr, ub->buf, len);
  135. ub->buf += len;
  136. ub->len -= len;
  137. }
  138. return len;
  139. }
  140. static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
  141. {
  142. struct header_info *hi = user_data;
  143. size_t remlen, slen, ptrlen = size * nmemb;
  144. char *rem, *val = NULL, *key = NULL;
  145. void *tmp;
  146. val = calloc(1, ptrlen);
  147. key = calloc(1, ptrlen);
  148. if (!key || !val)
  149. goto out;
  150. tmp = memchr(ptr, ':', ptrlen);
  151. if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */
  152. goto out;
  153. slen = tmp - ptr;
  154. if ((slen + 1) == ptrlen) /* skip key w/ no value */
  155. goto out;
  156. memcpy(key, ptr, slen); /* store & nul term key */
  157. key[slen] = 0;
  158. rem = ptr + slen + 1; /* trim value's leading whitespace */
  159. remlen = ptrlen - slen - 1;
  160. while ((remlen > 0) && (isCspace(*rem))) {
  161. remlen--;
  162. rem++;
  163. }
  164. memcpy(val, rem, remlen); /* store value, trim trailing ws */
  165. val[remlen] = 0;
  166. while ((*val) && (isCspace(val[strlen(val) - 1])))
  167. val[strlen(val) - 1] = 0;
  168. if (!*val) /* skip blank value */
  169. goto out;
  170. if (opt_protocol)
  171. applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);
  172. if (!strcasecmp("X-Roll-Ntime", key)) {
  173. hi->hadrolltime = true;
  174. if (!strncasecmp("N", val, 1))
  175. applog(LOG_DEBUG, "X-Roll-Ntime: N found");
  176. else {
  177. hi->canroll = true;
  178. /* Check to see if expire= is supported and if not, set
  179. * the rolltime to the default scantime */
  180. if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) {
  181. sscanf(val + 7, "%d", &hi->rolltime);
  182. hi->hadexpire = true;
  183. } else
  184. hi->rolltime = opt_scantime;
  185. applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
  186. }
  187. }
  188. if (!strcasecmp("X-Long-Polling", key)) {
  189. hi->lp_path = val; /* steal memory reference */
  190. val = NULL;
  191. }
  192. if (!strcasecmp("X-Reject-Reason", key)) {
  193. hi->reason = val; /* steal memory reference */
  194. val = NULL;
  195. }
  196. if (!strcasecmp("X-Stratum", key)) {
  197. hi->stratum_url = val;
  198. val = NULL;
  199. }
  200. out:
  201. free(key);
  202. free(val);
  203. return ptrlen;
  204. }
  205. static int keep_sockalive(SOCKETTYPE fd)
  206. {
  207. const int tcp_one = 1;
  208. const int tcp_keepidle = 45;
  209. const int tcp_keepintvl = 30;
  210. int ret = 0;
  211. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one))))
  212. ret = 1;
  213. #ifndef WIN32
  214. int flags = fcntl(fd, F_GETFL, 0);
  215. fcntl(fd, F_SETFL, O_NONBLOCK | flags);
  216. #else
  217. u_long flags = 1;
  218. ioctlsocket(fd, FIONBIO, &flags);
  219. #endif
  220. if (!opt_delaynet)
  221. #ifndef __linux
  222. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  223. #else /* __linux */
  224. if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one))))
  225. #endif /* __linux */
  226. ret = 1;
  227. #ifdef __linux
  228. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one))))
  229. ret = 1;
  230. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle))))
  231. ret = 1;
  232. if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  233. ret = 1;
  234. #endif /* __linux */
  235. #ifdef __APPLE_CC__
  236. if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl))))
  237. ret = 1;
  238. #endif /* __APPLE_CC__ */
  239. #ifdef WIN32
  240. const int zero = 0;
  241. struct tcp_keepalive vals;
  242. vals.onoff = 1;
  243. vals.keepalivetime = tcp_keepidle * 1000;
  244. vals.keepaliveinterval = tcp_keepintvl * 1000;
  245. DWORD outputBytes;
  246. if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL)))
  247. ret = 1;
  248. /* Windows will keep accepting writes into the send buffer indefinitely,
  249. * blissfully unaware that nothing is getting through, and it will not
  250. * fail gracefully unless we disable the send buffer */
  251. if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero))))
  252. ret = 1;
  253. #endif /* WIN32 */
  254. return ret;
  255. }
  256. void set_cloexec_socket(SOCKETTYPE sock, const bool cloexec)
  257. {
  258. #ifdef WIN32
  259. SetHandleInformation((HANDLE)sock, HANDLE_FLAG_INHERIT, cloexec ? 0 : HANDLE_FLAG_INHERIT);
  260. #elif defined(F_GETFD) && defined(F_SETFD) && defined(O_CLOEXEC)
  261. const int curflags = fcntl(sock, F_GETFD);
  262. int flags = curflags;
  263. if (cloexec)
  264. flags |= FD_CLOEXEC;
  265. else
  266. flags &= ~FD_CLOEXEC;
  267. if (flags != curflags)
  268. fcntl(sock, F_SETFD, flags);
  269. #endif
  270. }
  271. int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
  272. curlsocktype __maybe_unused purpose)
  273. {
  274. return keep_sockalive(fd);
  275. }
  276. static void last_nettime(struct timeval *last)
  277. {
  278. rd_lock(&netacc_lock);
  279. last->tv_sec = nettime.tv_sec;
  280. last->tv_usec = nettime.tv_usec;
  281. rd_unlock(&netacc_lock);
  282. }
  283. static void set_nettime(void)
  284. {
  285. wr_lock(&netacc_lock);
  286. cgtime(&nettime);
  287. wr_unlock(&netacc_lock);
  288. }
  289. static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
  290. char *data, size_t size,
  291. void *userdata)
  292. {
  293. struct pool *pool = (struct pool *)userdata;
  294. switch(type) {
  295. case CURLINFO_HEADER_IN:
  296. case CURLINFO_DATA_IN:
  297. case CURLINFO_SSL_DATA_IN:
  298. pool->cgminer_pool_stats.bytes_received += size;
  299. total_bytes_rcvd += size;
  300. pool->cgminer_pool_stats.net_bytes_received += size;
  301. break;
  302. case CURLINFO_HEADER_OUT:
  303. case CURLINFO_DATA_OUT:
  304. case CURLINFO_SSL_DATA_OUT:
  305. pool->cgminer_pool_stats.bytes_sent += size;
  306. total_bytes_sent += size;
  307. pool->cgminer_pool_stats.net_bytes_sent += size;
  308. break;
  309. case CURLINFO_TEXT:
  310. {
  311. if (!opt_protocol)
  312. break;
  313. // data is not null-terminated, so we need to copy and terminate it for applog
  314. char datacp[size + 1];
  315. memcpy(datacp, data, size);
  316. while (likely(size) && unlikely(isCspace(datacp[size-1])))
  317. --size;
  318. if (unlikely(!size))
  319. break;
  320. datacp[size] = '\0';
  321. applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp);
  322. break;
  323. }
  324. default:
  325. break;
  326. }
  327. return 0;
  328. }
  329. struct json_rpc_call_state {
  330. struct data_buffer all_data;
  331. struct header_info hi;
  332. void *priv;
  333. char curl_err_str[CURL_ERROR_SIZE];
  334. struct curl_slist *headers;
  335. struct upload_buffer upload_data;
  336. struct pool *pool;
  337. };
  338. void json_rpc_call_async(CURL *curl, const char *url,
  339. const char *userpass, const char *rpc_req,
  340. bool longpoll,
  341. struct pool *pool, bool share,
  342. void *priv)
  343. {
  344. struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state));
  345. *state = (struct json_rpc_call_state){
  346. .priv = priv,
  347. .pool = pool,
  348. };
  349. long timeout = longpoll ? (60 * 60) : 60;
  350. char len_hdr[64], user_agent_hdr[128];
  351. struct curl_slist *headers = NULL;
  352. if (longpoll)
  353. state->all_data.idlemarker = &pool->lp_socket;
  354. /* it is assumed that 'curl' is freshly [re]initialized at this pt */
  355. curl_easy_setopt(curl, CURLOPT_PRIVATE, state);
  356. curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
  357. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  358. * to enable it */
  359. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  360. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  361. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  362. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  363. curl_easy_setopt(curl, CURLOPT_URL, url);
  364. curl_easy_setopt(curl, CURLOPT_ENCODING, "");
  365. curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
  366. /* Shares are staggered already and delays in submission can be costly
  367. * so do not delay them */
  368. if (!opt_delaynet || share)
  369. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  370. curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
  371. curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data);
  372. curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
  373. curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data);
  374. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]);
  375. curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
  376. curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
  377. curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi);
  378. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  379. if (pool->rpc_proxy) {
  380. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  381. } else if (opt_socks_proxy) {
  382. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  383. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
  384. }
  385. if (userpass) {
  386. curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
  387. curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
  388. }
  389. if (longpoll)
  390. curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb);
  391. curl_easy_setopt(curl, CURLOPT_POST, 1);
  392. if (opt_protocol)
  393. applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req);
  394. state->upload_data.buf = rpc_req;
  395. state->upload_data.len = strlen(rpc_req);
  396. sprintf(len_hdr, "Content-Length: %lu",
  397. (unsigned long) state->upload_data.len);
  398. sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION);
  399. headers = curl_slist_append(headers,
  400. "Content-type: application/json");
  401. headers = curl_slist_append(headers,
  402. "X-Mining-Extensions: longpoll midstate rollntime submitold");
  403. if (longpoll)
  404. headers = curl_slist_append(headers,
  405. "X-Minimum-Wait: 0");
  406. if (likely(global_hashrate)) {
  407. char ghashrate[255];
  408. sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate);
  409. headers = curl_slist_append(headers, ghashrate);
  410. }
  411. headers = curl_slist_append(headers, len_hdr);
  412. headers = curl_slist_append(headers, user_agent_hdr);
  413. headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/
  414. curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
  415. state->headers = headers;
  416. if (opt_delaynet) {
  417. /* Don't delay share submission, but still track the nettime */
  418. if (!share) {
  419. long long now_msecs, last_msecs;
  420. struct timeval now, last;
  421. cgtime(&now);
  422. last_nettime(&last);
  423. now_msecs = (long long)now.tv_sec * 1000;
  424. now_msecs += now.tv_usec / 1000;
  425. last_msecs = (long long)last.tv_sec * 1000;
  426. last_msecs += last.tv_usec / 1000;
  427. if (now_msecs > last_msecs && now_msecs - last_msecs < 250) {
  428. struct timespec rgtp;
  429. rgtp.tv_sec = 0;
  430. rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000;
  431. nanosleep(&rgtp, NULL);
  432. }
  433. }
  434. set_nettime();
  435. }
  436. }
  437. json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int *rolltime, void *out_priv)
  438. {
  439. struct json_rpc_call_state *state;
  440. if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (void*)&state) != CURLE_OK) {
  441. applog(LOG_ERR, "Failed to get private curl data");
  442. if (out_priv)
  443. *(void**)out_priv = NULL;
  444. goto err_out;
  445. }
  446. if (out_priv)
  447. *(void**)out_priv = state->priv;
  448. json_t *val, *err_val, *res_val;
  449. json_error_t err;
  450. struct pool *pool = state->pool;
  451. bool probing = probe && !pool->probed;
  452. if (rc) {
  453. applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str);
  454. goto err_out;
  455. }
  456. if (!state->all_data.buf) {
  457. applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
  458. goto err_out;
  459. }
  460. pool->cgminer_pool_stats.times_sent++;
  461. pool->cgminer_pool_stats.times_received++;
  462. if (probing) {
  463. pool->probed = true;
  464. /* If X-Long-Polling was found, activate long polling */
  465. if (state->hi.lp_path) {
  466. if (pool->hdr_path != NULL)
  467. free(pool->hdr_path);
  468. pool->hdr_path = state->hi.lp_path;
  469. } else
  470. pool->hdr_path = NULL;
  471. if (state->hi.stratum_url) {
  472. pool->stratum_url = state->hi.stratum_url;
  473. state->hi.stratum_url = NULL;
  474. }
  475. } else {
  476. if (state->hi.lp_path) {
  477. free(state->hi.lp_path);
  478. state->hi.lp_path = NULL;
  479. }
  480. if (state->hi.stratum_url) {
  481. free(state->hi.stratum_url);
  482. state->hi.stratum_url = NULL;
  483. }
  484. }
  485. if (pool->force_rollntime)
  486. {
  487. state->hi.canroll = true;
  488. state->hi.hadexpire = true;
  489. state->hi.rolltime = pool->force_rollntime;
  490. }
  491. if (rolltime)
  492. *rolltime = state->hi.rolltime;
  493. pool->cgminer_pool_stats.rolltime = state->hi.rolltime;
  494. pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime;
  495. pool->cgminer_pool_stats.canroll = state->hi.canroll;
  496. pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire;
  497. val = JSON_LOADS(state->all_data.buf, &err);
  498. if (!val) {
  499. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  500. if (opt_protocol)
  501. applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf);
  502. goto err_out;
  503. }
  504. if (opt_protocol) {
  505. char *s = json_dumps(val, JSON_INDENT(3));
  506. applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
  507. free(s);
  508. }
  509. /* JSON-RPC valid response returns a non-null 'result',
  510. * and a null 'error'.
  511. */
  512. res_val = json_object_get(val, "result");
  513. err_val = json_object_get(val, "error");
  514. if (!res_val ||(err_val && !json_is_null(err_val))) {
  515. char *s;
  516. if (err_val)
  517. s = json_dumps(err_val, JSON_INDENT(3));
  518. else
  519. s = strdup("(unknown reason)");
  520. applog(LOG_INFO, "JSON-RPC call failed: %s", s);
  521. free(s);
  522. json_decref(val);
  523. goto err_out;
  524. }
  525. if (state->hi.reason) {
  526. json_object_set_new(val, "reject-reason", json_string(state->hi.reason));
  527. free(state->hi.reason);
  528. state->hi.reason = NULL;
  529. }
  530. successful_connect = true;
  531. databuf_free(&state->all_data);
  532. curl_slist_free_all(state->headers);
  533. curl_easy_reset(curl);
  534. free(state);
  535. return val;
  536. err_out:
  537. databuf_free(&state->all_data);
  538. curl_slist_free_all(state->headers);
  539. curl_easy_reset(curl);
  540. if (!successful_connect)
  541. applog(LOG_DEBUG, "Failed to connect in json_rpc_call");
  542. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  543. free(state);
  544. return NULL;
  545. }
  546. json_t *json_rpc_call(CURL *curl, const char *url,
  547. const char *userpass, const char *rpc_req,
  548. bool probe, bool longpoll, int *rolltime,
  549. struct pool *pool, bool share)
  550. {
  551. json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
  552. int rc = curl_easy_perform(curl);
  553. return json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
  554. }
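/* Illustrative usage sketch, kept out of the build with #if 0: driving the
 * synchronous wrapper above.  example_json_rpc() is a hypothetical caller;
 * the request string, url and userpass arguments are placeholders for
 * whatever the real caller manages.  The returned JSON value is owned by
 * the caller and must be released with json_decref().
 */
#if 0
static void example_json_rpc(CURL *curl, struct pool *pool,
                             const char *url, const char *userpass)
{
	const char *req = "{\"method\": \"getwork\", \"params\": [], \"id\": 0}";
	int rolltime = 0;
	/* probe = false, longpoll = false, share = false */
	json_t *val = json_rpc_call(curl, url, userpass, req,
	                            false, false, &rolltime, pool, false);
	if (!val)
		return;                 /* errors were already logged */
	json_t *res = json_object_get(val, "result");
	(void)res;                      /* ... inspect the result ... */
	json_decref(val);
}
#endif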
  555. bool our_curl_supports_proxy_uris()
  556. {
  557. curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
  558. return data->age && data->version_num >= (( 7 <<16)|( 21 <<8)| 7); // 7.21.7
  559. }
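/* our_curl_supports_proxy_uris() relies on libcurl packing its runtime
 * version as 0x00MMmmpp (major, minor, patch), so 7.21.7 becomes
 * (7<<16)|(21<<8)|7.  A generalised form of the same check could look like
 * the sketch below; curl_at_least() is hypothetical, not a helper defined
 * anywhere in this file.
 */
#if 0
static bool curl_at_least(unsigned maj, unsigned min, unsigned patch)
{
	curl_version_info_data *data = curl_version_info(CURLVERSION_NOW);
	return data->age && data->version_num >= ((maj << 16) | (min << 8) | patch);
}
#endif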
  560. // NOTE: This assumes the reference URI is a root
  561. char *absolute_uri(char *uri, const char *ref)
  562. {
  563. if (strstr(uri, "://"))
  564. return strdup(uri);
  565. char *copy_start, *abs;
  566. bool need_slash = false;
  567. copy_start = (uri[0] == '/') ? &uri[1] : uri;
  568. if (ref[strlen(ref) - 1] != '/')
  569. need_slash = true;
  570. abs = malloc(strlen(ref) + strlen(copy_start) + 2);
  571. if (!abs) {
  572. applog(LOG_ERR, "Malloc failure in absolute_uri");
  573. return NULL;
  574. }
  575. sprintf(abs, "%s%s%s", ref, need_slash ? "/" : "", copy_start);
  576. return abs;
  577. }
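/* Illustrative usage sketch, kept out of the build with #if 0.  The host
 * names are made up; absolute_uri() always returns freshly allocated memory
 * that the caller must free.
 */
#if 0
static void absolute_uri_example(void)
{
	char rel[] = "/LP";
	char already_abs[] = "http://other.example.com/lp";
	/* Relative path joined onto a root reference URI: */
	char *a = absolute_uri(rel, "http://pool.example.com:8332");
	/* a == "http://pool.example.com:8332/LP" */
	/* An already-absolute URI is simply duplicated: */
	char *b = absolute_uri(already_abs, "http://pool.example.com:8332");
	free(a);
	free(b);
}
#endif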
  578. static const char _hexchars[0x10] = "0123456789abcdef";
  579. void bin2hex(char *out, const void *in, size_t len)
  580. {
  581. const unsigned char *p = in;
  582. while (len--)
  583. {
  584. (out++)[0] = _hexchars[p[0] >> 4];
  585. (out++)[0] = _hexchars[p[0] & 0xf];
  586. ++p;
  587. }
  588. out[0] = '\0';
  589. }
  590. static inline
  591. int _hex2bin_char(const char c)
  592. {
  593. if (c >= '0' && c <= '9')
  594. return c - '0';
  595. if (c >= 'a' && c <= 'f')
  596. return (c - 'a') + 10;
  597. if (c >= 'A' && c <= 'F')
  598. return (c - 'A') + 10;
  599. return -1;
  600. }
  601. /* Does the reverse of bin2hex but does not allocate any ram */
  602. bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
  603. {
  604. int n, o;
  605. while (len--)
  606. {
  607. n = _hex2bin_char((hexstr++)[0]);
  608. if (unlikely(n == -1))
  609. {
  610. badchar:
  611. if (!hexstr[-1])
  612. applog(LOG_ERR, "hex2bin: str truncated");
  613. else
  614. applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]);
  615. return false;
  616. }
  617. o = _hex2bin_char((hexstr++)[0]);
  618. if (unlikely(o == -1))
  619. goto badchar;
  620. (p++)[0] = (n << 4) | o;
  621. }
  622. return likely(!hexstr[0]);
  623. }
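/* Illustrative round-trip sketch, kept out of the build with #if 0.  The
 * output buffer for bin2hex() must hold two hex digits per input byte plus a
 * terminating NUL, and hex2bin() expects exactly 2*len hex digits (it fails
 * if the string is truncated or has trailing characters).
 */
#if 0
static void hex_roundtrip_example(void)
{
	const unsigned char bin[4] = {0xde, 0xad, 0xbe, 0xef};
	char hex[(sizeof(bin) * 2) + 1];
	unsigned char back[sizeof(bin)];
	bin2hex(hex, bin, sizeof(bin));        /* hex == "deadbeef" */
	if (!hex2bin(back, hex, sizeof(back)))
		applog(LOG_ERR, "hex round-trip failed");
}
#endif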
  624. void ucs2tochar(char * const out, const uint16_t * const in, const size_t sz)
  625. {
  626. for (int i = 0; i < sz; ++i)
  627. out[i] = in[i];
  628. }
  629. char *ucs2tochar_dup(uint16_t * const in, const size_t sz)
  630. {
  631. char *out = malloc(sz + 1);
  632. ucs2tochar(out, in, sz);
  633. out[sz] = '\0';
  634. return out;
  635. }
  636. void hash_data(unsigned char *out_hash, const unsigned char *data)
  637. {
  638. unsigned char blkheader[80];
  639. // data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header
  640. swap32yes(blkheader, data, 80 / 4);
  641. // double-SHA256 to get the block hash
  642. gen_hash(blkheader, out_hash, 80);
  643. }
  644. // Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1)
  645. void real_block_target(unsigned char *target, const unsigned char *data)
  646. {
  647. uint8_t targetshift;
  648. if (unlikely(data[72] < 3 || data[72] > 0x20))
  649. {
  650. // Invalid (out of bounds) target
  651. memset(target, 0xff, 32);
  652. return;
  653. }
  654. targetshift = data[72] - 3;
  655. memset(target, 0, targetshift);
  656. target[targetshift++] = data[75];
  657. target[targetshift++] = data[74];
  658. target[targetshift++] = data[73];
  659. memset(&target[targetshift], 0, 0x20 - targetshift);
  660. }
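/* Worked example, derived from the code above: for the difficulty-1 compact
 * target nBits = 0x1d00ffff, the header representation used here carries
 * data[72..75] = {0x1d, 0x00, 0xff, 0xff}.  Then targetshift = 0x1d - 3 = 26,
 * so target[0..25] are zeroed, target[26] and target[27] become 0xff,
 * target[28] becomes 0x00, and target[29..31] are zeroed, matching the
 * example output quoted above real_block_target().  The snippet below is an
 * illustrative sketch kept out of the build with #if 0.
 */
#if 0
static void real_block_target_example(void)
{
	unsigned char data[80] = {0}, target[32];
	data[72] = 0x1d; data[74] = 0xff; data[75] = 0xff;   /* nBits 0x1d00ffff */
	real_block_target(target, data);
}
#endif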
  661. bool hash_target_check(const unsigned char *hash, const unsigned char *target)
  662. {
  663. const uint32_t *h32 = (uint32_t*)&hash[0];
  664. const uint32_t *t32 = (uint32_t*)&target[0];
  665. for (int i = 7; i >= 0; --i) {
  666. uint32_t h32i = le32toh(h32[i]);
  667. uint32_t t32i = le32toh(t32[i]);
  668. if (h32i > t32i)
  669. return false;
  670. if (h32i < t32i)
  671. return true;
  672. }
  673. return true;
  674. }
  675. bool hash_target_check_v(const unsigned char *hash, const unsigned char *target)
  676. {
  677. bool rc;
  678. rc = hash_target_check(hash, target);
  679. if (opt_debug) {
  680. unsigned char hash_swap[32], target_swap[32];
  681. char hash_str[65];
  682. char target_str[65];
  683. for (int i = 0; i < 32; ++i) {
  684. hash_swap[i] = hash[31-i];
  685. target_swap[i] = target[31-i];
  686. }
  687. bin2hex(hash_str, hash_swap, 32);
  688. bin2hex(target_str, target_swap, 32);
  689. applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
  690. hash_str,
  691. target_str,
  692. rc ? "YES (hash <= target)" :
  693. "no (false positive; hash > target)");
  694. }
  695. return rc;
  696. }
  697. // This operates on a native-endian SHA256 state
  698. // In other words, on little endian platforms, every 4 bytes are in reverse order
  699. bool fulltest(const unsigned char *hash, const unsigned char *target)
  700. {
  701. unsigned char hash2[32];
  702. swap32tobe(hash2, hash, 32 / 4);
  703. return hash_target_check_v(hash2, target);
  704. }
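/* Illustrative sketch, kept out of the build with #if 0: hash and target are
 * both treated as 256-bit little-endian numbers (byte 31 most significant),
 * compared word by word from the top down, so an all-zero hash satisfies any
 * target.
 */
#if 0
static void target_check_example(void)
{
	unsigned char hash[32] = {0};
	unsigned char target[32] = {0};
	target[26] = target[27] = 0xff;              /* the bdiff-1 target from above */
	bool ok = hash_target_check(hash, target);   /* true: 0 <= target */
	(void)ok;
}
#endif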
  705. struct thread_q *tq_new(void)
  706. {
  707. struct thread_q *tq;
  708. tq = calloc(1, sizeof(*tq));
  709. if (!tq)
  710. return NULL;
  711. pthread_mutex_init(&tq->mutex, NULL);
  712. pthread_cond_init(&tq->cond, NULL);
  713. return tq;
  714. }
  715. void tq_free(struct thread_q *tq)
  716. {
  717. struct tq_ent *ent, *iter;
  718. if (!tq)
  719. return;
  720. DL_FOREACH_SAFE(tq->q, ent, iter) {
  721. DL_DELETE(tq->q, ent);
  722. free(ent);
  723. }
  724. pthread_cond_destroy(&tq->cond);
  725. pthread_mutex_destroy(&tq->mutex);
  726. memset(tq, 0, sizeof(*tq)); /* poison */
  727. free(tq);
  728. }
  729. static void tq_freezethaw(struct thread_q *tq, bool frozen)
  730. {
  731. mutex_lock(&tq->mutex);
  732. tq->frozen = frozen;
  733. pthread_cond_signal(&tq->cond);
  734. mutex_unlock(&tq->mutex);
  735. }
  736. void tq_freeze(struct thread_q *tq)
  737. {
  738. tq_freezethaw(tq, true);
  739. }
  740. void tq_thaw(struct thread_q *tq)
  741. {
  742. tq_freezethaw(tq, false);
  743. }
  744. bool tq_push(struct thread_q *tq, void *data)
  745. {
  746. struct tq_ent *ent;
  747. bool rc = true;
  748. ent = calloc(1, sizeof(*ent));
  749. if (!ent)
  750. return false;
  751. ent->data = data;
  752. mutex_lock(&tq->mutex);
  753. if (!tq->frozen) {
  754. DL_APPEND(tq->q, ent);
  755. } else {
  756. free(ent);
  757. rc = false;
  758. }
  759. pthread_cond_signal(&tq->cond);
  760. mutex_unlock(&tq->mutex);
  761. return rc;
  762. }
  763. void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
  764. {
  765. struct tq_ent *ent;
  766. void *rval = NULL;
  767. int rc;
  768. mutex_lock(&tq->mutex);
  769. if (tq->q)
  770. goto pop;
  771. if (abstime)
  772. rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
  773. else
  774. rc = pthread_cond_wait(&tq->cond, &tq->mutex);
  775. if (rc)
  776. goto out;
  777. if (!tq->q)
  778. goto out;
  779. pop:
  780. ent = tq->q;
  781. rval = ent->data;
  782. DL_DELETE(tq->q, ent);
  783. free(ent);
  784. out:
  785. mutex_unlock(&tq->mutex);
  786. return rval;
  787. }
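/* Illustrative producer/consumer sketch for the thread_q API above, kept out
 * of the build with #if 0.  The int payload is a stand-in for whatever the
 * caller queues; tq_push() fails when the queue is frozen, and tq_pop() with
 * an absolute timeout returns NULL if nothing arrives in time.
 */
#if 0
static void thread_q_example(void)
{
	struct thread_q *tq = tq_new();
	if (!tq)
		return;
	int *job = malloc(sizeof(*job));
	*job = 42;
	if (!tq_push(tq, job))
		free(job);
	struct timespec abstime = { .tv_sec = time(NULL) + 5, .tv_nsec = 0 };
	int *got = tq_pop(tq, &abstime);
	free(got);
	tq_free(tq);
}
#endif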
  788. int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
  789. {
  790. int rv = pthread_create(&thr->pth, attr, start, arg);
  791. if (likely(!rv))
  792. thr->has_pth = true;
  793. return rv;
  794. }
  795. void thr_info_freeze(struct thr_info *thr)
  796. {
  797. struct tq_ent *ent, *iter;
  798. struct thread_q *tq;
  799. if (!thr)
  800. return;
  801. tq = thr->q;
  802. if (!tq)
  803. return;
  804. mutex_lock(&tq->mutex);
  805. tq->frozen = true;
  806. DL_FOREACH_SAFE(tq->q, ent, iter) {
  807. DL_DELETE(tq->q, ent);
  808. free(ent);
  809. }
  810. mutex_unlock(&tq->mutex);
  811. }
  812. void thr_info_cancel(struct thr_info *thr)
  813. {
  814. if (!thr)
  815. return;
  816. if (thr->has_pth) {
  817. pthread_cancel(thr->pth);
  818. thr->has_pth = false;
  819. }
  820. }
  821. #ifndef HAVE_PTHREAD_CANCEL
  822. // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill
  823. enum pthread_cancel_workaround_mode {
  824. PCWM_DEFAULT = 0,
  825. PCWM_TERMINATE = 1,
  826. PCWM_ASYNC = 2,
  827. PCWM_DISABLED = 4,
  828. PCWM_CANCELLED = 8,
  829. };
  830. static pthread_key_t key_pcwm;
  831. struct sigaction pcwm_orig_term_handler;
  832. static
  833. void do_pthread_cancel_exit(int flags)
  834. {
  835. if (!(flags & PCWM_ASYNC))
  836. // NOTE: Logging disables cancel while mutex held, so this is safe
  837. applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW");
  838. pthread_exit(PTHREAD_CANCELED);
  839. }
  840. static
  841. void sighandler_pthread_cancel(int sig)
  842. {
  843. int flags = (int)pthread_getspecific(key_pcwm);
  844. if (flags & PCWM_TERMINATE) // Main thread
  845. {
  846. // Restore original handler and call it
  847. if (sigaction(sig, &pcwm_orig_term_handler, NULL))
  848. quit(1, "pthread_cancel workaround: Failed to restore original handler");
  849. raise(SIGTERM);
  850. quit(1, "pthread_cancel workaround: Original handler returned");
  851. }
  852. if (flags & PCWM_CANCELLED) // Already pending cancel
  853. return;
  854. if (flags & PCWM_DISABLED)
  855. {
  856. flags |= PCWM_CANCELLED;
  857. if (pthread_setspecific(key_pcwm, (void*)flags))
  858. quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)");
  859. return;
  860. }
  861. do_pthread_cancel_exit(flags);
  862. }
  863. void pthread_testcancel(void)
  864. {
  865. int flags = (int)pthread_getspecific(key_pcwm);
  866. if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED))
  867. do_pthread_cancel_exit(flags);
  868. }
  869. int pthread_setcancelstate(int state, int *oldstate)
  870. {
  871. int flags = (int)pthread_getspecific(key_pcwm);
  872. if (oldstate)
  873. *oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
  874. if (state == PTHREAD_CANCEL_DISABLE)
  875. flags |= PCWM_DISABLED;
  876. else
  877. {
  878. if (flags & PCWM_CANCELLED)
  879. do_pthread_cancel_exit(flags);
  880. flags &= ~PCWM_DISABLED;
  881. }
  882. if (pthread_setspecific(key_pcwm, (void*)flags))
  883. return -1;
  884. return 0;
  885. }
  886. int pthread_setcanceltype(int type, int *oldtype)
  887. {
  888. int flags = (int)pthread_getspecific(key_pcwm);
  889. if (oldtype)
  890. *oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
  891. if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
  892. flags |= PCWM_ASYNC;
  893. else
  894. flags &= ~PCWM_ASYNC;
  895. if (pthread_setspecific(key_pcwm, (void*)flags))
  896. return -1;
  897. return 0;
  898. }
  899. void setup_pthread_cancel_workaround()
  900. {
  901. if (pthread_key_create(&key_pcwm, NULL))
  902. quit(1, "pthread_cancel workaround: pthread_key_create failed");
  903. if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE))
  904. quit(1, "pthread_cancel workaround: pthread_setspecific failed");
  905. struct sigaction new_sigact = {
  906. .sa_handler = sighandler_pthread_cancel,
  907. };
  908. if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler))
  909. quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler");
  910. }
  911. #endif
  912. static void _now_gettimeofday(struct timeval *);
  913. static void _cgsleep_us_r_nanosleep(cgtimer_t *, int64_t);
  914. #ifdef HAVE_POOR_GETTIMEOFDAY
  915. static struct timeval tv_timeofday_offset;
  916. static struct timeval _tv_timeofday_lastchecked;
  917. static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER;
  918. static
  919. void bfg_calibrate_timeofday(struct timeval *expected, char *buf)
  920. {
  921. struct timeval actual, delta;
  922. timeradd(expected, &tv_timeofday_offset, expected);
  923. _now_gettimeofday(&actual);
  924. if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1)
  925. // Within reason - no change necessary
  926. return;
  927. timersub(&actual, expected, &delta);
  928. timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset);
  929. sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec);
  930. *expected = actual;
  931. }
  932. void bfg_gettimeofday(struct timeval *out)
  933. {
  934. char buf[64] = "";
  935. timer_set_now(out);
  936. mutex_lock(&_tv_timeofday_mutex);
  937. if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21)
  938. bfg_calibrate_timeofday(out, buf);
  939. else
  940. timeradd(out, &tv_timeofday_offset, out);
  941. mutex_unlock(&_tv_timeofday_mutex);
  942. if (unlikely(buf[0]))
  943. applog(LOG_WARNING, "%s", buf);
  944. }
  945. #endif
  946. #ifdef WIN32
  947. static LARGE_INTEGER _perffreq;
  948. static
  949. void _now_queryperformancecounter(struct timeval *tv)
  950. {
  951. LARGE_INTEGER now;
  952. if (unlikely(!QueryPerformanceCounter(&now)))
  953. quit(1, "QueryPerformanceCounter failed");
  954. *tv = (struct timeval){
  955. .tv_sec = now.QuadPart / _perffreq.QuadPart,
  956. .tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart,
  957. };
  958. }
  959. #endif
  960. static void bfg_init_time();
  961. static
  962. void _now_is_not_set(__maybe_unused struct timeval *tv)
  963. {
  964. bfg_init_time();
  965. timer_set_now(tv);
  966. }
  967. void (*timer_set_now)(struct timeval *tv) = _now_is_not_set;
  968. void (*cgsleep_us_r)(cgtimer_t *, int64_t) = _cgsleep_us_r_nanosleep;
  969. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  970. static clockid_t bfg_timer_clk;
  971. static
  972. void _now_clock_gettime(struct timeval *tv)
  973. {
  974. struct timespec ts;
  975. if (unlikely(clock_gettime(bfg_timer_clk, &ts)))
  976. quit(1, "clock_gettime failed");
  977. *tv = (struct timeval){
  978. .tv_sec = ts.tv_sec,
  979. .tv_usec = ts.tv_nsec / 1000,
  980. };
  981. }
  982. #ifdef HAVE_CLOCK_NANOSLEEP
  983. static
  984. void _cgsleep_us_r_monotonic(cgtimer_t *tv_start, int64_t us)
  985. {
  986. struct timeval tv_end[1];
  987. struct timespec ts_end[1];
  988. int ret;
  989. timer_set_delay(tv_end, tv_start, us);
  990. timeval_to_spec(ts_end, tv_end);
  991. do {
  992. ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
  993. } while (ret == EINTR);
  994. }
  995. #endif
  996. static
  997. bool _bfg_try_clock_gettime(clockid_t clk)
  998. {
  999. struct timespec ts;
  1000. if (clock_gettime(clk, &ts))
  1001. return false;
  1002. bfg_timer_clk = clk;
  1003. timer_set_now = _now_clock_gettime;
  1004. return true;
  1005. }
  1006. #endif
  1007. static
  1008. void bfg_init_time()
  1009. {
  1010. if (timer_set_now != _now_is_not_set)
  1011. return;
  1012. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC
  1013. #ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW
  1014. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW))
  1015. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)");
  1016. else
  1017. #endif
  1018. if (_bfg_try_clock_gettime(CLOCK_MONOTONIC))
  1019. {
  1020. applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)");
  1021. #ifdef HAVE_CLOCK_NANOSLEEP
  1022. cgsleep_us_r = _cgsleep_us_r_monotonic;
  1023. #endif
  1024. }
  1025. else
  1026. #endif
  1027. #ifdef WIN32
  1028. if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart)
  1029. {
  1030. timer_set_now = _now_queryperformancecounter;
  1031. applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter");
  1032. }
  1033. else
  1034. #endif
  1035. {
  1036. timer_set_now = _now_gettimeofday;
  1037. applog(LOG_DEBUG, "Timers: Using gettimeofday");
  1038. }
  1039. #ifdef HAVE_POOR_GETTIMEOFDAY
  1040. char buf[64] = "";
  1041. struct timeval tv;
  1042. timer_set_now(&tv);
  1043. bfg_calibrate_timeofday(&tv, buf);
  1044. applog(LOG_DEBUG, "%s", buf);
  1045. #endif
  1046. }
  1047. void subtime(struct timeval *a, struct timeval *b)
  1048. {
  1049. timersub(a, b, b);
  1050. }
  1051. void addtime(struct timeval *a, struct timeval *b)
  1052. {
  1053. timeradd(a, b, b);
  1054. }
  1055. bool time_more(struct timeval *a, struct timeval *b)
  1056. {
  1057. return timercmp(a, b, >);
  1058. }
  1059. bool time_less(struct timeval *a, struct timeval *b)
  1060. {
  1061. return timercmp(a, b, <);
  1062. }
  1063. void copy_time(struct timeval *dest, const struct timeval *src)
  1064. {
  1065. memcpy(dest, src, sizeof(struct timeval));
  1066. }
  1067. void timespec_to_val(struct timeval *val, const struct timespec *spec)
  1068. {
  1069. val->tv_sec = spec->tv_sec;
  1070. val->tv_usec = spec->tv_nsec / 1000;
  1071. }
  1072. void timeval_to_spec(struct timespec *spec, const struct timeval *val)
  1073. {
  1074. spec->tv_sec = val->tv_sec;
  1075. spec->tv_nsec = val->tv_usec * 1000;
  1076. }
  1077. void us_to_timeval(struct timeval *val, int64_t us)
  1078. {
  1079. lldiv_t tvdiv = lldiv(us, 1000000);
  1080. val->tv_sec = tvdiv.quot;
  1081. val->tv_usec = tvdiv.rem;
  1082. }
  1083. void us_to_timespec(struct timespec *spec, int64_t us)
  1084. {
  1085. lldiv_t tvdiv = lldiv(us, 1000000);
  1086. spec->tv_sec = tvdiv.quot;
  1087. spec->tv_nsec = tvdiv.rem * 1000;
  1088. }
  1089. void ms_to_timespec(struct timespec *spec, int64_t ms)
  1090. {
  1091. lldiv_t tvdiv = lldiv(ms, 1000);
  1092. spec->tv_sec = tvdiv.quot;
  1093. spec->tv_nsec = tvdiv.rem * 1000000;
  1094. }
  1095. void timeraddspec(struct timespec *a, const struct timespec *b)
  1096. {
  1097. a->tv_sec += b->tv_sec;
  1098. a->tv_nsec += b->tv_nsec;
  1099. if (a->tv_nsec >= 1000000000) {
  1100. a->tv_nsec -= 1000000000;
  1101. a->tv_sec++;
  1102. }
  1103. }
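/* Illustrative sketch of the conversion helpers above, kept out of the build
 * with #if 0: 1500 ms becomes { .tv_sec = 1, .tv_nsec = 500000000 }, and
 * timeraddspec() carries tv_nsec overflow into tv_sec.
 */
#if 0
static void timespec_helpers_example(void)
{
	struct timespec ts, extra = { .tv_sec = 0, .tv_nsec = 600000000 };
	ms_to_timespec(&ts, 1500);      /* ts == { 1, 500000000 } */
	timeraddspec(&ts, &extra);      /* ts == { 2, 100000000 } */
}
#endif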
  1104. #ifndef WIN32
  1105. static
  1106. void _now_gettimeofday(struct timeval *tv)
  1107. {
  1108. gettimeofday(tv, NULL);
  1109. }
  1110. #else
  1111. /* Windows file time counts from 1601, so convert it to the Unix epoch (1970). */
  1112. #define EPOCHFILETIME (116444736000000000LL)
  1113. /* Return the system time as an lldiv_t in decimicroseconds. */
  1114. static void decius_time(lldiv_t *lidiv)
  1115. {
  1116. FILETIME ft;
  1117. LARGE_INTEGER li;
  1118. GetSystemTimeAsFileTime(&ft);
  1119. li.LowPart = ft.dwLowDateTime;
  1120. li.HighPart = ft.dwHighDateTime;
  1121. li.QuadPart -= EPOCHFILETIME;
  1122. /* The file time is in 100-nanosecond units, so divide by 10,000,000 to get seconds */
  1123. *lidiv = lldiv(li.QuadPart, 10000000);
  1124. }
  1125. void _now_gettimeofday(struct timeval *tv)
  1126. {
  1127. lldiv_t lidiv;
  1128. decius_time(&lidiv);
  1129. tv->tv_sec = lidiv.quot;
  1130. tv->tv_usec = lidiv.rem / 10;
  1131. }
  1132. #endif
  1133. void cgsleep_ms_r(cgtimer_t *tv_start, int ms)
  1134. {
  1135. cgsleep_us_r(tv_start, ((int64_t)ms) * 1000);
  1136. }
  1137. static
  1138. void _cgsleep_us_r_nanosleep(cgtimer_t *tv_start, int64_t us)
  1139. {
  1140. struct timeval tv_timer[1], tv[1];
  1141. struct timespec ts[1];
  1142. timer_set_delay(tv_timer, tv_start, us);
  1143. while (true)
  1144. {
  1145. timer_set_now(tv);
  1146. if (!timercmp(tv_timer, tv, >))
  1147. return;
  1148. timersub(tv_timer, tv, tv);
  1149. timeval_to_spec(ts, tv);
  1150. nanosleep(ts, NULL);
  1151. }
  1152. }
  1153. void cgsleep_ms(int ms)
  1154. {
  1155. cgtimer_t ts_start;
  1156. cgsleep_prepare_r(&ts_start);
  1157. cgsleep_ms_r(&ts_start, ms);
  1158. }
  1159. void cgsleep_us(int64_t us)
  1160. {
  1161. cgtimer_t ts_start;
  1162. cgsleep_prepare_r(&ts_start);
  1163. cgsleep_us_r(&ts_start, us);
  1164. }
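/* Illustrative sketch, kept out of the build with #if 0: the _r variants
 * measure the delay from a caller-supplied start time, which keeps a
 * periodic loop from drifting by however long the work in each pass takes.
 */
#if 0
static void periodic_loop_example(void)
{
	cgtimer_t ts_start;
	while (true) {
		cgsleep_prepare_r(&ts_start);    /* timestamp before the work */
		/* ... do some work here ... */
		cgsleep_ms_r(&ts_start, 100);    /* sleep only the remainder of 100 ms */
	}
}
#endif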
  1165. /* Returns the microseconds difference between end and start times as a double */
  1166. double us_tdiff(struct timeval *end, struct timeval *start)
  1167. {
  1168. return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec;
  1169. }
  1170. /* Returns the seconds difference between end and start times as a double */
  1171. double tdiff(struct timeval *end, struct timeval *start)
  1172. {
  1173. return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
  1174. }
  1175. bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
  1176. {
  1177. char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
  1178. char url_address[256], port[6];
  1179. int url_len, port_len = 0;
  1180. url_begin = strstr(url, "//");
  1181. if (!url_begin)
  1182. url_begin = url;
  1183. else
  1184. url_begin += 2;
  1185. /* Look for numeric ipv6 entries */
  1186. ipv6_begin = strstr(url_begin, "[");
  1187. ipv6_end = strstr(url_begin, "]");
  1188. if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
  1189. url_end = strstr(ipv6_end, ":");
  1190. else
  1191. url_end = strstr(url_begin, ":");
  1192. if (url_end) {
  1193. url_len = url_end - url_begin;
  1194. port_len = strlen(url_begin) - url_len - 1;
  1195. if (port_len < 1)
  1196. return false;
  1197. port_start = url_end + 1;
  1198. } else
  1199. url_len = strlen(url_begin);
  1200. if (url_len < 1)
  1201. return false;
  1202. sprintf(url_address, "%.*s", url_len, url_begin);
  1203. if (port_len) {
  1204. char *slash;
  1205. snprintf(port, 6, "%.*s", port_len, port_start);
  1206. slash = strchr(port, '/');
  1207. if (slash)
  1208. *slash = '\0';
  1209. } else
  1210. strcpy(port, "80");
  1211. free(*sockaddr_port);
  1212. *sockaddr_port = strdup(port);
  1213. free(*sockaddr_url);
  1214. *sockaddr_url = strdup(url_address);
  1215. return true;
  1216. }
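/* Illustrative usage sketch, kept out of the build with #if 0.  Because
 * extract_sockaddr() frees the previous *sockaddr_url / *sockaddr_port
 * before replacing them, the caller must pass pointers that are either NULL
 * or heap-allocated.  The host names are placeholders.
 */
#if 0
static void extract_sockaddr_example(void)
{
	char url[] = "http://pool.example.com:3333/path";
	char *addr = NULL, *port = NULL;
	if (extract_sockaddr(url, &addr, &port)) {
		/* addr == "pool.example.com", port == "3333" */
	}
	char url2[] = "stratum+tcp://other.example.com";
	if (extract_sockaddr(url2, &addr, &port)) {
		/* no explicit port, so port == "80" */
	}
	free(addr);
	free(port);
}
#endif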
  1217. enum send_ret {
  1218. SEND_OK,
  1219. SEND_SELECTFAIL,
  1220. SEND_SENDFAIL,
  1221. SEND_INACTIVE
  1222. };
  1223. /* Send a single command across a socket, appending \n to it. This should all
  1224. * be done under stratum lock except when first establishing the socket */
  1225. static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
  1226. {
  1227. SOCKETTYPE sock = pool->sock;
  1228. ssize_t ssent = 0;
  1229. strcat(s, "\n");
  1230. len++;
  1231. while (len > 0 ) {
  1232. struct timeval timeout = {1, 0};
  1233. ssize_t sent;
  1234. fd_set wd;
  1235. FD_ZERO(&wd);
  1236. FD_SET(sock, &wd);
  1237. if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
  1238. return SEND_SELECTFAIL;
  1239. #ifdef __APPLE__
  1240. sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
  1241. #elif WIN32
  1242. sent = send(pool->sock, s + ssent, len, 0);
  1243. #else
  1244. sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
  1245. #endif
  1246. if (sent < 0) {
  1247. if (!sock_blocks())
  1248. return SEND_SENDFAIL;
  1249. sent = 0;
  1250. }
  1251. ssent += sent;
  1252. len -= sent;
  1253. }
  1254. pool->cgminer_pool_stats.times_sent++;
  1255. pool->cgminer_pool_stats.bytes_sent += ssent;
  1256. total_bytes_sent += ssent;
  1257. pool->cgminer_pool_stats.net_bytes_sent += ssent;
  1258. return SEND_OK;
  1259. }
  1260. bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force)
  1261. {
  1262. enum send_ret ret = SEND_INACTIVE;
  1263. if (opt_protocol)
  1264. applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s);
  1265. mutex_lock(&pool->stratum_lock);
  1266. if (pool->stratum_active || force)
  1267. ret = __stratum_send(pool, s, len);
  1268. mutex_unlock(&pool->stratum_lock);
  1269. /* This is to avoid doing applog under stratum_lock */
  1270. switch (ret) {
  1271. default:
  1272. case SEND_OK:
  1273. break;
  1274. case SEND_SELECTFAIL:
  1275. applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
  1276. suspend_stratum(pool);
  1277. break;
  1278. case SEND_SENDFAIL:
  1279. applog(LOG_DEBUG, "Failed to send in stratum_send");
  1280. suspend_stratum(pool);
  1281. break;
  1282. case SEND_INACTIVE:
  1283. applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
  1284. break;
  1285. }
  1286. return (ret == SEND_OK);
  1287. }
  1288. static bool socket_full(struct pool *pool, int wait)
  1289. {
  1290. SOCKETTYPE sock = pool->sock;
  1291. struct timeval timeout;
  1292. fd_set rd;
  1293. if (sock == INVSOCK)
  1294. return true;
  1295. if (unlikely(wait < 0))
  1296. wait = 0;
  1297. FD_ZERO(&rd);
  1298. FD_SET(sock, &rd);
  1299. timeout.tv_usec = 0;
  1300. timeout.tv_sec = wait;
  1301. if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
  1302. return true;
  1303. return false;
  1304. }
  1305. /* Check whether there is buffered or pending data to read on the pool socket */
  1306. bool sock_full(struct pool *pool)
  1307. {
  1308. if (strlen(pool->sockbuf))
  1309. return true;
  1310. return (socket_full(pool, 0));
  1311. }
  1312. static void clear_sockbuf(struct pool *pool)
  1313. {
  1314. strcpy(pool->sockbuf, "");
  1315. }
  1316. static void clear_sock(struct pool *pool)
  1317. {
  1318. ssize_t n;
  1319. mutex_lock(&pool->stratum_lock);
  1320. do {
  1321. if (pool->sock)
  1322. n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
  1323. else
  1324. n = 0;
  1325. } while (n > 0);
  1326. mutex_unlock(&pool->stratum_lock);
  1327. clear_sockbuf(pool);
  1328. }
  1329. /* Make sure the pool sockbuf is large enough to cope with any coinbase size
  1330. * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
  1331. * and zeroing the new memory */
  1332. static void recalloc_sock(struct pool *pool, size_t len)
  1333. {
  1334. size_t old, new;
  1335. old = strlen(pool->sockbuf);
  1336. new = old + len + 1;
  1337. if (new < pool->sockbuf_size)
  1338. return;
  1339. new = new + (RBUFSIZE - (new % RBUFSIZE));
  1340. // Avoid potentially recursive locking
  1341. // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new);
  1342. pool->sockbuf = realloc(pool->sockbuf, new);
  1343. if (!pool->sockbuf)
  1344. quithere(1, "Failed to realloc pool sockbuf");
  1345. memset(pool->sockbuf + old, 0, new - old);
  1346. pool->sockbuf_size = new;
  1347. }
  1348. /* Peeks at a socket to find the first end of line and then reads just that
  1349. * from the socket and returns that as a malloced char */
  1350. char *recv_line(struct pool *pool)
  1351. {
  1352. char *tok, *sret = NULL;
  1353. ssize_t len, buflen;
  1354. int waited = 0;
  1355. if (!strstr(pool->sockbuf, "\n")) {
  1356. struct timeval rstart, now;
  1357. cgtime(&rstart);
  1358. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1359. applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
  1360. goto out;
  1361. }
  1362. do {
  1363. char s[RBUFSIZE];
  1364. size_t slen;
  1365. ssize_t n;
  1366. memset(s, 0, RBUFSIZE);
  1367. n = recv(pool->sock, s, RECVSIZE, 0);
  1368. if (!n) {
  1369. applog(LOG_DEBUG, "Socket closed waiting in recv_line");
  1370. suspend_stratum(pool);
  1371. break;
  1372. }
  1373. cgtime(&now);
  1374. waited = tdiff(&now, &rstart);
  1375. if (n < 0) {
  1376. // Save errno from being overwritten by the socket_* calls below
  1377. int socket_recv_errno;
  1378. socket_recv_errno = SOCKERR;
  1379. if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
  1380. applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET));
  1381. suspend_stratum(pool);
  1382. break;
  1383. }
  1384. } else {
  1385. slen = strlen(s);
  1386. recalloc_sock(pool, slen);
  1387. strcat(pool->sockbuf, s);
  1388. }
  1389. } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
  1390. }
  1391. buflen = strlen(pool->sockbuf);
  1392. tok = strtok(pool->sockbuf, "\n");
  1393. if (!tok) {
  1394. applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
  1395. goto out;
  1396. }
  1397. sret = strdup(tok);
  1398. len = strlen(sret);
  1399. /* Copy what's left in the buffer after the \n, including the
  1400. * terminating \0 */
  1401. if (buflen > len + 1)
  1402. memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
  1403. else
  1404. strcpy(pool->sockbuf, "");
  1405. pool->cgminer_pool_stats.times_received++;
  1406. pool->cgminer_pool_stats.bytes_received += len;
  1407. total_bytes_rcvd += len;
  1408. pool->cgminer_pool_stats.net_bytes_received += len;
  1409. out:
  1410. if (!sret)
  1411. clear_sock(pool);
  1412. else if (opt_protocol)
  1413. applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret);
  1414. return sret;
  1415. }
  1416. /* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY
  1417. * flag, but this is compatible with 2.0. */
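/* e.g. json_dumps_ANY(json_string("x"), 0) yields a malloced "\"x\"" even on jansson 2.0,
 * where the array-wrapping path below strips the [ ] from dumping ["x"]. */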
  1418. char *json_dumps_ANY(json_t *json, size_t flags)
  1419. {
  1420. switch (json_typeof(json))
  1421. {
  1422. case JSON_ARRAY:
  1423. case JSON_OBJECT:
  1424. return json_dumps(json, flags);
  1425. default:
  1426. break;
  1427. }
  1428. char *rv;
  1429. #ifdef JSON_ENCODE_ANY
  1430. rv = json_dumps(json, JSON_ENCODE_ANY | flags);
  1431. if (rv)
  1432. return rv;
  1433. #endif
  1434. json_t *tmp = json_array();
  1435. char *s;
  1436. int i;
  1437. size_t len;
  1438. if (!tmp)
  1439. quithere(1, "Failed to allocate json array");
  1440. if (json_array_append(tmp, json))
  1441. quithere(1, "Failed to append temporary array");
  1442. s = json_dumps(tmp, flags);
  1443. if (!s)
  1444. return NULL;
  1445. for (i = 0; s[i] != '['; ++i)
  1446. if (unlikely(!(s[i] && isCspace(s[i]))))
  1447. quithere(1, "Failed to find opening bracket in array dump");
  1448. len = strlen(&s[++i]) - 1;
  1449. if (unlikely(s[i+len] != ']'))
  1450. quithere(1, "Failed to find closing bracket in array dump");
  1451. rv = malloc(len + 1);
  1452. memcpy(rv, &s[i], len);
  1453. rv[len] = '\0';
  1454. free(s);
  1455. json_decref(tmp);
  1456. return rv;
  1457. }
  1458. /* Extracts a string value from a json array with error checking. To be used
  1459. * when the value of the string returned is only examined and not to be stored.
  1460. * See json_array_string below */
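/* e.g. with val parsed from ["1a2b", "0badc0de"], __json_array_string(val, 1) returns
 * "0badc0de" (example values); the pointer is owned by val and only valid while val is referenced. */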
  1461. const char *__json_array_string(json_t *val, unsigned int entry)
  1462. {
  1463. json_t *arr_entry;
  1464. if (json_is_null(val))
  1465. return NULL;
  1466. if (!json_is_array(val))
  1467. return NULL;
  1468. if (entry > json_array_size(val))
  1469. return NULL;
  1470. arr_entry = json_array_get(val, entry);
  1471. if (!json_is_string(arr_entry))
  1472. return NULL;
  1473. return json_string_value(arr_entry);
  1474. }
  1475. /* Creates a freshly malloced dup of __json_array_string */
  1476. static char *json_array_string(json_t *val, unsigned int entry)
  1477. {
  1478. const char *buf = __json_array_string(val, entry);
  1479. if (buf)
  1480. return strdup(buf);
  1481. return NULL;
  1482. }
  1483. void stratum_probe_transparency(struct pool *pool)
  1484. {
  1485. // Request transaction data to discourage pools from doing anything shady
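// Illustrative request for job_id "1a2b" (example value):
// {"params": ["1a2b"], "id": "txlist1a2b", "method": "mining.get_transactions"}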
  1486. char s[1024];
  1487. int sLen;
  1488. sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}",
  1489. pool->swork.job_id,
  1490. pool->swork.job_id);
  1491. stratum_send(pool, s, sLen);
  1492. if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
  1493. cgtime(&pool->swork.tv_transparency);
  1494. pool->swork.transparency_probed = true;
  1495. }
  1496. static bool parse_notify(struct pool *pool, json_t *val)
  1497. {
  1498. const char *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime;
  1499. char *job_id;
  1500. bool clean, ret = false;
  1501. int merkles, i;
  1502. size_t cb1_len, cb2_len;
  1503. json_t *arr;
  1504. arr = json_array_get(val, 4);
  1505. if (!arr || !json_is_array(arr))
  1506. goto out;
  1507. merkles = json_array_size(arr);
  1508. for (i = 0; i < merkles; i++)
  1509. if (!json_is_string(json_array_get(arr, i)))
  1510. goto out;
  1511. prev_hash = __json_array_string(val, 1);
  1512. coinbase1 = __json_array_string(val, 2);
  1513. coinbase2 = __json_array_string(val, 3);
  1514. bbversion = __json_array_string(val, 5);
  1515. nbit = __json_array_string(val, 6);
  1516. ntime = __json_array_string(val, 7);
  1517. clean = json_is_true(json_array_get(val, 8));
  1518. if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime)
  1519. goto out;
  1520. job_id = json_array_string(val, 0);
  1521. if (!job_id)
  1522. goto out;
  1523. cg_wlock(&pool->data_lock);
  1524. cgtime(&pool->swork.tv_received);
  1525. free(pool->swork.job_id);
  1526. pool->swork.job_id = job_id;
  1527. pool->submit_old = !clean;
  1528. pool->swork.clean = true;
  1529. hex2bin(&pool->swork.header1[0], bbversion, 4);
  1530. hex2bin(&pool->swork.header1[4], prev_hash, 32);
  1531. hex2bin((void*)&pool->swork.ntime, ntime, 4);
  1532. pool->swork.ntime = be32toh(pool->swork.ntime);
  1533. hex2bin(&pool->swork.diffbits[0], nbit, 4);
  1534. cb1_len = strlen(coinbase1) / 2;
  1535. pool->swork.nonce2_offset = cb1_len + pool->n1_len;
  1536. cb2_len = strlen(coinbase2) / 2;
  1537. bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len);
  1538. uint8_t *coinbase = bytes_buf(&pool->swork.coinbase);
  1539. hex2bin(coinbase, coinbase1, cb1_len);
  1540. hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len);
  1541. // NOTE: gap for nonce2, filled at work generation time
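// Resulting coinbase layout: coinbase1 | extranonce1 (n1_len bytes) | nonce2 gap (n2size bytes) | coinbase2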
  1542. hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len);
  1543. bytes_resize(&pool->swork.merkle_bin, 32 * merkles);
  1544. for (i = 0; i < merkles; i++)
  1545. hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32);
  1546. pool->swork.merkles = merkles;
  1547. pool->nonce2 = 0;
  1548. cg_wunlock(&pool->data_lock);
  1549. applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s",
  1550. pool->pool_no, job_id);
  1551. if (opt_debug && opt_protocol)
  1552. {
  1553. applog(LOG_DEBUG, "job_id: %s", job_id);
  1554. applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
  1555. applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
  1556. applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
  1557. for (i = 0; i < merkles; i++)
  1558. applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i)));
  1559. applog(LOG_DEBUG, "bbversion: %s", bbversion);
  1560. applog(LOG_DEBUG, "nbit: %s", nbit);
  1561. applog(LOG_DEBUG, "ntime: %s", ntime);
  1562. applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
  1563. }
  1564. /* A notify message is the closest stratum gets to a getwork */
  1565. pool->getwork_requested++;
  1566. total_getworks++;
  1567. if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
  1568. if (pool->probed)
  1569. stratum_probe_transparency(pool);
  1570. ret = true;
  1571. out:
  1572. return ret;
  1573. }
  1574. static bool parse_diff(struct pool *pool, json_t *val)
  1575. {
  1576. double diff;
  1577. diff = json_number_value(json_array_get(val, 0));
  1578. if (diff == 0)
  1579. return false;
  1580. cg_wlock(&pool->data_lock);
  1581. pool->swork.diff = diff;
  1582. cg_wunlock(&pool->data_lock);
  1583. applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff);
  1584. return true;
  1585. }
  1586. static bool parse_reconnect(struct pool *pool, json_t *val)
  1587. {
  1588. const char *url, *port;
  1589. char address[256];
  1590. url = __json_array_string(val, 0);
  1591. if (!url)
  1592. url = pool->sockaddr_url;
  1593. port = __json_array_string(val, 1);
  1594. if (!port)
  1595. port = pool->stratum_port;
  1596. snprintf(address, sizeof(address), "%s:%s", url, port);
  1597. if (!extract_sockaddr(address, &pool->sockaddr_url, &pool->stratum_port))
  1598. return false;
  1599. pool->stratum_url = pool->sockaddr_url;
  1600. applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address);
  1601. if (!restart_stratum(pool))
  1602. return false;
  1603. return true;
  1604. }
  1605. static bool send_version(struct pool *pool, json_t *val)
  1606. {
  1607. char s[RBUFSIZE], *idstr;
  1608. json_t *id = json_object_get(val, "id");
  1609. if (!(id && !json_is_null(id)))
  1610. return false;
  1611. idstr = json_dumps_ANY(id, 0);
  1612. sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr);
  1613. free(idstr);
  1614. if (!stratum_send(pool, s, strlen(s)))
  1615. return false;
  1616. return true;
  1617. }
  1618. static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params)
  1619. {
  1620. char *msg;
  1621. char s[RBUFSIZE], *idstr;
  1622. json_t *id = json_object_get(val, "id");
  1623. msg = json_array_string(params, 0);
  1624. if (likely(msg))
  1625. {
  1626. free(pool->admin_msg);
  1627. pool->admin_msg = msg;
  1628. applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg);
  1629. }
  1630. if (!(id && !json_is_null(id)))
  1631. return true;
  1632. idstr = json_dumps_ANY(id, 0);
  1633. if (likely(msg))
  1634. sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr);
  1635. else
  1636. sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr);
  1637. free(idstr);
  1638. if (!stratum_send(pool, s, strlen(s)))
  1639. return false;
  1640. return true;
  1641. }
  1642. bool parse_method(struct pool *pool, char *s)
  1643. {
  1644. json_t *val = NULL, *method, *err_val, *params;
  1645. json_error_t err;
  1646. bool ret = false;
  1647. const char *buf;
  1648. if (!s)
  1649. goto out;
  1650. val = JSON_LOADS(s, &err);
  1651. if (!val) {
  1652. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1653. goto out;
  1654. }
  1655. method = json_object_get(val, "method");
  1656. if (!method)
  1657. goto out;
  1658. err_val = json_object_get(val, "error");
  1659. params = json_object_get(val, "params");
  1660. if (err_val && !json_is_null(err_val)) {
  1661. char *ss;
  1662. if (err_val)
  1663. ss = json_dumps(err_val, JSON_INDENT(3));
  1664. else
  1665. ss = strdup("(unknown reason)");
  1666. applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);
  1667. free(ss);
  1668. goto out;
  1669. }
  1670. buf = json_string_value(method);
  1671. if (!buf)
  1672. goto out;
  1673. if (!strncasecmp(buf, "mining.notify", 13)) {
  1674. if (parse_notify(pool, params))
  1675. pool->stratum_notify = ret = true;
  1676. else
  1677. pool->stratum_notify = ret = false;
  1678. goto out;
  1679. }
  1680. if (!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) {
  1681. ret = true;
  1682. goto out;
  1683. }
  1684. if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) {
  1685. ret = true;
  1686. goto out;
  1687. }
  1688. if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) {
  1689. ret = true;
  1690. goto out;
  1691. }
  1692. if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) {
  1693. ret = true;
  1694. goto out;
  1695. }
  1696. out:
  1697. if (val)
  1698. json_decref(val);
  1699. return ret;
  1700. }
  1701. extern bool parse_stratum_response(struct pool *, char *s);
  1702. bool auth_stratum(struct pool *pool)
  1703. {
  1704. json_t *val = NULL, *res_val, *err_val;
  1705. char s[RBUFSIZE], *sret = NULL;
  1706. json_error_t err;
  1707. bool ret = false;
  1708. sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
  1709. pool->rpc_user, pool->rpc_pass);
  1710. if (!stratum_send(pool, s, strlen(s)))
  1711. goto out;
  1712. /* Parse all data in the queue and anything left should be auth */
  1713. while (42) {
  1714. sret = recv_line(pool);
  1715. if (!sret)
  1716. goto out;
  1717. if (parse_method(pool, sret))
  1718. free(sret);
  1719. else
  1720. break;
  1721. }
  1722. val = JSON_LOADS(sret, &err);
  1723. free(sret);
  1724. res_val = json_object_get(val, "result");
  1725. err_val = json_object_get(val, "error");
  1726. if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
  1727. char *ss;
  1728. if (err_val)
  1729. ss = json_dumps(err_val, JSON_INDENT(3));
  1730. else
  1731. ss = strdup("(unknown reason)");
  1732. applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
  1733. free(ss);
  1734. goto out;
  1735. }
  1736. ret = true;
  1737. applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
  1738. pool->probed = true;
  1739. successful_connect = true;
  1740. out:
  1741. if (val)
  1742. json_decref(val);
  1743. if (pool->stratum_notify)
  1744. stratum_probe_transparency(pool);
  1745. return ret;
  1746. }
  1747. curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
  1748. {
  1749. struct pool *pool = clientp;
  1750. curl_socket_t sck = bfg_socket(addr->family, addr->socktype, addr->protocol);
  1751. pool->sock = sck;
  1752. return sck;
  1753. }
  1754. static bool setup_stratum_curl(struct pool *pool)
  1755. {
  1756. char curl_err_str[CURL_ERROR_SIZE];
  1757. CURL *curl = NULL;
  1758. char s[RBUFSIZE];
  1759. bool ret = false;
  1760. applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
  1761. mutex_lock(&pool->stratum_lock);
  1762. timer_unset(&pool->swork.tv_transparency);
  1763. pool->stratum_active = false;
  1764. pool->stratum_notify = false;
  1765. pool->swork.transparency_probed = false;
  1766. if (pool->stratum_curl)
  1767. curl_easy_cleanup(pool->stratum_curl);
  1768. pool->stratum_curl = curl_easy_init();
  1769. if (unlikely(!pool->stratum_curl))
  1770. quithere(1, "Failed to curl_easy_init");
  1771. if (pool->sockbuf)
  1772. pool->sockbuf[0] = '\0';
  1773. curl = pool->stratum_curl;
  1774. if (!pool->sockbuf) {
  1775. pool->sockbuf = calloc(RBUFSIZE, 1);
  1776. if (!pool->sockbuf)
  1777. quithere(1, "Failed to calloc pool sockbuf");
  1778. pool->sockbuf_size = RBUFSIZE;
  1779. }
  1780. /* Create a http url for use with curl */
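/* e.g. sockaddr_url "stratum.example.org" with stratum_port "3333" (placeholder values)
 * gives "http://stratum.example.org:3333" */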
  1781. sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port);
  1782. curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
  1783. curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30);
  1784. curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
  1785. curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
  1786. curl_easy_setopt(curl, CURLOPT_URL, s);
  1787. if (!opt_delaynet)
  1788. curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
  1789. /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed
  1790. * to enable it */
  1791. curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb);
  1792. curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool);
  1793. curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
  1794. // CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this hack for now
  1795. curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb);
  1796. curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
  1797. curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY);
  1798. if (pool->rpc_proxy) {
  1799. curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy);
  1800. } else if (opt_socks_proxy) {
  1801. curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy);
  1802. curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
  1803. }
  1804. curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
  1805. pool->sock = INVSOCK;
  1806. if (curl_easy_perform(curl)) {
  1807. applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
  1808. errout:
  1809. curl_easy_cleanup(curl);
  1810. pool->stratum_curl = NULL;
  1811. goto out;
  1812. }
  1813. if (pool->sock == INVSOCK)
  1814. {
  1815. applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no);
  1816. goto errout;
  1817. }
  1818. keep_sockalive(pool->sock);
  1819. pool->cgminer_pool_stats.times_sent++;
  1820. pool->cgminer_pool_stats.times_received++;
  1821. ret = true;
  1822. out:
  1823. mutex_unlock(&pool->stratum_lock);
  1824. return ret;
  1825. }
  1826. static char *get_sessionid(json_t *val)
  1827. {
  1828. char *ret = NULL;
  1829. json_t *arr_val;
  1830. int arrsize, i;
  1831. arr_val = json_array_get(val, 0);
  1832. if (!arr_val || !json_is_array(arr_val))
  1833. goto out;
  1834. arrsize = json_array_size(arr_val);
  1835. for (i = 0; i < arrsize; i++) {
  1836. json_t *arr = json_array_get(arr_val, i);
  1837. const char *notify;
1838. if (!arr || !json_is_array(arr))
  1839. break;
  1840. notify = __json_array_string(arr, 0);
  1841. if (!notify)
  1842. continue;
  1843. if (!strncasecmp(notify, "mining.notify", 13)) {
  1844. ret = json_array_string(arr, 1);
  1845. break;
  1846. }
  1847. }
  1848. out:
  1849. return ret;
  1850. }
  1851. void suspend_stratum(struct pool *pool)
  1852. {
  1853. clear_sockbuf(pool);
  1854. applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);
  1855. mutex_lock(&pool->stratum_lock);
  1856. pool->stratum_active = pool->stratum_notify = false;
  1857. if (pool->stratum_curl) {
  1858. curl_easy_cleanup(pool->stratum_curl);
  1859. }
  1860. pool->stratum_curl = NULL;
  1861. pool->sock = INVSOCK;
  1862. mutex_unlock(&pool->stratum_lock);
  1863. }
  1864. bool initiate_stratum(struct pool *pool)
  1865. {
  1866. bool ret = false, recvd = false, noresume = false, sockd = false;
  1867. bool trysuggest = request_target_str;
  1868. char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid;
  1869. json_t *val = NULL, *res_val, *err_val;
  1870. json_error_t err;
  1871. int n2size;
  1872. resend:
  1873. if (!setup_stratum_curl(pool)) {
  1874. sockd = false;
  1875. goto out;
  1876. }
  1877. sockd = true;
  1878. clear_sock(pool);
  1879. if (trysuggest)
  1880. {
  1881. int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str);
  1882. if (!_stratum_send(pool, s, sz, true))
  1883. {
  1884. applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no);
  1885. goto out;
  1886. }
  1887. recvd = true;
  1888. }
  1889. if (noresume) {
  1890. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
  1891. } else {
  1892. if (pool->sessionid)
  1893. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
  1894. else
  1895. sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
  1896. }
  1897. if (!_stratum_send(pool, s, strlen(s), true)) {
  1898. applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
  1899. goto out;
  1900. }
  1901. recvd = true;
  1902. if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
  1903. applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
  1904. goto out;
  1905. }
  1906. sret = recv_line(pool);
  1907. if (!sret)
  1908. goto out;
  1909. val = JSON_LOADS(sret, &err);
  1910. free(sret);
  1911. if (!val) {
  1912. applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
  1913. goto out;
  1914. }
  1915. res_val = json_object_get(val, "result");
  1916. err_val = json_object_get(val, "error");
  1917. if (!res_val || json_is_null(res_val) ||
  1918. (err_val && !json_is_null(err_val))) {
  1919. char *ss;
  1920. if (err_val)
  1921. ss = json_dumps(err_val, JSON_INDENT(3));
  1922. else
  1923. ss = strdup("(unknown reason)");
  1924. applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);
  1925. free(ss);
  1926. goto out;
  1927. }
  1928. sessionid = get_sessionid(res_val);
  1929. if (!sessionid)
  1930. applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
  1931. nonce1 = json_array_string(res_val, 1);
  1932. if (!nonce1) {
  1933. applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");
  1934. free(sessionid);
  1935. goto out;
  1936. }
  1937. n2size = json_integer_value(json_array_get(res_val, 2));
  1938. if (!n2size) {
  1939. applog(LOG_INFO, "Failed to get n2size in initiate_stratum");
  1940. free(sessionid);
  1941. free(nonce1);
  1942. goto out;
  1943. }
  1944. cg_wlock(&pool->data_lock);
  1945. free(pool->sessionid);
  1946. pool->sessionid = sessionid;
  1947. free(pool->nonce1);
  1948. pool->nonce1 = nonce1;
  1949. pool->n1_len = strlen(nonce1) / 2;
  1950. pool->n2size = n2size;
  1951. pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size;
  1952. #ifdef WORDS_BIGENDIAN
  1953. pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0;
  1954. #endif
  1955. cg_wunlock(&pool->data_lock);
  1956. if (sessionid)
  1957. applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);
  1958. ret = true;
  1959. out:
  1960. if (val)
  1961. {
  1962. json_decref(val);
  1963. val = NULL;
  1964. }
  1965. if (ret) {
  1966. if (!pool->stratum_url)
  1967. pool->stratum_url = pool->sockaddr_url;
  1968. pool->stratum_active = true;
  1969. pool->swork.diff = 1;
  1970. if (opt_protocol) {
  1971. applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
  1972. pool->pool_no, pool->nonce1, pool->n2size);
  1973. }
  1974. } else {
  1975. if (recvd)
  1976. {
  1977. if (trysuggest)
  1978. {
  1979. applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no);
  1980. trysuggest = false;
  1981. goto resend;
  1982. }
  1983. if (!noresume)
  1984. {
  1985. applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
  1986. noresume = true;
  1987. goto resend;
  1988. }
  1989. }
  1990. applog(LOG_DEBUG, "Initiate stratum failed");
  1991. if (sockd)
  1992. suspend_stratum(pool);
  1993. }
  1994. return ret;
  1995. }
  1996. bool restart_stratum(struct pool *pool)
  1997. {
  1998. if (pool->stratum_active)
  1999. suspend_stratum(pool);
  2000. if (!initiate_stratum(pool))
  2001. return false;
  2002. if (!auth_stratum(pool))
  2003. return false;
  2004. return true;
  2005. }
  2006. void dev_error_update(struct cgpu_info *dev, enum dev_reason reason)
  2007. {
  2008. dev->device_last_not_well = time(NULL);
  2009. cgtime(&dev->tv_device_last_not_well);
  2010. dev->device_not_well_reason = reason;
  2011. }
  2012. void dev_error(struct cgpu_info *dev, enum dev_reason reason)
  2013. {
  2014. dev_error_update(dev, reason);
  2015. switch (reason) {
  2016. case REASON_THREAD_FAIL_INIT:
  2017. dev->thread_fail_init_count++;
  2018. break;
  2019. case REASON_THREAD_ZERO_HASH:
  2020. dev->thread_zero_hash_count++;
  2021. break;
  2022. case REASON_THREAD_FAIL_QUEUE:
  2023. dev->thread_fail_queue_count++;
  2024. break;
  2025. case REASON_DEV_SICK_IDLE_60:
  2026. dev->dev_sick_idle_60_count++;
  2027. break;
  2028. case REASON_DEV_DEAD_IDLE_600:
  2029. dev->dev_dead_idle_600_count++;
  2030. break;
  2031. case REASON_DEV_NOSTART:
  2032. dev->dev_nostart_count++;
  2033. break;
  2034. case REASON_DEV_OVER_HEAT:
  2035. dev->dev_over_heat_count++;
  2036. break;
  2037. case REASON_DEV_THERMAL_CUTOFF:
  2038. dev->dev_thermal_cutoff_count++;
  2039. break;
  2040. case REASON_DEV_COMMS_ERROR:
  2041. dev->dev_comms_error_count++;
  2042. break;
  2043. case REASON_DEV_THROTTLE:
  2044. dev->dev_throttle_count++;
  2045. break;
  2046. }
  2047. }
  2048. /* Realloc an existing string to fit an extra string s, appending s to it. */
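/* The old pointer is freed and a new one returned, so call as (illustrative):
 * buf = realloc_strcat(buf, ", "); */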
  2049. void *realloc_strcat(char *ptr, char *s)
  2050. {
  2051. size_t old = strlen(ptr), len = strlen(s);
  2052. char *ret;
  2053. if (!len)
  2054. return ptr;
  2055. len += old + 1;
  2056. align_len(&len);
  2057. ret = malloc(len);
  2058. if (unlikely(!ret))
  2059. quithere(1, "Failed to malloc");
  2060. sprintf(ret, "%s%s", ptr, s);
  2061. free(ptr);
  2062. return ret;
  2063. }
  2064. static
  2065. bool sanechars[] = {
  2066. false, false, false, false, false, false, false, false,
  2067. false, false, false, false, false, false, false, false,
  2068. false, false, false, false, false, false, false, false,
  2069. false, false, false, false, false, false, false, false,
  2070. false, false, false, false, false, false, false, false,
  2071. false, false, false, false, false, false, false, false,
  2072. true , true , true , true , true , true , true , true ,
  2073. true , true , false, false, false, false, false, false,
  2074. false, true , true , true , true , true , true , true ,
  2075. true , true , true , true , true , true , true , true ,
  2076. true , true , true , true , true , true , true , true ,
  2077. true , true , true , false, false, false, false, false,
  2078. false, true , true , true , true , true , true , true ,
  2079. true , true , true , true , true , true , true , true ,
  2080. true , true , true , true , true , true , true , true ,
  2081. true , true , true , false, false, false, false, false,
  2082. };
  2083. char *sanestr(char *o, char *s)
  2084. {
  2085. char *rv = o;
  2086. bool br = false;
  2087. for ( ; s[0]; ++s)
  2088. {
  2089. if (sanechars[s[0] & 0x7f])
  2090. {
  2091. if (br)
  2092. {
  2093. br = false;
  2094. if (s[0] >= '0' && s[0] <= '9')
  2095. (o++)[0] = '_';
  2096. }
  2097. (o++)[0] = s[0];
  2098. }
  2099. else
2100. if (o != rv && o[-1] >= '0' && o[-1] <= '9')
  2101. br = true;
  2102. }
  2103. o[0] = '\0';
  2104. return rv;
  2105. }
  2106. void RenameThread(const char* name)
  2107. {
  2108. #if defined(PR_SET_NAME)
  2109. // Only the first 15 characters are used (16 - NUL terminator)
  2110. prctl(PR_SET_NAME, name, 0, 0, 0);
  2111. #elif defined(__APPLE__)
  2112. pthread_setname_np(name);
  2113. #elif (defined(__FreeBSD__) || defined(__OpenBSD__))
  2114. pthread_set_name_np(pthread_self(), name);
  2115. #else
  2116. // Prevent warnings for unused parameters...
  2117. (void)name;
  2118. #endif
  2119. }
  2120. static pthread_key_t key_bfgtls;
  2121. struct bfgtls_data {
  2122. char *bfg_strerror_result;
  2123. size_t bfg_strerror_resultsz;
  2124. #ifdef WIN32
  2125. LPSTR bfg_strerror_socketresult;
  2126. #endif
  2127. };
  2128. static
  2129. struct bfgtls_data *get_bfgtls()
  2130. {
  2131. struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls);
  2132. if (bfgtls)
  2133. return bfgtls;
  2134. void *p;
  2135. bfgtls = malloc(sizeof(*bfgtls));
  2136. if (!bfgtls)
  2137. quithere(1, "malloc bfgtls failed");
  2138. p = malloc(64);
  2139. if (!p)
  2140. quithere(1, "malloc bfg_strerror_result failed");
  2141. *bfgtls = (struct bfgtls_data){
  2142. .bfg_strerror_resultsz = 64,
  2143. .bfg_strerror_result = p,
  2144. };
  2145. if (pthread_setspecific(key_bfgtls, bfgtls))
  2146. quithere(1, "pthread_setspecific failed");
  2147. return bfgtls;
  2148. }
  2149. void bfg_init_threadlocal()
  2150. {
  2151. if (pthread_key_create(&key_bfgtls, NULL))
  2152. quithere(1, "pthread_key_create failed");
  2153. }
  2154. static
  2155. bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum)
  2156. {
  2157. if (minimum <= *bufszp)
  2158. return false;
  2159. while (minimum > *bufszp)
2160. *bufszp *= 2;
  2161. *bufp = realloc(*bufp, *bufszp);
  2162. if (unlikely(!*bufp))
  2163. quithere(1, "realloc failed");
  2164. return true;
  2165. }
  2166. static
  2167. const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src)
  2168. {
  2169. if (!src)
  2170. return NULL;
  2171. const size_t srcsz = strlen(src) + 1;
  2172. bfg_grow_buffer(bufp, bufszp, srcsz);
  2173. memcpy(*bufp, src, srcsz);
  2174. return *bufp;
  2175. }
  2176. // Guaranteed to always return some string (or quit)
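/* e.g. bfg_strerror(ETIMEDOUT, BST_ERRNO) typically yields the libc text "Connection timed out";
 * when no description can be obtained (e.g. BST_LIBUSB without libusb_error_name), the result
 * takes the form "libusb error #-7" (example number). */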
  2177. const char *bfg_strerror(int e, enum bfg_strerror_type type)
  2178. {
  2179. static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  2180. struct bfgtls_data *bfgtls = get_bfgtls();
  2181. size_t * const bufszp = &bfgtls->bfg_strerror_resultsz;
  2182. char ** const bufp = &bfgtls->bfg_strerror_result;
  2183. const char *have = NULL;
  2184. switch (type) {
  2185. case BST_LIBUSB:
  2186. // NOTE: Nested preprocessor checks since the latter isn't defined at all without the former
  2187. #ifdef HAVE_LIBUSB
  2188. # if HAVE_DECL_LIBUSB_ERROR_NAME
  2189. // libusb makes no guarantees for thread-safety or persistence
  2190. mutex_lock(&mutex);
  2191. have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e));
  2192. mutex_unlock(&mutex);
  2193. # endif
  2194. #endif
  2195. break;
  2196. case BST_SOCKET:
  2197. case BST_SYSTEM:
  2198. {
  2199. #ifdef WIN32
  2200. // Windows has a different namespace for system and socket errors
  2201. LPSTR *msg = &bfgtls->bfg_strerror_socketresult;
  2202. if (*msg)
  2203. LocalFree(*msg);
  2204. if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0))
  2205. return *msg;
  2206. *msg = NULL;
  2207. break;
  2208. #endif
  2209. }
  2210. // Fallthru on non-WIN32
  2211. case BST_ERRNO:
  2212. {
  2213. #ifdef __STRERROR_S_WORKS
  2214. // FIXME: Not sure how to get this on MingW64
  2215. retry:
  2216. if (likely(!strerror_s(*bufp, *bufszp, e)))
  2217. {
  2218. if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2))
  2219. goto retry;
  2220. return *bufp;
  2221. }
  2222. // TODO: XSI strerror_r
  2223. // TODO: GNU strerror_r
  2224. #else
  2225. mutex_lock(&mutex);
  2226. have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e));
  2227. mutex_unlock(&mutex);
  2228. #endif
  2229. }
  2230. }
  2231. if (have)
  2232. return *bufp;
2233. // Fallback: stringify the number
  2234. static const char fmt[] = "%s error #%d", *typestr;
  2235. switch (type) {
  2236. case BST_ERRNO:
  2237. typestr = "System";
  2238. break;
  2239. case BST_SOCKET:
  2240. typestr = "Socket";
  2241. break;
  2242. case BST_LIBUSB:
  2243. typestr = "libusb";
  2244. break;
  2245. default:
  2246. typestr = "Unexpected";
  2247. }
  2248. int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1;
  2249. bfg_grow_buffer(bufp, bufszp, sz);
  2250. sprintf(*bufp, fmt, typestr, e);
  2251. return *bufp;
  2252. }
  2253. void notifier_init(notifier_t pipefd)
  2254. {
  2255. #ifdef WIN32
  2256. #define WindowsErrorStr(e) bfg_strerror(e, BST_SOCKET)
  2257. SOCKET listener, connecter, acceptor;
  2258. listener = bfg_socket(AF_INET, SOCK_STREAM, 0);
  2259. if (listener == INVALID_SOCKET)
  2260. quit(1, "Failed to create listener socket"IN_FMT_FFL": %s",
  2261. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2262. connecter = bfg_socket(AF_INET, SOCK_STREAM, 0);
  2263. if (connecter == INVALID_SOCKET)
  2264. quit(1, "Failed to create connect socket"IN_FMT_FFL": %s",
  2265. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2266. struct sockaddr_in inaddr = {
  2267. .sin_family = AF_INET,
  2268. .sin_addr = {
  2269. .s_addr = htonl(INADDR_LOOPBACK),
  2270. },
  2271. .sin_port = 0,
  2272. };
  2273. {
  2274. static const int reuse = 1;
  2275. setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse));
  2276. }
  2277. if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR)
  2278. quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s",
  2279. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2280. socklen_t inaddr_sz = sizeof(inaddr);
  2281. if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR)
  2282. quit(1, "Failed to getsockname"IN_FMT_FFL": %s",
  2283. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2284. if (listen(listener, 1) == SOCKET_ERROR)
  2285. quit(1, "Failed to listen"IN_FMT_FFL": %s",
  2286. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2287. inaddr.sin_family = AF_INET;
  2288. inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  2289. if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR)
  2290. quit(1, "Failed to connect"IN_FMT_FFL": %s",
  2291. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2292. acceptor = accept(listener, NULL, NULL);
  2293. if (acceptor == INVALID_SOCKET)
  2294. quit(1, "Failed to accept"IN_FMT_FFL": %s",
  2295. __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError()));
  2296. closesocket(listener);
  2297. pipefd[0] = connecter;
  2298. pipefd[1] = acceptor;
  2299. #else
  2300. if (pipe(pipefd))
  2301. quithere(1, "Failed to create pipe");
  2302. #endif
  2303. }
  2304. void notifier_wake(notifier_t fd)
  2305. {
  2306. if (fd[1] == INVSOCK)
  2307. return;
  2308. if (1 !=
  2309. #ifdef WIN32
  2310. send(fd[1], "\0", 1, 0)
  2311. #else
  2312. write(fd[1], "\0", 1)
  2313. #endif
  2314. )
  2315. applog(LOG_WARNING, "Error trying to wake notifier");
  2316. }
  2317. void notifier_read(notifier_t fd)
  2318. {
  2319. char buf[0x10];
  2320. #ifdef WIN32
  2321. IGNORE_RETURN_VALUE(recv(fd[0], buf, sizeof(buf), 0));
  2322. #else
  2323. IGNORE_RETURN_VALUE(read(fd[0], buf, sizeof(buf)));
  2324. #endif
  2325. }
  2326. void notifier_init_invalid(notifier_t fd)
  2327. {
  2328. fd[0] = fd[1] = INVSOCK;
  2329. }
  2330. void notifier_destroy(notifier_t fd)
  2331. {
  2332. #ifdef WIN32
  2333. closesocket(fd[0]);
  2334. closesocket(fd[1]);
  2335. #else
  2336. close(fd[0]);
  2337. close(fd[1]);
  2338. #endif
  2339. fd[0] = fd[1] = INVSOCK;
  2340. }
  2341. void _bytes_alloc_failure(size_t sz)
  2342. {
  2343. quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz);
  2344. }
  2345. void *cmd_thread(void *cmdp)
  2346. {
  2347. const char *cmd = cmdp;
  2348. applog(LOG_DEBUG, "Executing command: %s", cmd);
  2349. int rc = system(cmd);
  2350. if (rc)
  2351. applog(LOG_WARNING, "Command returned %d exit code: %s", rc, cmd);
  2352. return NULL;
  2353. }
  2354. void run_cmd(const char *cmd)
  2355. {
  2356. if (!cmd)
  2357. return;
  2358. pthread_t pth;
  2359. pthread_create(&pth, NULL, cmd_thread, (void*)cmd);
  2360. }