miner.h 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541
  1. #ifndef __MINER_H__
  2. #define __MINER_H__
  3. #include "config.h"
  4. #include <stdbool.h>
  5. #include <stdint.h>
  6. #include <sys/time.h>
  7. #include <pthread.h>
  8. #include <jansson.h>
  9. #ifdef HAVE_LIBCURL
  10. #include <curl/curl.h>
  11. #else
  12. typedef char CURL;
  13. extern char *curly;
  14. #define curl_easy_init(curl) (curly)
  15. #define curl_easy_cleanup(curl) {}
  16. #define curl_global_cleanup() {}
  17. #define CURL_GLOBAL_ALL 0
  18. #define curl_global_init(X) (0)
  19. #endif
  20. #include <sched.h>
  21. #include "elist.h"
  22. #include "uthash.h"
  23. #include "logging.h"
  24. #include "util.h"
  25. #include <sys/types.h>
  26. #ifndef WIN32
  27. # include <sys/socket.h>
  28. # include <netdb.h>
  29. #endif
  30. #ifdef USE_USBUTILS
  31. #include <semaphore.h>
  32. #endif
  33. #ifdef STDC_HEADERS
  34. # include <stdlib.h>
  35. # include <stddef.h>
  36. #else
  37. # ifdef HAVE_STDLIB_H
  38. # include <stdlib.h>
  39. # endif
  40. #endif
  41. #ifdef HAVE_ALLOCA_H
  42. # include <alloca.h>
  43. #elif defined __GNUC__
  44. # ifndef WIN32
  45. # define alloca __builtin_alloca
  46. # else
  47. # include <malloc.h>
  48. # endif
  49. #elif defined _AIX
  50. # define alloca __alloca
  51. #elif defined _MSC_VER
  52. # include <malloc.h>
  53. # define alloca _alloca
  54. #else
  55. # ifndef HAVE_ALLOCA
  56. # ifdef __cplusplus
  57. extern "C"
  58. # endif
  59. void *alloca (size_t);
  60. # endif
  61. #endif
  62. #ifdef __MINGW32__
  63. #include <windows.h>
  64. #include <io.h>
/* MinGW has no fsync(); emulate it by flushing the OS buffers of the
 * Windows HANDLE underlying the POSIX fd. Returns 0 on success, -1 on
 * failure, matching the POSIX convention. */
static inline int fsync (int fd)
{
	return (FlushFileBuffers ((HANDLE) _get_osfhandle (fd))) ? 0 : -1;
}
  69. #ifndef EWOULDBLOCK
  70. # define EWOULDBLOCK EAGAIN
  71. #endif
  72. #ifndef MSG_DONTWAIT
  73. # define MSG_DONTWAIT 0x1000000
  74. #endif
  75. #endif /* __MINGW32__ */
  76. #if defined (__linux)
  77. #ifndef LINUX
  78. #define LINUX
  79. #endif
  80. #endif
  81. #ifdef WIN32
  82. #ifndef timersub
  83. #define timersub(a, b, result) \
  84. do { \
  85. (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
  86. (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
  87. if ((result)->tv_usec < 0) { \
  88. --(result)->tv_sec; \
  89. (result)->tv_usec += 1000000; \
  90. } \
  91. } while (0)
  92. #endif
  93. #ifndef timeradd
  94. # define timeradd(a, b, result) \
  95. do { \
  96. (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
  97. (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
  98. if ((result)->tv_usec >= 1000000) \
  99. { \
  100. ++(result)->tv_sec; \
  101. (result)->tv_usec -= 1000000; \
  102. } \
  103. } while (0)
  104. #endif
  105. #endif
  106. #ifdef HAVE_ADL
  107. #include "ADL_SDK/adl_sdk.h"
  108. #endif
  109. #ifdef USE_USBUTILS
  110. #include <libusb.h>
  111. #endif
  112. #ifdef USE_USBUTILS
  113. #include "usbutils.h"
  114. #endif
  115. #if (!defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
  116. || (defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
  117. #ifndef bswap_16
  118. #define bswap_16 __builtin_bswap16
  119. #define bswap_32 __builtin_bswap32
  120. #define bswap_64 __builtin_bswap64
  121. #endif
  122. #else
  123. #if HAVE_BYTESWAP_H
  124. #include <byteswap.h>
  125. #elif defined(USE_SYS_ENDIAN_H)
  126. #include <sys/endian.h>
  127. #elif defined(__APPLE__)
  128. #include <libkern/OSByteOrder.h>
  129. #define bswap_16 OSSwapInt16
  130. #define bswap_32 OSSwapInt32
  131. #define bswap_64 OSSwapInt64
  132. #else
  133. #define bswap_16(value) \
  134. ((((value) & 0xff) << 8) | ((value) >> 8))
  135. #define bswap_32(value) \
  136. (((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | \
  137. (uint32_t)bswap_16((uint16_t)((value) >> 16)))
  138. #define bswap_64(value) \
  139. (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) \
  140. << 32) | \
  141. (uint64_t)bswap_32((uint32_t)((value) >> 32)))
  142. #endif
  143. #endif /* !defined(__GLXBYTEORDER_H__) */
  144. /* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then
  145. * htobe64 also won't exist */
  146. #ifndef htobe32
  147. # if __BYTE_ORDER == __LITTLE_ENDIAN
  148. # define htole16(x) (x)
  149. # define htole32(x) (x)
  150. # define htole64(x) (x)
  151. # define le32toh(x) (x)
  152. # define le64toh(x) (x)
  153. # define be32toh(x) bswap_32(x)
  154. # define be64toh(x) bswap_64(x)
  155. # define htobe32(x) bswap_32(x)
  156. # define htobe64(x) bswap_64(x)
  157. # elif __BYTE_ORDER == __BIG_ENDIAN
  158. # define htole16(x) bswap_16(x)
  159. # define htole32(x) bswap_32(x)
  160. # define le32toh(x) bswap_32(x)
  161. # define le64toh(x) bswap_64(x)
  162. # define htole64(x) bswap_64(x)
  163. # define be32toh(x) (x)
  164. # define be64toh(x) (x)
  165. # define htobe32(x) (x)
  166. # define htobe64(x) (x)
  167. #else
  168. #error UNKNOWN BYTE ORDER
  169. #endif
  170. #endif
  171. #undef unlikely
  172. #undef likely
  173. #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
  174. #define unlikely(expr) (__builtin_expect(!!(expr), 0))
  175. #define likely(expr) (__builtin_expect(!!(expr), 1))
  176. #else
  177. #define unlikely(expr) (expr)
  178. #define likely(expr) (expr)
  179. #endif
  180. #define __maybe_unused __attribute__((unused))
  181. #define uninitialised_var(x) x = x
  182. #if defined(__i386__)
  183. #define WANT_CRYPTOPP_ASM32
  184. #endif
  185. #ifndef ARRAY_SIZE
  186. #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
  187. #endif
  188. #ifdef MIPSEB
  189. #ifndef roundl
  190. #define roundl(x) (long double)((long long)((x==0)?0.0:((x)+((x)>0)?0.5:-0.5)))
  191. #endif
  192. #endif
  193. /* No semtimedop on apple so ignore timeout till we implement one */
  194. #ifdef __APPLE__
  195. #define semtimedop(SEM, SOPS, VAL, TIMEOUT) semop(SEM, SOPS, VAL)
  196. #endif
#ifndef MIN
/* NOTE: both arguments are evaluated twice - do not pass expressions
 * with side effects (e.g. MIN(i++, j)). */
#define MIN(x, y) ((x) > (y) ? (y) : (x))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
  203. /* Put avalon last to make it the last device it tries to detect to prevent it
  204. * trying to claim same chip but different devices. Adding a device here will
  205. * update all macros in the code that use the *_PARSE_COMMANDS macros for each
  206. * listed driver. */
  207. #define FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
  208. DRIVER_ADD_COMMAND(bitforce) \
  209. DRIVER_ADD_COMMAND(icarus) \
  210. DRIVER_ADD_COMMAND(modminer)
  211. #define ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
  212. DRIVER_ADD_COMMAND(bflsc) \
  213. DRIVER_ADD_COMMAND(bitfury) \
  214. DRIVER_ADD_COMMAND(hashfast) \
  215. DRIVER_ADD_COMMAND(klondike) \
  216. DRIVER_ADD_COMMAND(knc) \
  217. DRIVER_ADD_COMMAND(avalon)
  218. #define DRIVER_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
  219. FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
  220. ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND)
  221. #define DRIVER_ENUM(X) DRIVER_##X,
  222. #define DRIVER_PROTOTYPE(X) struct device_drv X##_drv;
  223. /* Create drv_driver enum from DRIVER_PARSE_COMMANDS macro */
  224. enum drv_driver {
  225. DRIVER_PARSE_COMMANDS(DRIVER_ENUM)
  226. DRIVER_MAX
  227. };
  228. /* Use DRIVER_PARSE_COMMANDS to generate extern device_drv prototypes */
  229. DRIVER_PARSE_COMMANDS(DRIVER_PROTOTYPE)
  230. enum alive {
  231. LIFE_WELL,
  232. LIFE_SICK,
  233. LIFE_DEAD,
  234. LIFE_NOSTART,
  235. LIFE_INIT,
  236. };
  237. enum pool_strategy {
  238. POOL_FAILOVER,
  239. POOL_ROUNDROBIN,
  240. POOL_ROTATE,
  241. POOL_LOADBALANCE,
  242. POOL_BALANCE,
  243. };
  244. #define TOP_STRATEGY (POOL_BALANCE)
  245. struct strategies {
  246. const char *s;
  247. };
  248. struct cgpu_info;
  249. #ifdef HAVE_ADL
  250. struct gpu_adl {
  251. ADLTemperature lpTemperature;
  252. int iAdapterIndex;
  253. int lpAdapterID;
  254. int iBusNumber;
  255. char strAdapterName[256];
  256. ADLPMActivity lpActivity;
  257. ADLODParameters lpOdParameters;
  258. ADLODPerformanceLevels *DefPerfLev;
  259. ADLFanSpeedInfo lpFanSpeedInfo;
  260. ADLFanSpeedValue lpFanSpeedValue;
  261. ADLFanSpeedValue DefFanSpeedValue;
  262. int iEngineClock;
  263. int iMemoryClock;
  264. int iVddc;
  265. int iPercentage;
  266. bool autofan;
  267. bool autoengine;
  268. bool managed; /* Were the values ever changed on this card */
  269. int lastengine;
  270. int lasttemp;
  271. int targetfan;
  272. int targettemp;
  273. int overtemp;
  274. int minspeed;
  275. int maxspeed;
  276. int gpu;
  277. bool has_fanspeed;
  278. struct gpu_adl *twin;
  279. };
  280. #endif
  281. extern void blank_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info __maybe_unused *cgpu);
  282. struct api_data;
  283. struct thr_info;
  284. struct work;
  285. struct device_drv {
  286. enum drv_driver drv_id;
  287. char *dname;
  288. char *name;
  289. // DRV-global functions
  290. void (*drv_detect)(bool);
  291. // Device-specific functions
  292. void (*reinit_device)(struct cgpu_info *);
  293. void (*get_statline_before)(char *, size_t, struct cgpu_info *);
  294. void (*get_statline)(char *, size_t, struct cgpu_info *);
  295. struct api_data *(*get_api_stats)(struct cgpu_info *);
  296. bool (*get_stats)(struct cgpu_info *);
  297. void (*identify_device)(struct cgpu_info *); // e.g. to flash a led
  298. char *(*set_device)(struct cgpu_info *, char *option, char *setting, char *replybuf);
  299. // Thread-specific functions
  300. bool (*thread_prepare)(struct thr_info *);
  301. uint64_t (*can_limit_work)(struct thr_info *);
  302. bool (*thread_init)(struct thr_info *);
  303. bool (*prepare_work)(struct thr_info *, struct work *);
  304. /* Which hash work loop this driver uses. */
  305. void (*hash_work)(struct thr_info *);
  306. /* Two variants depending on whether the device divides work up into
  307. * small pieces or works with whole work items and may or may not have
  308. * a queue of its own. */
  309. int64_t (*scanhash)(struct thr_info *, struct work *, int64_t);
  310. int64_t (*scanwork)(struct thr_info *);
  311. /* Used to extract work from the hash table of queued work and tell
  312. * the main loop that it should not add any further work to the table.
  313. */
  314. bool (*queue_full)(struct cgpu_info *);
  315. /* Tell the driver of a block change */
  316. void (*flush_work)(struct cgpu_info *);
  317. /* Tell the driver of an updated work template for eg. stratum */
  318. void (*update_work)(struct cgpu_info *);
  319. void (*hw_error)(struct thr_info *);
  320. void (*thread_shutdown)(struct thr_info *);
  321. void (*thread_enable)(struct thr_info *);
  322. // Does it need to be free()d?
  323. bool copy;
  324. /* Highest target diff the device supports */
  325. double max_diff;
  326. double working_diff;
  327. };
  328. extern struct device_drv *copy_drv(struct device_drv*);
  329. enum dev_enable {
  330. DEV_ENABLED,
  331. DEV_DISABLED,
  332. DEV_RECOVER,
  333. };
  334. enum cl_kernels {
  335. KL_NONE,
  336. KL_POCLBM,
  337. KL_PHATK,
  338. KL_DIAKGCN,
  339. KL_DIABLO,
  340. KL_SCRYPT,
  341. };
  342. enum dev_reason {
  343. REASON_THREAD_FAIL_INIT,
  344. REASON_THREAD_ZERO_HASH,
  345. REASON_THREAD_FAIL_QUEUE,
  346. REASON_DEV_SICK_IDLE_60,
  347. REASON_DEV_DEAD_IDLE_600,
  348. REASON_DEV_NOSTART,
  349. REASON_DEV_OVER_HEAT,
  350. REASON_DEV_THERMAL_CUTOFF,
  351. REASON_DEV_COMMS_ERROR,
  352. REASON_DEV_THROTTLE,
  353. };
  354. #define REASON_NONE "None"
  355. #define REASON_THREAD_FAIL_INIT_STR "Thread failed to init"
  356. #define REASON_THREAD_ZERO_HASH_STR "Thread got zero hashes"
  357. #define REASON_THREAD_FAIL_QUEUE_STR "Thread failed to queue work"
  358. #define REASON_DEV_SICK_IDLE_60_STR "Device idle for 60s"
  359. #define REASON_DEV_DEAD_IDLE_600_STR "Device dead - idle for 600s"
  360. #define REASON_DEV_NOSTART_STR "Device failed to start"
  361. #define REASON_DEV_OVER_HEAT_STR "Device over heated"
  362. #define REASON_DEV_THERMAL_CUTOFF_STR "Device reached thermal cutoff"
  363. #define REASON_DEV_COMMS_ERROR_STR "Device comms error"
  364. #define REASON_DEV_THROTTLE_STR "Device throttle"
  365. #define REASON_UNKNOWN_STR "Unknown reason - code bug"
  366. #define MIN_SEC_UNSET 99999999
  367. struct cgminer_stats {
  368. uint32_t getwork_calls;
  369. struct timeval getwork_wait;
  370. struct timeval getwork_wait_max;
  371. struct timeval getwork_wait_min;
  372. };
  373. // Just the actual network getworks to the pool
  374. struct cgminer_pool_stats {
  375. uint32_t getwork_calls;
  376. uint32_t getwork_attempts;
  377. struct timeval getwork_wait;
  378. struct timeval getwork_wait_max;
  379. struct timeval getwork_wait_min;
  380. double getwork_wait_rolling;
  381. bool hadrolltime;
  382. bool canroll;
  383. bool hadexpire;
  384. uint32_t rolltime;
  385. double min_diff;
  386. double max_diff;
  387. double last_diff;
  388. uint32_t min_diff_count;
  389. uint32_t max_diff_count;
  390. uint64_t times_sent;
  391. uint64_t bytes_sent;
  392. uint64_t net_bytes_sent;
  393. uint64_t times_received;
  394. uint64_t bytes_received;
  395. uint64_t net_bytes_received;
  396. };
  397. struct cgpu_info {
  398. int cgminer_id;
  399. struct device_drv *drv;
  400. int device_id;
  401. char *name;
  402. char *device_path;
  403. void *device_data;
  404. #ifdef USE_USBUTILS
  405. struct cg_usb_device *usbdev;
  406. #endif
  407. #ifdef USE_AVALON
  408. struct work **works;
  409. int work_array;
  410. int queued;
  411. int results;
  412. #endif
  413. #ifdef USE_USBUTILS
  414. struct cg_usb_info usbinfo;
  415. #endif
  416. #ifdef USE_MODMINER
  417. char fpgaid;
  418. unsigned char clock;
  419. pthread_mutex_t *modminer_mutex;
  420. #endif
  421. #ifdef USE_BITFORCE
  422. struct timeval work_start_tv;
  423. unsigned int wait_ms;
  424. unsigned int sleep_ms;
  425. double avg_wait_f;
  426. unsigned int avg_wait_d;
  427. uint32_t nonces;
  428. bool nonce_range;
  429. bool polling;
  430. bool flash_led;
  431. #endif /* USE_BITFORCE */
  432. #if defined(USE_BITFORCE) || defined(USE_BFLSC)
  433. pthread_mutex_t device_mutex;
  434. #endif /* USE_BITFORCE || USE_BFLSC */
  435. enum dev_enable deven;
  436. int accepted;
  437. int rejected;
  438. int hw_errors;
  439. double rolling;
  440. double total_mhashes;
  441. double utility;
  442. enum alive status;
  443. char init[40];
  444. struct timeval last_message_tv;
  445. int threads;
  446. struct thr_info **thr;
  447. int64_t max_hashes;
  448. const char *kname;
  449. bool new_work;
  450. float temp;
  451. int cutofftemp;
  452. int diff1;
  453. double diff_accepted;
  454. double diff_rejected;
  455. int last_share_pool;
  456. time_t last_share_pool_time;
  457. double last_share_diff;
  458. time_t last_device_valid_work;
  459. time_t device_last_well;
  460. time_t device_last_not_well;
  461. enum dev_reason device_not_well_reason;
  462. int thread_fail_init_count;
  463. int thread_zero_hash_count;
  464. int thread_fail_queue_count;
  465. int dev_sick_idle_60_count;
  466. int dev_dead_idle_600_count;
  467. int dev_nostart_count;
  468. int dev_over_heat_count; // It's a warning but worth knowing
  469. int dev_thermal_cutoff_count;
  470. int dev_comms_error_count;
  471. int dev_throttle_count;
  472. struct cgminer_stats cgminer_stats;
  473. pthread_rwlock_t qlock;
  474. struct work *queued_work;
  475. struct work *unqueued_work;
  476. unsigned int queued_count;
  477. bool shutdown;
  478. struct timeval dev_start_tv;
  479. };
  480. extern bool add_cgpu(struct cgpu_info*);
/* A mutex/condvar protected work queue built on an elist list head.
 * "frozen" presumably marks the queue closed so waiters can bail out
 * - confirm against the tq_* implementation. */
struct thread_q {
	struct list_head q;	/* queued entries (elist.h) */
	bool frozen;
	pthread_mutex_t mutex;	/* protects q and frozen */
	pthread_cond_t cond;	/* signalled on queue activity */
};
/* Per mining-thread state; cgpu_info holds an array of these (thr). */
struct thr_info {
	int id;			/* thread id - presumably global; confirm at allocation site */
	int device_thread;	/* index of this thread within its device */
	bool primary_thread;
	pthread_t pth;
	cgsem_t sem;
	struct thread_q *q;
	struct cgpu_info *cgpu;	/* owning device */
	void *cgpu_data;	/* driver-private per-thread data */
	struct timeval last;
	struct timeval sick;
	bool pause;
	bool getwork;
	double rolling;
	bool work_restart;	/* NOTE(review): appears to flag stale work - confirm vs flush_work users */
	bool work_update;
};
/* A string hung off an elist list head. free_me records whether the
 * string was heap-duplicated (see string_elist_add) and so must be
 * freed on deletion (see string_elist_del). */
struct string_elist {
	char *string;
	bool free_me;
	struct list_head list;
};
  509. static inline void string_elist_add(const char *s, struct list_head *head)
  510. {
  511. struct string_elist *n;
  512. n = calloc(1, sizeof(*n));
  513. n->string = strdup(s);
  514. n->free_me = true;
  515. list_add_tail(&n->list, head);
  516. }
  517. static inline void string_elist_del(struct string_elist *item)
  518. {
  519. if (item->free_me)
  520. free(item->string);
  521. list_del(&item->list);
  522. }
  523. static inline uint32_t swab32(uint32_t v)
  524. {
  525. return bswap_32(v);
  526. }
  527. static inline void swap256(void *dest_p, const void *src_p)
  528. {
  529. uint32_t *dest = dest_p;
  530. const uint32_t *src = src_p;
  531. dest[0] = src[7];
  532. dest[1] = src[6];
  533. dest[2] = src[5];
  534. dest[3] = src[4];
  535. dest[4] = src[3];
  536. dest[5] = src[2];
  537. dest[6] = src[1];
  538. dest[7] = src[0];
  539. }
  540. static inline void swab256(void *dest_p, const void *src_p)
  541. {
  542. uint32_t *dest = dest_p;
  543. const uint32_t *src = src_p;
  544. dest[0] = swab32(src[7]);
  545. dest[1] = swab32(src[6]);
  546. dest[2] = swab32(src[5]);
  547. dest[3] = swab32(src[4]);
  548. dest[4] = swab32(src[3]);
  549. dest[5] = swab32(src[2]);
  550. dest[6] = swab32(src[1]);
  551. dest[7] = swab32(src[0]);
  552. }
  553. static inline void flip32(void *dest_p, const void *src_p)
  554. {
  555. uint32_t *dest = dest_p;
  556. const uint32_t *src = src_p;
  557. int i;
  558. for (i = 0; i < 8; i++)
  559. dest[i] = swab32(src[i]);
  560. }
  561. static inline void flip64(void *dest_p, const void *src_p)
  562. {
  563. uint32_t *dest = dest_p;
  564. const uint32_t *src = src_p;
  565. int i;
  566. for (i = 0; i < 16; i++)
  567. dest[i] = swab32(src[i]);
  568. }
  569. static inline void flip80(void *dest_p, const void *src_p)
  570. {
  571. uint32_t *dest = dest_p;
  572. const uint32_t *src = src_p;
  573. int i;
  574. for (i = 0; i < 20; i++)
  575. dest[i] = swab32(src[i]);
  576. }
  577. static inline void flip128(void *dest_p, const void *src_p)
  578. {
  579. uint32_t *dest = dest_p;
  580. const uint32_t *src = src_p;
  581. int i;
  582. for (i = 0; i < 32; i++)
  583. dest[i] = swab32(src[i]);
  584. }
/* For flipping to the correct endianness if necessary */
#if defined(__BIG_ENDIAN__) || defined(MIPSEB)
/* Big-endian host: data must actually be byte-swapped. */
static inline void endian_flip32(void *dest_p, const void *src_p)
{
	flip32(dest_p, src_p);
}
static inline void endian_flip128(void *dest_p, const void *src_p)
{
	flip128(dest_p, src_p);
}
#else
/* Little-endian host: deliberate no-ops. The parameters are kept (and
 * marked unused) so call sites are identical on both endiannesses. */
static inline void
endian_flip32(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}
static inline void
endian_flip128(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}
#endif
  605. extern void _quit(int status);
  606. /*
  607. * Set this to non-zero to enable lock tracking
  608. * Use the API lockstats command to see the locking status on stderr
  609. * i.e. in your log file if you 2> log.log - but not on the screen
  610. * API lockstats is privilidged but will always exist and will return
  611. * success if LOCK_TRACKING is enabled and warning if disabled
  612. * In production code, this should never be enabled since it will slow down all locking
  613. * So, e.g. use it to track down a deadlock - after a reproducable deadlock occurs
  614. * ... Of course if the API code itself deadlocks, it wont help :)
  615. */
  616. #define LOCK_TRACKING 0
  617. #if LOCK_TRACKING
  618. enum cglock_typ {
  619. CGLOCK_MUTEX,
  620. CGLOCK_RW,
  621. CGLOCK_UNKNOWN
  622. };
  623. extern uint64_t api_getlock(void *lock, const char *file, const char *func, const int line);
  624. extern void api_gotlock(uint64_t id, void *lock, const char *file, const char *func, const int line);
  625. extern uint64_t api_trylock(void *lock, const char *file, const char *func, const int line);
  626. extern void api_didlock(uint64_t id, int ret, void *lock, const char *file, const char *func, const int line);
  627. extern void api_gunlock(void *lock, const char *file, const char *func, const int line);
  628. extern void api_initlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int line);
  629. #define GETLOCK(_lock, _file, _func, _line) uint64_t _id1 = api_getlock((void *)(_lock), _file, _func, _line)
  630. #define GOTLOCK(_lock, _file, _func, _line) api_gotlock(_id1, (void *)(_lock), _file, _func, _line)
  631. #define TRYLOCK(_lock, _file, _func, _line) uint64_t _id2 = api_trylock((void *)(_lock), _file, _func, _line)
  632. #define DIDLOCK(_ret, _lock, _file, _func, _line) api_didlock(_id2, _ret, (void *)(_lock), _file, _func, _line)
  633. #define GUNLOCK(_lock, _file, _func, _line) api_gunlock((void *)(_lock), _file, _func, _line)
  634. #define INITLOCK(_lock, _typ, _file, _func, _line) api_initlock((void *)(_lock), _typ, _file, _func, _line)
  635. #else
  636. #define GETLOCK(_lock, _file, _func, _line)
  637. #define GOTLOCK(_lock, _file, _func, _line)
  638. #define TRYLOCK(_lock, _file, _func, _line)
  639. #define DIDLOCK(_ret, _lock, _file, _func, _line)
  640. #define GUNLOCK(_lock, _file, _func, _line)
  641. #define INITLOCK(_typ, _lock, _file, _func, _line)
  642. #endif
  643. #define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__)
  644. #define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
  645. #define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__)
  646. #define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__)
  647. #define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__)
  648. #define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__)
  649. #define rd_lock(_lock) _rd_lock(_lock, __FILE__, __func__, __LINE__)
  650. #define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__)
  651. #define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
  652. #define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
  653. #define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__)
  654. #define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__)
  655. #define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__)
  656. #define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__)
  657. #define cglock_init(_lock) _cglock_init(_lock, __FILE__, __func__, __LINE__)
  658. #define cg_rlock(_lock) _cg_rlock(_lock, __FILE__, __func__, __LINE__)
  659. #define cg_ilock(_lock) _cg_ilock(_lock, __FILE__, __func__, __LINE__)
  660. #define cg_ulock(_lock) _cg_ulock(_lock, __FILE__, __func__, __LINE__)
  661. #define cg_wlock(_lock) _cg_wlock(_lock, __FILE__, __func__, __LINE__)
  662. #define cg_dwlock(_lock) _cg_dwlock(_lock, __FILE__, __func__, __LINE__)
  663. #define cg_dwilock(_lock) _cg_dwilock(_lock, __FILE__, __func__, __LINE__)
  664. #define cg_dlock(_lock) _cg_dlock(_lock, __FILE__, __func__, __LINE__)
  665. #define cg_runlock(_lock) _cg_runlock(_lock, __FILE__, __func__, __LINE__)
  666. #define cg_ruwlock(_lock) _cg_ruwlock(_lock, __FILE__, __func__, __LINE__)
  667. #define cg_wunlock(_lock) _cg_wunlock(_lock, __FILE__, __func__, __LINE__)
  668. static inline void _mutex_lock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
  669. {
  670. GETLOCK(lock, file, func, line);
  671. if (unlikely(pthread_mutex_lock(lock)))
  672. quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK! errno=%d", errno);
  673. GOTLOCK(lock, file, func, line);
  674. }
  675. static inline void _mutex_unlock_noyield(pthread_mutex_t *lock, const char *file, const char *func, const int line)
  676. {
  677. if (unlikely(pthread_mutex_unlock(lock)))
  678. quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK! errno=%d", errno);
  679. GUNLOCK(lock, file, func, line);
  680. }
/* Unlock and then yield the processor so a thread blocked on this
 * mutex gets an immediate chance to run. */
static inline void _mutex_unlock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	_mutex_unlock_noyield(lock, file, func, line);
	sched_yield();
}
/* Non-blocking lock attempt. Returns pthread_mutex_trylock's result:
 * 0 on success, non-zero (e.g. EBUSY) otherwise. TRYLOCK/DIDLOCK are
 * no-ops unless LOCK_TRACKING is enabled. */
static inline int _mutex_trylock(pthread_mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_mutex_trylock(lock);
	DIDLOCK(ret, lock, file, func, line);
	return ret;
}
  693. static inline void _wr_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
  694. {
  695. GETLOCK(lock, file, func, line);
  696. if (unlikely(pthread_rwlock_wrlock(lock)))
  697. quitfrom(1, file, func, line, "WTF WRLOCK ERROR ON LOCK! errno=%d", errno);
  698. GOTLOCK(lock, file, func, line);
  699. }
/* Non-blocking write-lock attempt. Returns pthread_rwlock_trywrlock's
 * result: 0 on success, non-zero (e.g. EBUSY) otherwise. */
static inline int _wr_trylock(pthread_rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_rwlock_trywrlock(lock);
	DIDLOCK(ret, lock, file, func, line);
	return ret;
}
  707. static inline void _rd_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
  708. {
  709. GETLOCK(lock, file, func, line);
  710. if (unlikely(pthread_rwlock_rdlock(lock)))
  711. quitfrom(1, file, func, line, "WTF RDLOCK ERROR ON LOCK! errno=%d", errno);
  712. GOTLOCK(lock, file, func, line);
  713. }
  714. static inline void _rw_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
  715. {
  716. if (unlikely(pthread_rwlock_unlock(lock)))
  717. quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK! errno=%d", errno);
  718. GUNLOCK(lock, file, func, line);
  719. }
/* Drop a read hold without yielding afterwards. */
static inline void _rd_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}
/* Drop a write hold without yielding afterwards. */
static inline void _wr_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}
/* Drop a read hold, then yield so blocked threads can proceed. */
static inline void _rd_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	sched_yield();
}
/* Drop a write hold, then yield so blocked threads can proceed. */
static inline void _wr_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	sched_yield();
}
  738. static inline void _mutex_init(pthread_mutex_t *lock, const char *file, const char *func, const int line)
  739. {
  740. if (unlikely(pthread_mutex_init(lock, NULL)))
  741. quitfrom(1, file, func, line, "Failed to pthread_mutex_init errno=%d", errno);
  742. INITLOCK(lock, CGLOCK_MUTEX, file, func, line);
  743. }
/* Tear down a mutex created with mutex_init(). */
static inline void mutex_destroy(pthread_mutex_t *lock)
{
/* Ignore return code. This only invalidates the mutex on linux but
 * releases resources on windows. */
pthread_mutex_destroy(lock);
}
  750. static inline void _rwlock_init(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
  751. {
  752. if (unlikely(pthread_rwlock_init(lock, NULL)))
  753. quitfrom(1, file, func, line, "Failed to pthread_rwlock_init errno=%d", errno);
  754. INITLOCK(lock, CGLOCK_RW, file, func, line);
  755. }
/* Tear down a rwlock created with rwlock_init(). Return code is deliberately
 * ignored, matching mutex_destroy() above. */
static inline void rwlock_destroy(pthread_rwlock_t *lock)
{
pthread_rwlock_destroy(lock);
}
/* Initialise both components of a cglock: the mutex that serialises
 * acquisition/promotion (see the _cg_* functions below) and the rwlock that
 * protects the data itself. */
static inline void _cglock_init(cglock_t *lock, const char *file, const char *func, const int line)
{
_mutex_init(&lock->mutex, file, func, line);
_rwlock_init(&lock->rwlock, file, func, line);
}
/* Destroy both components of a cglock, in reverse order of initialisation. */
static inline void cglock_destroy(cglock_t *lock)
{
rwlock_destroy(&lock->rwlock);
mutex_destroy(&lock->mutex);
}
/* Read lock variant of cglock. Cannot be promoted. The mutex is taken while
 * acquiring the read side so that a writer (which holds the mutex for its
 * whole critical section) excludes new readers; it is then released without
 * yielding since we already hold the lock we came for. */
static inline void _cg_rlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_mutex_lock(&lock->mutex, file, func, line);
_rd_lock(&lock->rwlock, file, func, line);
_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Intermediate variant of cglock - behaves as a read lock but can be promoted
 * to a write lock or demoted to read lock. Only the mutex is held at this
 * point; already-active readers of the rwlock may still be running. */
static inline void _cg_ilock(cglock_t *lock, const char *file, const char *func, const int line)
{
_mutex_lock(&lock->mutex, file, func, line);
}
/* Upgrade intermediate variant to a write lock. Caller must already hold the
 * mutex via _cg_ilock(); this blocks until existing readers drain. */
static inline void _cg_ulock(cglock_t *lock, const char *file, const char *func, const int line)
{
_wr_lock(&lock->rwlock, file, func, line);
}
/* Write lock variant of cglock: take the mutex first (excluding other
 * writers and new readers), then the rwlock write side. Both are held until
 * _cg_wunlock() or a downgrade. */
static inline void _cg_wlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_mutex_lock(&lock->mutex, file, func, line);
_wr_lock(&lock->rwlock, file, func, line);
}
/* Downgrade write variant to a read lock. The mutex stays held across the
 * write-unlock/read-lock swap, so no other cglock writer can slip in between
 * the two steps; the mutex is then released without yielding. */
static inline void _cg_dwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_wr_unlock_noyield(&lock->rwlock, file, func, line);
_rd_lock(&lock->rwlock, file, func, line);
_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Demote a write variant to an intermediate variant: drop the rwlock write
 * side (yielding) while retaining the mutex taken by _cg_wlock(). */
static inline void _cg_dwilock(cglock_t *lock, const char *file, const char *func, const int line)
{
_wr_unlock(&lock->rwlock, file, func, line);
}
/* Downgrade intermediate variant to a read lock: acquire the read side, then
 * release the mutex (held since _cg_ilock()) without yielding. */
static inline void _cg_dlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_rd_lock(&lock->rwlock, file, func, line);
_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Release a cglock held in read mode (yields after unlocking). */
static inline void _cg_runlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_rd_unlock(&lock->rwlock, file, func, line);
}
/* This drops the read lock and grabs a write lock. It does NOT protect data
 * between the two locks! The read side is released before the mutex is even
 * requested, so other threads may read or write in the window. */
static inline void _cg_ruwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_rd_unlock_noyield(&lock->rwlock, file, func, line);
_cg_wlock(lock, file, func, line);
}
/* Release a cglock held in write mode: drop the rwlock write side without
 * yielding, then release the mutex (the final unlock is the one that
 * yields). */
static inline void _cg_wunlock(cglock_t *lock, const char *file, const char *func, const int line)
{
_wr_unlock_noyield(&lock->rwlock, file, func, line);
_mutex_unlock(&lock->mutex, file, func, line);
}
  828. struct pool;
  829. #define API_MCAST_CODE "FTW"
  830. #define API_MCAST_ADDR "224.0.0.75"
  831. extern bool opt_work_update;
  832. extern bool opt_protocol;
  833. extern bool have_longpoll;
  834. extern char *opt_kernel_path;
  835. extern char *opt_socks_proxy;
  836. extern char *cgminer_path;
  837. extern bool opt_fail_only;
  838. extern bool opt_autofan;
  839. extern bool opt_autoengine;
  840. extern bool use_curses;
  841. extern char *opt_api_allow;
  842. extern bool opt_api_mcast;
  843. extern char *opt_api_mcast_addr;
  844. extern char *opt_api_mcast_code;
  845. extern char *opt_api_mcast_des;
  846. extern int opt_api_mcast_port;
  847. extern char *opt_api_groups;
  848. extern char *opt_api_description;
  849. extern int opt_api_port;
  850. extern bool opt_api_listen;
  851. extern bool opt_api_network;
  852. extern bool opt_delaynet;
  853. extern bool opt_restart;
  854. extern bool opt_nogpu;
  855. extern char *opt_icarus_options;
  856. extern char *opt_icarus_timing;
  857. extern bool opt_worktime;
  858. #ifdef USE_AVALON
  859. extern char *opt_avalon_options;
  860. extern char *opt_bitburner_fury_options;
  861. #endif
  862. #ifdef USE_KLONDIKE
  863. extern char *opt_klondike_options;
  864. #endif
  865. #ifdef USE_USBUTILS
  866. extern char *opt_usb_select;
  867. extern int opt_usbdump;
  868. extern bool opt_usb_list_all;
  869. extern cgsem_t usb_resource_sem;
  870. #endif
  871. #ifdef USE_BITFORCE
  872. extern bool opt_bfl_noncerange;
  873. #endif
  874. extern int swork_id;
  875. #if LOCK_TRACKING
  876. extern pthread_mutex_t lockstat_lock;
  877. #endif
  878. extern pthread_rwlock_t netacc_lock;
  879. extern const uint32_t sha256_init_state[];
  880. #ifdef HAVE_LIBCURL
  881. extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
  882. const char *rpc_req, bool, bool, int *,
  883. struct pool *pool, bool);
  884. #endif
  885. extern const char *proxytype(proxytypes_t proxytype);
  886. extern char *get_proxy(char *url, struct pool *pool);
  887. extern void __bin2hex(char *s, const unsigned char *p, size_t len);
  888. extern char *bin2hex(const unsigned char *p, size_t len);
  889. extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);
  890. typedef bool (*sha256_func)(struct thr_info*, const unsigned char *pmidstate,
  891. unsigned char *pdata,
  892. unsigned char *phash1, unsigned char *phash,
  893. const unsigned char *ptarget,
  894. uint32_t max_nonce,
  895. uint32_t *last_nonce,
  896. uint32_t nonce);
  897. extern bool fulltest(const unsigned char *hash, const unsigned char *target);
  898. extern int opt_queue;
  899. extern int opt_scantime;
  900. extern int opt_expiry;
  901. #ifdef USE_USBUTILS
  902. extern pthread_mutex_t cgusb_lock;
  903. extern pthread_mutex_t cgusbres_lock;
  904. extern cglock_t cgusb_fd_lock;
  905. #endif
  906. extern cglock_t control_lock;
  907. extern pthread_mutex_t hash_lock;
  908. extern pthread_mutex_t console_lock;
  909. extern cglock_t ch_lock;
  910. extern pthread_rwlock_t mining_thr_lock;
  911. extern pthread_rwlock_t devices_lock;
  912. extern pthread_mutex_t restart_lock;
  913. extern pthread_cond_t restart_cond;
  914. extern void clear_stratum_shares(struct pool *pool);
  915. extern void set_target(unsigned char *dest_target, double diff);
  916. extern int restart_wait(struct thr_info *thr, unsigned int mstime);
  917. extern void kill_work(void);
  918. extern void reinit_device(struct cgpu_info *cgpu);
  919. #ifdef HAVE_ADL
  920. extern bool gpu_stats(int gpu, float *temp, int *engineclock, int *memclock, float *vddc, int *activity, int *fanspeed, int *fanpercent, int *powertune);
  921. extern int set_fanspeed(int gpu, int iFanSpeed);
  922. extern int set_vddc(int gpu, float fVddc);
  923. extern int set_engineclock(int gpu, int iEngineClock);
  924. extern int set_memoryclock(int gpu, int iMemoryClock);
  925. #endif
  926. extern void api(int thr_id);
  927. extern struct pool *current_pool(void);
  928. extern int enabled_pools;
  929. extern void get_intrange(char *arg, int *val1, int *val2);
  930. extern bool detect_stratum(struct pool *pool, char *url);
  931. extern void print_summary(void);
  932. extern void adjust_quota_gcd(void);
  933. extern struct pool *add_pool(void);
  934. extern bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass);
  935. #define MAX_GPUDEVICES 16
  936. #define MAX_DEVICES 4096
  937. #define MIN_SHA_INTENSITY -10
  938. #define MIN_SHA_INTENSITY_STR "-10"
  939. #define MAX_SHA_INTENSITY 14
  940. #define MAX_SHA_INTENSITY_STR "14"
  941. #define MIN_SCRYPT_INTENSITY 8
  942. #define MIN_SCRYPT_INTENSITY_STR "8"
  943. #define MAX_SCRYPT_INTENSITY 20
  944. #define MAX_SCRYPT_INTENSITY_STR "20"
  945. #ifdef USE_SCRYPT
  946. #define MIN_INTENSITY (opt_scrypt ? MIN_SCRYPT_INTENSITY : MIN_SHA_INTENSITY)
  947. #define MIN_INTENSITY_STR (opt_scrypt ? MIN_SCRYPT_INTENSITY_STR : MIN_SHA_INTENSITY_STR)
  948. #define MAX_INTENSITY (opt_scrypt ? MAX_SCRYPT_INTENSITY : MAX_SHA_INTENSITY)
  949. #define MAX_INTENSITY_STR (opt_scrypt ? MAX_SCRYPT_INTENSITY_STR : MAX_SHA_INTENSITY_STR)
  950. #define MAX_GPU_INTENSITY MAX_SCRYPT_INTENSITY
  951. #else
  952. #define MIN_INTENSITY MIN_SHA_INTENSITY
  953. #define MIN_INTENSITY_STR MIN_SHA_INTENSITY_STR
  954. #define MAX_INTENSITY MAX_SHA_INTENSITY
  955. #define MAX_INTENSITY_STR MAX_SHA_INTENSITY_STR
  956. #define MAX_GPU_INTENSITY MAX_SHA_INTENSITY
  957. #endif
  958. extern bool hotplug_mode;
  959. extern int hotplug_time;
  960. extern struct list_head scan_devices;
  961. extern int nDevs;
  962. extern int num_processors;
  963. extern int hw_errors;
  964. extern bool use_syslog;
  965. extern bool opt_quiet;
  966. extern struct thr_info *control_thr;
  967. extern struct thr_info **mining_thr;
  968. extern struct cgpu_info gpus[MAX_GPUDEVICES];
  969. extern int gpu_threads;
  970. #ifdef USE_SCRYPT
  971. extern bool opt_scrypt;
  972. #else
  973. #define opt_scrypt (0)
  974. #endif
  975. extern double total_secs;
  976. extern int mining_threads;
  977. extern int total_devices;
  978. extern int zombie_devs;
  979. extern struct cgpu_info **devices;
  980. extern int total_pools;
  981. extern struct pool **pools;
  982. extern struct strategies strategies[];
  983. extern enum pool_strategy pool_strategy;
  984. extern int opt_rotate_period;
  985. extern double total_rolling;
  986. extern double total_mhashes_done;
  987. extern unsigned int new_blocks;
  988. extern unsigned int found_blocks;
  989. extern int total_accepted, total_rejected, total_diff1;;
  990. extern int total_getworks, total_stale, total_discarded;
  991. extern double total_diff_accepted, total_diff_rejected, total_diff_stale;
  992. extern unsigned int local_work;
  993. extern unsigned int total_go, total_ro;
  994. extern const int opt_cutofftemp;
  995. extern int opt_log_interval;
  996. extern unsigned long long global_hashrate;
  997. extern char current_hash[68];
  998. extern double current_diff;
  999. extern uint64_t best_diff;
  1000. extern struct timeval block_timeval;
  1001. extern char *workpadding;
/* An entry in a pool's ring of reusable CURL handles (see struct pool's
 * curlring/curls fields). NOTE(review): tv presumably records when the
 * handle was last used — confirm against the curl ring management code. */
struct curl_ent {
CURL *curl;
struct list_head node;
struct timeval tv;
};
/* Disabled needs to be the lowest enum as a freshly calloced value will then
 * equal disabled */
enum pool_enable {
POOL_DISABLED,
POOL_ENABLED,
POOL_REJECTING, /* presumably: pool reachable but rejecting our shares — confirm in cgminer.c */
};
/* Work template for the current stratum job. NOTE(review): field names
 * mirror the stratum mining.notify parameters (job_id, prevhash, version,
 * nbits, ntime); merkle_bin appears to hold `merkles` binary branch hashes
 * and cb_len the coinbase length — confirm against the stratum parser. */
struct stratum_work {
char *job_id;
char *prev_hash;
unsigned char **merkle_bin;
char *bbversion;
char *nbit;
char *ntime;
bool clean;
size_t cb_len;
size_t header_len;
int merkles;
double diff;
};
/* Buffer sizing for stratum socket reads. */
#define RBUFSIZE 8192
#define RECVSIZE (RBUFSIZE - 4)
/* All state for a single upstream pool: share accounting, connection and
 * protocol state for getwork, stratum and GBT. */
struct pool {
int pool_no;
int prio;
/* Share accounting */
int accepted, rejected;
int seq_rejects;
int seq_getfails;
int solved;
int diff1;
char diff[8];
/* Quota-based load balancing */
int quota;
int quota_gcd;
int quota_used;
int works;
double diff_accepted;
double diff_rejected;
double diff_stale;
/* Connection health flags */
bool submit_fail;
bool idle;
bool lagging;
bool probed;
enum pool_enable enabled;
bool submit_old;
bool removed;
bool lp_started;
/* Longpoll endpoint */
char *hdr_path;
char *lp_url;
unsigned int getwork_requested;
unsigned int stale_shares;
unsigned int discarded_work;
unsigned int getfail_occasions;
unsigned int remotefail_occasions;
struct timeval tv_idle;
double utility;
int last_shares, shares;
/* RPC credentials and proxy settings */
char *rpc_req;
char *rpc_url;
char *rpc_userpass;
char *rpc_user, *rpc_pass;
proxytypes_t rpc_proxytype;
char *rpc_proxy;
/* NOTE(review): pool_lock presumably guards this struct's counters and
 * data_lock the work template data — confirm usage in cgminer.c. */
pthread_mutex_t pool_lock;
cglock_t data_lock;
struct thread_q *submit_q;
struct thread_q *getwork_q;
pthread_t longpoll_thread;
pthread_t test_thread;
bool testing;
/* Ring of reusable CURL handles; cr_cond presumably signals availability */
int curls;
pthread_cond_t cr_cond;
struct list_head curlring;
time_t last_share_time;
double last_share_diff;
uint64_t best_diff;
struct cgminer_stats cgminer_stats;
struct cgminer_pool_stats cgminer_pool_stats;
/* The last block this particular pool knows about */
char prev_block[32];
/* Stratum variables */
char *stratum_url;
char *stratum_port;
struct addrinfo stratum_hints;
SOCKETTYPE sock;
char *sockbuf;
size_t sockbuf_size;
char *sockaddr_url; /* stripped url used for sockaddr */
char *sockaddr_proxy_url;
char *sockaddr_proxy_port;
char *nonce1;
unsigned char *nonce1bin;
size_t n1_len;
uint32_t nonce2;
int n2size;
char *sessionid;
bool has_stratum;
bool stratum_active;
bool stratum_init;
bool stratum_notify;
struct stratum_work swork;
pthread_t stratum_sthread;
pthread_t stratum_rthread;
pthread_mutex_t stratum_lock;
struct thread_q *stratum_q;
int sshares; /* stratum shares submitted waiting on response */
/* GBT variables */
bool has_gbt;
cglock_t gbt_lock;
unsigned char previousblockhash[32];
unsigned char gbt_target[32];
char *coinbasetxn;
char *longpollid;
char *gbt_workid;
int gbt_expires;
uint32_t gbt_version;
uint32_t curtime;
uint32_t gbt_bits;
unsigned char *txn_hashes;
int gbt_txns;
int coinbase_len;
/* Shared by both stratum & GBT */
unsigned char *coinbase;
int nonce2_offset;
unsigned char header_bin[128];
int merkle_offset;
struct timeval tv_lastwork;
};
/* Single-character tags recording how a work item was obtained; stored in
 * struct work's getwork_mode field below. */
#define GETWORK_MODE_TESTPOOL 'T'
#define GETWORK_MODE_POOL 'P'
#define GETWORK_MODE_LP 'L'
#define GETWORK_MODE_BENCHMARK 'B'
#define GETWORK_MODE_STRATUM 'S'
#define GETWORK_MODE_GBT 'G'
/* A single unit of hashing work: the block header bytes plus bookkeeping
 * about which pool/thread it belongs to and how it was obtained. */
struct work {
unsigned char data[128];
unsigned char midstate[32];
unsigned char target[32];
unsigned char hash[32];
#ifdef USE_SCRYPT
unsigned char device_target[32];
#endif
double device_diff;
uint64_t share_diff;
int rolls;
int drv_rolllimit; /* How much the driver can roll ntime */
uint32_t nonce; /* For devices that hash sole work */
struct thr_info *thr;
int thr_id;
struct pool *pool;
struct timeval tv_staged;
bool mined;
bool clone;
bool cloned;
int rolltime;
bool longpoll;
bool stale;
bool mandatory;
bool block;
/* Stratum-specific fields */
bool stratum;
char *job_id;
uint32_t nonce2;
size_t nonce2_len;
char *ntime;
double sdiff;
char *nonce1;
/* GBT-specific fields */
bool gbt;
char *coinbase;
int gbt_txns;
unsigned int work_block;
int id;
UT_hash_handle hh; /* uthash handle for queued-work lookup by midstate */
double work_difficulty;
// Allow devices to identify work if multiple sub-devices
int subid;
// Allow devices to flag work for their own purposes
bool devflag;
// Allow devices to timestamp work for their own purposes
struct timeval tv_stamp;
/* Lifecycle timestamps */
struct timeval tv_getwork;
struct timeval tv_getwork_reply;
struct timeval tv_cloned;
struct timeval tv_work_start;
struct timeval tv_work_found;
char getwork_mode; /* one of the GETWORK_MODE_* tags above */
};
#ifdef USE_MODMINER
/* Per-FPGA runtime state for ModMiner devices: current work, share/error
 * counters and temperature-probe bookkeeping used by the modminer driver. */
struct modminer_fpga_state {
bool work_running;
struct work running_work;
struct timeval tv_workstart;
uint32_t hashes;
char next_work_cmd[46];
char fpgaid;
bool overheated;
bool new_work;
uint32_t shares;
uint32_t shares_last_hw;
uint32_t hw_errors;
uint32_t shares_to_good;
uint32_t timeout_fail;
uint32_t success_more;
struct timeval last_changed;
struct timeval last_nonce;
struct timeval first_work;
bool death_stage_one;
bool tried_two_byte_temp;
bool one_byte_temp;
};
#endif
  1216. #define TAILBUFSIZ 64
  1217. #define tailsprintf(buf, bufsiz, fmt, ...) do { \
  1218. char tmp13[TAILBUFSIZ]; \
  1219. size_t len13, buflen = strlen(buf); \
  1220. snprintf(tmp13, sizeof(tmp13), fmt, ##__VA_ARGS__); \
  1221. len13 = strlen(tmp13); \
  1222. if ((buflen + len13) >= bufsiz) \
  1223. quit(1, "tailsprintf buffer overflow in %s %s line %d", __FILE__, __func__, __LINE__); \
  1224. strcat(buf, tmp13); \
  1225. } while (0)
  1226. extern void get_datestamp(char *, size_t, struct timeval *);
  1227. extern void inc_hw_errors(struct thr_info *thr);
  1228. extern bool test_nonce(struct work *work, uint32_t nonce);
  1229. extern bool test_nonce_diff(struct work *work, uint32_t nonce, double diff);
  1230. extern void submit_tested_work(struct thr_info *thr, struct work *work);
  1231. extern bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce);
  1232. extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce,
  1233. int noffset);
  1234. extern struct work *get_work(struct thr_info *thr, const int thr_id);
  1235. extern void __add_queued(struct cgpu_info *cgpu, struct work *work);
  1236. extern struct work *get_queued(struct cgpu_info *cgpu);
  1237. extern void add_queued(struct cgpu_info *cgpu, struct work *work);
  1238. extern struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id);
  1239. extern struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
  1240. extern struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
  1241. extern struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
  1242. extern void __work_completed(struct cgpu_info *cgpu, struct work *work);
  1243. extern void work_completed(struct cgpu_info *cgpu, struct work *work);
  1244. extern struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
  1245. extern void hash_driver_work(struct thr_info *mythr);
  1246. extern void hash_queued_work(struct thr_info *mythr);
  1247. extern void _wlog(const char *str);
  1248. extern void _wlogprint(const char *str);
  1249. extern int curses_int(const char *query);
  1250. extern char *curses_input(const char *query);
  1251. extern void kill_work(void);
  1252. extern void switch_pools(struct pool *selected);
  1253. extern void discard_work(struct work *work);
  1254. extern void remove_pool(struct pool *pool);
  1255. extern void write_config(FILE *fcfg);
  1256. extern void zero_bestshare(void);
  1257. extern void zero_stats(void);
  1258. extern void default_save_file(char *filename);
  1259. extern bool log_curses_only(int prio, const char *datetime, const char *str);
  1260. extern void clear_logwin(void);
  1261. extern void logwin_update(void);
  1262. extern bool pool_tclear(struct pool *pool, bool *var);
  1263. extern struct thread_q *tq_new(void);
  1264. extern void tq_free(struct thread_q *tq);
  1265. extern bool tq_push(struct thread_q *tq, void *data);
  1266. extern void *tq_pop(struct thread_q *tq, const struct timespec *abstime);
  1267. extern void tq_freeze(struct thread_q *tq);
  1268. extern void tq_thaw(struct thread_q *tq);
  1269. extern bool successful_connect;
  1270. extern void adl(void);
  1271. extern void app_restart(void);
  1272. extern void clean_work(struct work *work);
  1273. extern void free_work(struct work *work);
  1274. extern struct work *copy_work_noffset(struct work *base_work, int noffset);
  1275. #define copy_work(work_in) copy_work_noffset(work_in, 0)
  1276. extern struct thr_info *get_thread(int thr_id);
  1277. extern struct cgpu_info *get_devices(int id);
/* Type tags for values attached to struct api_data entries by the
 * api_add_* helpers declared below; each tag corresponds to the pointer
 * type the matching helper accepts. */
enum api_data_type {
API_ESCAPE,
API_STRING,
API_CONST,
API_UINT8,
API_UINT16,
API_INT,
API_UINT,
API_UINT32,
API_UINT64,
API_DOUBLE,
API_ELAPSED,
API_BOOL,
API_TIMEVAL,
API_TIME,
API_MHS,
API_MHTOTAL,
API_TEMP,
API_UTILITY,
API_FREQ,
API_VOLTS,
API_HS,
API_DIFF,
API_PERCENT
};
/* One name/value pair in the doubly-linked list built by the api_add_*
 * helpers; `type` selects how `data` is interpreted and formatted.
 * data_was_malloc flags that `data` was copied to the heap (presumably so
 * the list teardown knows to free it — confirm in api.c). */
struct api_data {
enum api_data_type type;
char *name;
void *data;
bool data_was_malloc;
struct api_data *prev;
struct api_data *next;
};
  1311. extern struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data);
  1312. extern struct api_data *api_add_string(struct api_data *root, char *name, char *data, bool copy_data);
  1313. extern struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data);
  1314. extern struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data);
  1315. extern struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data);
  1316. extern struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data);
  1317. extern struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data);
  1318. extern struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data);
  1319. extern struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data);
  1320. extern struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data);
  1321. extern struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data);
  1322. extern struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data);
  1323. extern struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data);
  1324. extern struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data);
  1325. extern struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data);
  1326. extern struct api_data *api_add_mhstotal(struct api_data *root, char *name, double *data, bool copy_data);
  1327. extern struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data);
  1328. extern struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data);
  1329. extern struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data);
  1330. extern struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data);
  1331. extern struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data);
  1332. extern struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data);
  1333. extern struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data);
  1334. #endif /* __MINER_H__ */