/* miner.h — cgminer core declarations. */
  1. #ifndef __MINER_H__
  2. #define __MINER_H__
  3. #include "config.h"
  4. #include <stdbool.h>
  5. #include <stdint.h>
  6. #include <sys/time.h>
  7. #include <pthread.h>
  8. #include <jansson.h>
  9. #ifdef HAVE_LIBCURL
  10. #include <curl/curl.h>
  11. #else
  12. typedef char CURL;
  13. extern char *curly;
  14. #define curl_easy_init(curl) (curly)
  15. #define curl_easy_cleanup(curl) {}
  16. #define curl_global_cleanup() {}
  17. #define CURL_GLOBAL_ALL 0
  18. #define curl_global_init(X) (0)
  19. #endif
  20. #include <sched.h>
  21. #include "elist.h"
  22. #include "uthash.h"
  23. #include "logging.h"
  24. #include "util.h"
  25. #include <sys/types.h>
  26. #ifndef WIN32
  27. # include <sys/socket.h>
  28. # include <netdb.h>
  29. #endif
  30. #ifdef USE_USBUTILS
  31. #include <semaphore.h>
  32. #endif
  33. #ifdef STDC_HEADERS
  34. # include <stdlib.h>
  35. # include <stddef.h>
  36. #else
  37. # ifdef HAVE_STDLIB_H
  38. # include <stdlib.h>
  39. # endif
  40. #endif
  41. #ifdef HAVE_ALLOCA_H
  42. # include <alloca.h>
  43. #elif defined __GNUC__
  44. # ifndef WIN32
  45. # define alloca __builtin_alloca
  46. # else
  47. # include <malloc.h>
  48. # endif
  49. #elif defined _AIX
  50. # define alloca __alloca
  51. #elif defined _MSC_VER
  52. # include <malloc.h>
  53. # define alloca _alloca
  54. #else
  55. # ifndef HAVE_ALLOCA
  56. # ifdef __cplusplus
  57. extern "C"
  58. # endif
  59. void *alloca (size_t);
  60. # endif
  61. #endif
  62. #ifdef __MINGW32__
  63. #include <windows.h>
  64. #include <io.h>
/* MinGW has no fsync(); emulate it by flushing the OS buffers of the
 * Windows HANDLE behind the POSIX descriptor.  Returns 0 on success and
 * -1 on failure, matching the POSIX fsync() contract. */
static inline int fsync (int fd)
{
	return (FlushFileBuffers ((HANDLE) _get_osfhandle (fd))) ? 0 : -1;
}
  69. #ifndef EWOULDBLOCK
  70. # define EWOULDBLOCK EAGAIN
  71. #endif
  72. #ifndef MSG_DONTWAIT
  73. # define MSG_DONTWAIT 0x1000000
  74. #endif
  75. #endif /* __MINGW32__ */
  76. #if defined (__linux)
  77. #ifndef LINUX
  78. #define LINUX
  79. #endif
  80. #endif
  81. #ifdef WIN32
  82. #ifndef timersub
  83. #define timersub(a, b, result) \
  84. do { \
  85. (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
  86. (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
  87. if ((result)->tv_usec < 0) { \
  88. --(result)->tv_sec; \
  89. (result)->tv_usec += 1000000; \
  90. } \
  91. } while (0)
  92. #endif
  93. #ifndef timeradd
  94. # define timeradd(a, b, result) \
  95. do { \
  96. (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
  97. (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
  98. if ((result)->tv_usec >= 1000000) \
  99. { \
  100. ++(result)->tv_sec; \
  101. (result)->tv_usec -= 1000000; \
  102. } \
  103. } while (0)
  104. #endif
  105. #endif
  106. #ifdef HAVE_ADL
  107. #include "ADL_SDK/adl_sdk.h"
  108. #endif
  109. #ifdef USE_USBUTILS
  110. #include <libusb.h>
  111. #endif
  112. #ifdef USE_USBUTILS
  113. #include "usbutils.h"
  114. #endif
  115. #if (!defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
  116. || (defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
  117. #ifndef bswap_16
  118. #define bswap_16 __builtin_bswap16
  119. #define bswap_32 __builtin_bswap32
  120. #define bswap_64 __builtin_bswap64
  121. #endif
  122. #else
  123. #if HAVE_BYTESWAP_H
  124. #include <byteswap.h>
  125. #elif defined(USE_SYS_ENDIAN_H)
  126. #include <sys/endian.h>
  127. #elif defined(__APPLE__)
  128. #include <libkern/OSByteOrder.h>
  129. #define bswap_16 OSSwapInt16
  130. #define bswap_32 OSSwapInt32
  131. #define bswap_64 OSSwapInt64
  132. #else
  133. #define bswap_16(value) \
  134. ((((value) & 0xff) << 8) | ((value) >> 8))
  135. #define bswap_32(value) \
  136. (((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | \
  137. (uint32_t)bswap_16((uint16_t)((value) >> 16)))
  138. #define bswap_64(value) \
  139. (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) \
  140. << 32) | \
  141. (uint64_t)bswap_32((uint32_t)((value) >> 32)))
  142. #endif
  143. #endif /* !defined(__GLXBYTEORDER_H__) */
  144. /* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then
  145. * htobe64 also won't exist */
  146. #ifndef htobe32
  147. # if __BYTE_ORDER == __LITTLE_ENDIAN
  148. # define htole16(x) (x)
  149. # define htole32(x) (x)
  150. # define htole64(x) (x)
  151. # define le32toh(x) (x)
  152. # define le64toh(x) (x)
  153. # define be32toh(x) bswap_32(x)
  154. # define be64toh(x) bswap_64(x)
  155. # define htobe32(x) bswap_32(x)
  156. # define htobe64(x) bswap_64(x)
  157. # elif __BYTE_ORDER == __BIG_ENDIAN
  158. # define htole16(x) bswap_16(x)
  159. # define htole32(x) bswap_32(x)
  160. # define le32toh(x) bswap_32(x)
  161. # define le64toh(x) bswap_64(x)
  162. # define htole64(x) bswap_64(x)
  163. # define be32toh(x) (x)
  164. # define be64toh(x) (x)
  165. # define htobe32(x) (x)
  166. # define htobe64(x) (x)
  167. #else
  168. #error UNKNOWN BYTE ORDER
  169. #endif
  170. #endif
/* Branch-prediction hints: expand to __builtin_expect on optimising GCC
 * builds, and to the bare expression everywhere else. */
#undef unlikely
#undef likely
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#define unlikely(expr) (__builtin_expect(!!(expr), 0))
#define likely(expr) (__builtin_expect(!!(expr), 1))
#else
#define unlikely(expr) (expr)
#define likely(expr) (expr)
#endif
#define __maybe_unused __attribute__((unused))
/* Self-assignment idiom to silence "may be used uninitialised" warnings. */
#define uninitialised_var(x) x = x
#if defined(__i386__)
#define WANT_CRYPTOPP_ASM32
#endif
#ifndef ARRAY_SIZE
/* Element count of an actual array.  Do NOT use on pointers or array
 * parameters (they decay to pointers and give a wrong answer). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
  188. #ifdef MIPSEB
  189. #ifndef roundl
  190. #define roundl(x) (long double)((long long)((x==0)?0.0:((x)+((x)>0)?0.5:-0.5)))
  191. #endif
  192. #endif
  193. /* No semtimedop on apple so ignore timeout till we implement one */
  194. #ifdef __APPLE__
  195. #define semtimedop(SEM, SOPS, VAL, TIMEOUT) semop(SEM, SOPS, VAL)
  196. #endif
#ifndef MIN
/* Classic function-like macros: both arguments may be evaluated twice,
 * so never pass expressions with side effects (e.g. MIN(i++, j)). */
#define MIN(x, y) ((x) > (y) ? (y) : (x))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
/* Put avalon last to make it the last device it tries to detect to prevent it
 * trying to claim same chip but different devices. Adding a device here will
 * update all macros in the code that use the *_PARSE_COMMANDS macros for each
 * listed driver. */
#define FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	DRIVER_ADD_COMMAND(bitforce) \
	DRIVER_ADD_COMMAND(icarus) \
	DRIVER_ADD_COMMAND(modminer)

#define ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	DRIVER_ADD_COMMAND(bflsc) \
	DRIVER_ADD_COMMAND(bitfury) \
	DRIVER_ADD_COMMAND(hashfast) \
	DRIVER_ADD_COMMAND(klondike) \
	DRIVER_ADD_COMMAND(knc) \
	DRIVER_ADD_COMMAND(bab) \
	DRIVER_ADD_COMMAND(avalon)

/* X-macro aggregating every supported driver: each consumer passes a macro
 * that is expanded once per driver name. */
#define DRIVER_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND)

/* Per-driver expansion helpers for the enum and prototype lists below. */
#define DRIVER_ENUM(X) DRIVER_##X,
#define DRIVER_PROTOTYPE(X) struct device_drv X##_drv;

/* Create drv_driver enum from DRIVER_PARSE_COMMANDS macro */
enum drv_driver {
	DRIVER_PARSE_COMMANDS(DRIVER_ENUM)
	DRIVER_MAX
};

/* Use DRIVER_PARSE_COMMANDS to generate extern device_drv prototypes */
DRIVER_PARSE_COMMANDS(DRIVER_PROTOTYPE)
/* Health/life-cycle state of a device. */
enum alive {
	LIFE_WELL,
	LIFE_SICK,
	LIFE_DEAD,
	LIFE_NOSTART,	/* device never started successfully */
	LIFE_INIT,	/* still initialising */
};
/* How work is distributed among configured pools. */
enum pool_strategy {
	POOL_FAILOVER,
	POOL_ROUNDROBIN,
	POOL_ROTATE,
	POOL_LOADBALANCE,
	POOL_BALANCE,
};

/* Highest-numbered strategy; used for bounds checks on the enum above. */
#define TOP_STRATEGY (POOL_BALANCE)

/* Human-readable name of a strategy (table defined elsewhere). */
struct strategies {
	const char *s;
};
  249. struct cgpu_info;
  250. #ifdef HAVE_ADL
/* Per-GPU state for AMD's ADL (AMD Display Library): temperature, clocks,
 * fan control and the values needed to restore defaults on exit. */
struct gpu_adl {
	ADLTemperature lpTemperature;
	int iAdapterIndex;
	int lpAdapterID;
	int iBusNumber;
	char strAdapterName[256];
	ADLPMActivity lpActivity;
	ADLODParameters lpOdParameters;
	ADLODPerformanceLevels *DefPerfLev;	/* presumably the default perf levels, kept for restore — verify */
	ADLFanSpeedInfo lpFanSpeedInfo;
	ADLFanSpeedValue lpFanSpeedValue;
	ADLFanSpeedValue DefFanSpeedValue;	/* presumably startup fan speed, kept for restore — verify */
	int iEngineClock;
	int iMemoryClock;
	int iVddc;
	int iPercentage;
	bool autofan;		/* automatic fan management active */
	bool autoengine;	/* automatic engine-clock management active */
	bool managed; /* Were the values ever changed on this card */
	int lastengine;
	int lasttemp;
	int targetfan;
	int targettemp;
	int overtemp;
	int minspeed;
	int maxspeed;
	int gpu;
	bool has_fanspeed;
	struct gpu_adl *twin;	/* NOTE(review): looks like the paired GPU on a dual-GPU board — confirm */
};
  281. #endif
  282. extern void blank_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info __maybe_unused *cgpu);
  283. struct api_data;
  284. struct thr_info;
  285. struct work;
/* Driver vtable: one statically-initialised instance per backend (see the
 * DRIVER_PARSE_COMMANDS list above).  Hooks a driver does not need may be
 * left NULL; callers are expected to check before invoking. */
struct device_drv {
	enum drv_driver drv_id;
	char *dname;	/* full driver name */
	char *name;	/* short name used in logs/API output */

	// DRV-global functions
	void (*drv_detect)(bool);

	// Device-specific functions
	void (*reinit_device)(struct cgpu_info *);
	void (*get_statline_before)(char *, size_t, struct cgpu_info *);
	void (*get_statline)(char *, size_t, struct cgpu_info *);
	struct api_data *(*get_api_stats)(struct cgpu_info *);
	bool (*get_stats)(struct cgpu_info *);
	void (*identify_device)(struct cgpu_info *); // e.g. to flash a led
	char *(*set_device)(struct cgpu_info *, char *option, char *setting, char *replybuf);

	// Thread-specific functions
	bool (*thread_prepare)(struct thr_info *);
	uint64_t (*can_limit_work)(struct thr_info *);
	bool (*thread_init)(struct thr_info *);
	bool (*prepare_work)(struct thr_info *, struct work *);

	/* Which hash work loop this driver uses. */
	void (*hash_work)(struct thr_info *);

	/* Two variants depending on whether the device divides work up into
	 * small pieces or works with whole work items and may or may not have
	 * a queue of its own. */
	int64_t (*scanhash)(struct thr_info *, struct work *, int64_t);
	int64_t (*scanwork)(struct thr_info *);

	/* Used to extract work from the hash table of queued work and tell
	 * the main loop that it should not add any further work to the table.
	 */
	bool (*queue_full)(struct cgpu_info *);
	/* Tell the driver of a block change */
	void (*flush_work)(struct cgpu_info *);
	/* Tell the driver of an updated work template for eg. stratum */
	void (*update_work)(struct cgpu_info *);

	void (*hw_error)(struct thr_info *);
	void (*thread_shutdown)(struct thr_info *);
	void (*thread_enable)(struct thr_info *);

	// Does it need to be free()d?
	bool copy;

	/* Highest target diff the device supports */
	double max_diff;
	double working_diff;
};
  329. extern struct device_drv *copy_drv(struct device_drv*);
/* Administrative enable state of a device. */
enum dev_enable {
	DEV_ENABLED,
	DEV_DISABLED,
	DEV_RECOVER,
};

/* Legacy OpenCL GPU kernel selection. */
enum cl_kernels {
	KL_NONE,
	KL_POCLBM,
	KL_PHATK,
	KL_DIAKGCN,
	KL_DIABLO,
	KL_SCRYPT,
};

/* Reason codes recorded when a device goes not-well; each value has a
 * matching REASON_*_STR message defined below. */
enum dev_reason {
	REASON_THREAD_FAIL_INIT,
	REASON_THREAD_ZERO_HASH,
	REASON_THREAD_FAIL_QUEUE,
	REASON_DEV_SICK_IDLE_60,
	REASON_DEV_DEAD_IDLE_600,
	REASON_DEV_NOSTART,
	REASON_DEV_OVER_HEAT,
	REASON_DEV_THERMAL_CUTOFF,
	REASON_DEV_COMMS_ERROR,
	REASON_DEV_THROTTLE,
};
  355. #define REASON_NONE "None"
  356. #define REASON_THREAD_FAIL_INIT_STR "Thread failed to init"
  357. #define REASON_THREAD_ZERO_HASH_STR "Thread got zero hashes"
  358. #define REASON_THREAD_FAIL_QUEUE_STR "Thread failed to queue work"
  359. #define REASON_DEV_SICK_IDLE_60_STR "Device idle for 60s"
  360. #define REASON_DEV_DEAD_IDLE_600_STR "Device dead - idle for 600s"
  361. #define REASON_DEV_NOSTART_STR "Device failed to start"
  362. #define REASON_DEV_OVER_HEAT_STR "Device over heated"
  363. #define REASON_DEV_THERMAL_CUTOFF_STR "Device reached thermal cutoff"
  364. #define REASON_DEV_COMMS_ERROR_STR "Device comms error"
  365. #define REASON_DEV_THROTTLE_STR "Device throttle"
  366. #define REASON_UNKNOWN_STR "Unknown reason - code bug"
  367. #define MIN_SEC_UNSET 99999999
/* Getwork timing statistics (counts plus min/max/accumulated wait). */
struct cgminer_stats {
	uint32_t getwork_calls;
	struct timeval getwork_wait;		/* presumably cumulative wait — verify against users */
	struct timeval getwork_wait_max;
	struct timeval getwork_wait_min;
};
// Just the actual network getworks to the pool
struct cgminer_pool_stats {
	uint32_t getwork_calls;
	uint32_t getwork_attempts;
	struct timeval getwork_wait;
	struct timeval getwork_wait_max;
	struct timeval getwork_wait_min;
	double getwork_wait_rolling;	/* rolling-average wait */
	bool hadrolltime;		/* pool ever supplied a rolltime header */
	bool canroll;
	bool hadexpire;
	uint32_t rolltime;
	/* Share difficulty extremes seen from this pool. */
	double min_diff;
	double max_diff;
	double last_diff;
	uint32_t min_diff_count;
	uint32_t max_diff_count;
	/* Raw traffic accounting (bytes_* is payload, net_bytes_* presumably
	 * includes protocol overhead — verify against the accounting code). */
	uint64_t times_sent;
	uint64_t bytes_sent;
	uint64_t net_bytes_sent;
	uint64_t times_received;
	uint64_t bytes_received;
	uint64_t net_bytes_received;
};
/* Per-device state shared between the core and its driver.  One instance
 * per mining device; driver-specific fields are gated by USE_* macros. */
struct cgpu_info {
	int cgminer_id;			/* global device index */
	struct device_drv *drv;		/* driver vtable */
	int device_id;			/* index within this driver's devices */
	char *name;
	char *device_path;
	void *device_data;		/* driver-private state */
#ifdef USE_USBUTILS
	struct cg_usb_device *usbdev;
#endif
#ifdef USE_AVALON
	struct work **works;
	int work_array;
	int queued;
	int results;
#endif
#ifdef USE_USBUTILS
	struct cg_usb_info usbinfo;
#endif
#ifdef USE_MODMINER
	char fpgaid;
	unsigned char clock;
	pthread_mutex_t *modminer_mutex;
#endif
#ifdef USE_BITFORCE
	struct timeval work_start_tv;
	unsigned int wait_ms;
	unsigned int sleep_ms;
	double avg_wait_f;
	unsigned int avg_wait_d;
	uint32_t nonces;
	bool nonce_range;
	bool polling;
	bool flash_led;
#endif /* USE_BITFORCE */
#if defined(USE_BITFORCE) || defined(USE_BFLSC)
	pthread_mutex_t device_mutex;
#endif /* USE_BITFORCE || USE_BFLSC */
	enum dev_enable deven;		/* enabled/disabled/recovering */
	/* Share and hashrate accounting. */
	int accepted;
	int rejected;
	int hw_errors;
	double rolling;
	double total_mhashes;
	double utility;
	enum alive status;		/* see enum alive above */
	char init[40];
	struct timeval last_message_tv;
	int threads;
	struct thr_info **thr;		/* the mining threads of this device */
	int64_t max_hashes;
	const char *kname;
	bool new_work;
	float temp;
	int cutofftemp;
	int diff1;
	double diff_accepted;
	double diff_rejected;
	int last_share_pool;
	time_t last_share_pool_time;
	double last_share_diff;
	time_t last_device_valid_work;
	/* Health bookkeeping: last-well/not-well timestamps and a counter per
	 * enum dev_reason value. */
	time_t device_last_well;
	time_t device_last_not_well;
	enum dev_reason device_not_well_reason;
	int thread_fail_init_count;
	int thread_zero_hash_count;
	int thread_fail_queue_count;
	int dev_sick_idle_60_count;
	int dev_dead_idle_600_count;
	int dev_nostart_count;
	int dev_over_heat_count;	// It's a warning but worth knowing
	int dev_thermal_cutoff_count;
	int dev_comms_error_count;
	int dev_throttle_count;
	struct cgminer_stats cgminer_stats;
	/* qlock protects the queued/unqueued work fields below. */
	pthread_rwlock_t qlock;
	struct work *queued_work;
	struct work *unqueued_work;
	unsigned int queued_count;
	bool shutdown;
	struct timeval dev_start_tv;
};
  481. extern bool add_cgpu(struct cgpu_info*);
/* Mutex/condvar-protected FIFO used to hand items between threads. */
struct thread_q {
	struct list_head q;
	bool frozen;		/* when set, presumably no further entries accepted — verify in tq_* code */
	pthread_mutex_t mutex;
	pthread_cond_t cond;
};
/* Per mining-thread state. */
struct thr_info {
	int id;			/* global thread id */
	int device_thread;	/* index of this thread within its device */
	bool primary_thread;
	pthread_t pth;
	cgsem_t sem;
	struct thread_q *q;
	struct cgpu_info *cgpu;	/* the device this thread drives */
	void *cgpu_data;
	struct timeval last;
	struct timeval sick;
	bool pause;
	bool getwork;
	double rolling;
	bool work_restart;	/* signal: abandon current work (cf. flush_work) */
	bool work_update;	/* signal: work template updated (cf. update_work) */
};
/* List node carrying a string, linked via the generic elist.h list_head. */
struct string_elist {
	char *string;
	bool free_me;	/* true when string is heap-owned and must be free()d on removal */
	struct list_head list;
};
  510. static inline void string_elist_add(const char *s, struct list_head *head)
  511. {
  512. struct string_elist *n;
  513. n = calloc(1, sizeof(*n));
  514. n->string = strdup(s);
  515. n->free_me = true;
  516. list_add_tail(&n->list, head);
  517. }
  518. static inline void string_elist_del(struct string_elist *item)
  519. {
  520. if (item->free_me)
  521. free(item->string);
  522. list_del(&item->list);
  523. }
  524. static inline uint32_t swab32(uint32_t v)
  525. {
  526. return bswap_32(v);
  527. }
  528. static inline void swap256(void *dest_p, const void *src_p)
  529. {
  530. uint32_t *dest = dest_p;
  531. const uint32_t *src = src_p;
  532. dest[0] = src[7];
  533. dest[1] = src[6];
  534. dest[2] = src[5];
  535. dest[3] = src[4];
  536. dest[4] = src[3];
  537. dest[5] = src[2];
  538. dest[6] = src[1];
  539. dest[7] = src[0];
  540. }
  541. static inline void swab256(void *dest_p, const void *src_p)
  542. {
  543. uint32_t *dest = dest_p;
  544. const uint32_t *src = src_p;
  545. dest[0] = swab32(src[7]);
  546. dest[1] = swab32(src[6]);
  547. dest[2] = swab32(src[5]);
  548. dest[3] = swab32(src[4]);
  549. dest[4] = swab32(src[3]);
  550. dest[5] = swab32(src[2]);
  551. dest[6] = swab32(src[1]);
  552. dest[7] = swab32(src[0]);
  553. }
  554. static inline void flip32(void *dest_p, const void *src_p)
  555. {
  556. uint32_t *dest = dest_p;
  557. const uint32_t *src = src_p;
  558. int i;
  559. for (i = 0; i < 8; i++)
  560. dest[i] = swab32(src[i]);
  561. }
  562. static inline void flip64(void *dest_p, const void *src_p)
  563. {
  564. uint32_t *dest = dest_p;
  565. const uint32_t *src = src_p;
  566. int i;
  567. for (i = 0; i < 16; i++)
  568. dest[i] = swab32(src[i]);
  569. }
  570. static inline void flip80(void *dest_p, const void *src_p)
  571. {
  572. uint32_t *dest = dest_p;
  573. const uint32_t *src = src_p;
  574. int i;
  575. for (i = 0; i < 20; i++)
  576. dest[i] = swab32(src[i]);
  577. }
  578. static inline void flip128(void *dest_p, const void *src_p)
  579. {
  580. uint32_t *dest = dest_p;
  581. const uint32_t *src = src_p;
  582. int i;
  583. for (i = 0; i < 32; i++)
  584. dest[i] = swab32(src[i]);
  585. }
  586. /* For flipping to the correct endianness if necessary */
#if defined(__BIG_ENDIAN__) || defined(MIPSEB)
/* Big-endian hosts: word-swap buffers before use. */
static inline void endian_flip32(void *dest_p, const void *src_p)
{
	flip32(dest_p, src_p);
}

static inline void endian_flip128(void *dest_p, const void *src_p)
{
	flip128(dest_p, src_p);
}
#else
/* Little-endian hosts: data is already in the expected order, so these
 * compile away to no-ops. */
static inline void
endian_flip32(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}

static inline void
endian_flip128(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}
#endif
  606. extern void _quit(int status);
  607. /*
  608. * Set this to non-zero to enable lock tracking
  609. * Use the API lockstats command to see the locking status on stderr
  610. * i.e. in your log file if you 2> log.log - but not on the screen
  611. * API lockstats is privilidged but will always exist and will return
  612. * success if LOCK_TRACKING is enabled and warning if disabled
  613. * In production code, this should never be enabled since it will slow down all locking
  614. * So, e.g. use it to track down a deadlock - after a reproducable deadlock occurs
  615. * ... Of course if the API code itself deadlocks, it wont help :)
  616. */
  617. #define LOCK_TRACKING 0
  618. #if LOCK_TRACKING
  619. enum cglock_typ {
  620. CGLOCK_MUTEX,
  621. CGLOCK_RW,
  622. CGLOCK_UNKNOWN
  623. };
  624. extern uint64_t api_getlock(void *lock, const char *file, const char *func, const int line);
  625. extern void api_gotlock(uint64_t id, void *lock, const char *file, const char *func, const int line);
  626. extern uint64_t api_trylock(void *lock, const char *file, const char *func, const int line);
  627. extern void api_didlock(uint64_t id, int ret, void *lock, const char *file, const char *func, const int line);
  628. extern void api_gunlock(void *lock, const char *file, const char *func, const int line);
  629. extern void api_initlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int line);
  630. #define GETLOCK(_lock, _file, _func, _line) uint64_t _id1 = api_getlock((void *)(_lock), _file, _func, _line)
  631. #define GOTLOCK(_lock, _file, _func, _line) api_gotlock(_id1, (void *)(_lock), _file, _func, _line)
  632. #define TRYLOCK(_lock, _file, _func, _line) uint64_t _id2 = api_trylock((void *)(_lock), _file, _func, _line)
  633. #define DIDLOCK(_ret, _lock, _file, _func, _line) api_didlock(_id2, _ret, (void *)(_lock), _file, _func, _line)
  634. #define GUNLOCK(_lock, _file, _func, _line) api_gunlock((void *)(_lock), _file, _func, _line)
  635. #define INITLOCK(_lock, _typ, _file, _func, _line) api_initlock((void *)(_lock), _typ, _file, _func, _line)
  636. #else
  637. #define GETLOCK(_lock, _file, _func, _line)
  638. #define GOTLOCK(_lock, _file, _func, _line)
  639. #define TRYLOCK(_lock, _file, _func, _line)
  640. #define DIDLOCK(_ret, _lock, _file, _func, _line)
  641. #define GUNLOCK(_lock, _file, _func, _line)
  642. #define INITLOCK(_typ, _lock, _file, _func, _line)
  643. #endif
/* Public locking API.  These macros capture the caller's file/function/line
 * and forward to the _-prefixed inline wrappers below, so lock failures
 * (and LOCK_TRACKING statistics) are attributed to the real call site. */
#define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__)
#define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__)
#define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__)
#define rd_lock(_lock) _rd_lock(_lock, __FILE__, __func__, __LINE__)
#define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__)
#define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__)
#define cglock_init(_lock) _cglock_init(_lock, __FILE__, __func__, __LINE__)
#define cg_rlock(_lock) _cg_rlock(_lock, __FILE__, __func__, __LINE__)
#define cg_ilock(_lock) _cg_ilock(_lock, __FILE__, __func__, __LINE__)
#define cg_ulock(_lock) _cg_ulock(_lock, __FILE__, __func__, __LINE__)
#define cg_wlock(_lock) _cg_wlock(_lock, __FILE__, __func__, __LINE__)
#define cg_dwlock(_lock) _cg_dwlock(_lock, __FILE__, __func__, __LINE__)
#define cg_dwilock(_lock) _cg_dwilock(_lock, __FILE__, __func__, __LINE__)
#define cg_dlock(_lock) _cg_dlock(_lock, __FILE__, __func__, __LINE__)
#define cg_runlock(_lock) _cg_runlock(_lock, __FILE__, __func__, __LINE__)
#define cg_ruwlock(_lock) _cg_ruwlock(_lock, __FILE__, __func__, __LINE__)
#define cg_wunlock(_lock) _cg_wunlock(_lock, __FILE__, __func__, __LINE__)
/* Mutex wrappers.  Any pthread failure aborts the program via quitfrom()
 * with the original caller's location.  The GETLOCK/GOTLOCK/TRYLOCK/DIDLOCK
 * hooks must bracket the pthread call exactly as written: they are the
 * LOCK_TRACKING instrumentation points. */
static inline void _mutex_lock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_mutex_lock(lock)))
		quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

static inline void _mutex_unlock_noyield(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_mutex_unlock(lock)))
		quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK! errno=%d", errno);
	GUNLOCK(lock, file, func, line);
}

/* Unlock, then yield so a thread blocked on this mutex can run at once. */
static inline void _mutex_unlock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	_mutex_unlock_noyield(lock, file, func, line);
	sched_yield();
}

/* Returns 0 when the lock was acquired, non-zero otherwise (pthread
 * semantics); never aborts. */
static inline int _mutex_trylock(pthread_mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_mutex_trylock(lock);
	DIDLOCK(ret, lock, file, func, line);
	return ret;
}
/* rwlock wrappers, same pattern as the mutex wrappers above: abort hard via
 * quitfrom() on any pthread error, with LOCK_TRACKING hooks in place. */
static inline void _wr_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_rwlock_wrlock(lock)))
		quitfrom(1, file, func, line, "WTF WRLOCK ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

/* Returns 0 when the write lock was acquired, non-zero otherwise. */
static inline int _wr_trylock(pthread_rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_rwlock_trywrlock(lock);
	DIDLOCK(ret, lock, file, func, line);
	return ret;
}

static inline void _rd_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_rwlock_rdlock(lock)))
		quitfrom(1, file, func, line, "WTF RDLOCK ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

/* Common unlock for both read and write holds. */
static inline void _rw_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_rwlock_unlock(lock)))
		quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK! errno=%d", errno);
	GUNLOCK(lock, file, func, line);
}

/* _noyield variants release the lock without giving up the CPU. */
static inline void _rd_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}

static inline void _wr_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}

/* Plain unlocks yield afterwards so waiters get scheduled promptly. */
static inline void _rd_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	sched_yield();
}

static inline void _wr_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	sched_yield();
}
  739. static inline void _mutex_init(pthread_mutex_t *lock, const char *file, const char *func, const int line)
  740. {
  741. if (unlikely(pthread_mutex_init(lock, NULL)))
  742. quitfrom(1, file, func, line, "Failed to pthread_mutex_init errno=%d", errno);
  743. INITLOCK(lock, CGLOCK_MUTEX, file, func, line);
  744. }
/* Destroy a mutex created with _mutex_init(). */
static inline void mutex_destroy(pthread_mutex_t *lock)
{
	/* Ignore return code. This only invalidates the mutex on linux but
	 * releases resources on windows. */
	pthread_mutex_destroy(lock);
}
  751. static inline void _rwlock_init(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
  752. {
  753. if (unlikely(pthread_rwlock_init(lock, NULL)))
  754. quitfrom(1, file, func, line, "Failed to pthread_rwlock_init errno=%d", errno);
  755. INITLOCK(lock, CGLOCK_RW, file, func, line);
  756. }
/* Destroy an rwlock created with _rwlock_init(); return code ignored. */
static inline void rwlock_destroy(pthread_rwlock_t *lock)
{
	pthread_rwlock_destroy(lock);
}
/* Initialise a cglock: a mutex guarding entry plus an rwlock, allowing
 * read locks that can be promoted/demoted via the intermediate state. */
static inline void _cglock_init(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_init(&lock->mutex, file, func, line);
	_rwlock_init(&lock->rwlock, file, func, line);
}
/* Destroy both components of a cglock (reverse order of init). */
static inline void cglock_destroy(cglock_t *lock)
{
	rwlock_destroy(&lock->rwlock);
	mutex_destroy(&lock->mutex);
}
/* Read lock variant of cglock. Cannot be promoted. */
static inline void _cg_rlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* The mutex is only held long enough to take the rwlock read side,
	 * serialising entry against writers; it is dropped without a yield. */
	_mutex_lock(&lock->mutex, file, func, line);
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Intermediate variant of cglock - behaves as a read lock but can be promoted
 * to a write lock or demoted to read lock. */
static inline void _cg_ilock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Holds only the entry mutex; the rwlock itself is not taken yet. */
	_mutex_lock(&lock->mutex, file, func, line);
}
/* Upgrade intermediate variant to a write lock */
static inline void _cg_ulock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Caller must already hold the cglock mutex via _cg_ilock(). */
	_wr_lock(&lock->rwlock, file, func, line);
}
/* Write lock variant of cglock */
static inline void _cg_wlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Both mutex and rwlock write side are held until _cg_wunlock(). */
	_mutex_lock(&lock->mutex, file, func, line);
	_wr_lock(&lock->rwlock, file, func, line);
}
/* Downgrade write variant to a read lock */
static inline void _cg_dwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Holding the mutex while swapping write->read prevents another
	 * writer sneaking in between the unlock and the rdlock. */
	_wr_unlock_noyield(&lock->rwlock, file, func, line);
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Demote a write variant to an intermediate variant */
static inline void _cg_dwilock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Drops the rwlock write side (with a yield) but keeps the mutex. */
	_wr_unlock(&lock->rwlock, file, func, line);
}
/* Downgrade intermediate variant to a read lock */
static inline void _cg_dlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	/* Take the read side while still holding the mutex, then release it. */
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}
/* Release the read lock variant of a cglock. */
static inline void _cg_runlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_rd_unlock(&lock->rwlock, file, func, line);
}
/* This drops the read lock and grabs a write lock. It does NOT protect data
 * between the two locks! Other threads may run between the unlock and the
 * write acquisition, so any state read under the read lock must be
 * revalidated by the caller. */
static inline void _cg_ruwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_rd_unlock_noyield(&lock->rwlock, file, func, line);
	_cg_wlock(lock, file, func, line);
}
/* Release the write lock variant of a cglock (rwlock first, then mutex). */
static inline void _cg_wunlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_wr_unlock_noyield(&lock->rwlock, file, func, line);
	_mutex_unlock(&lock->mutex, file, func, line);
}
struct pool;
/* API multicast discovery defaults */
#define API_MCAST_CODE "FTW"
#define API_MCAST_ADDR "224.0.0.75"
/* Global option flags and strings, defined in cgminer.c */
extern bool opt_work_update;
extern bool opt_protocol;
extern bool have_longpoll;
extern char *opt_kernel_path;
extern char *opt_socks_proxy;
extern char *cgminer_path;
extern bool opt_fail_only;
extern bool opt_autofan;
extern bool opt_autoengine;
extern bool use_curses;
/* API server / multicast configuration */
extern char *opt_api_allow;
extern bool opt_api_mcast;
extern char *opt_api_mcast_addr;
extern char *opt_api_mcast_code;
extern char *opt_api_mcast_des;
extern int opt_api_mcast_port;
extern char *opt_api_groups;
extern char *opt_api_description;
extern int opt_api_port;
extern bool opt_api_listen;
extern bool opt_api_network;
extern bool opt_delaynet;
extern bool opt_restart;
extern bool opt_nogpu;
extern char *opt_icarus_options;
extern char *opt_icarus_timing;
extern bool opt_worktime;
/* Driver-specific options, compiled in per device support */
#ifdef USE_AVALON
extern char *opt_avalon_options;
extern char *opt_bitburner_fury_options;
#endif
#ifdef USE_KLONDIKE
extern char *opt_klondike_options;
#endif
#ifdef USE_USBUTILS
extern char *opt_usb_select;
extern int opt_usbdump;
extern bool opt_usb_list_all;
extern cgsem_t usb_resource_sem;
#endif
#ifdef USE_BITFORCE
extern bool opt_bfl_noncerange;
#endif
extern int swork_id;
#if LOCK_TRACKING
extern pthread_mutex_t lockstat_lock;
#endif
extern pthread_rwlock_t netacc_lock;
extern const uint32_t sha256_init_state[];
#ifdef HAVE_LIBCURL
extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
			     const char *rpc_req, bool, bool, int *,
			     struct pool *pool, bool);
#endif
extern const char *proxytype(proxytypes_t proxytype);
extern char *get_proxy(char *url, struct pool *pool);
/* Hex <-> binary conversion helpers; __bin2hex writes into caller storage */
extern void __bin2hex(char *s, const unsigned char *p, size_t len);
extern char *bin2hex(const unsigned char *p, size_t len);
extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);
/* Signature for pluggable CPU sha256 scanhash implementations */
typedef bool (*sha256_func)(struct thr_info*, const unsigned char *pmidstate,
			    unsigned char *pdata,
			    unsigned char *phash1, unsigned char *phash,
			    const unsigned char *ptarget,
			    uint32_t max_nonce,
			    uint32_t *last_nonce,
			    uint32_t nonce);
extern bool fulltest(const unsigned char *hash, const unsigned char *target);
extern int opt_queue;
extern int opt_scantime;
extern int opt_expiry;
/* Global locks, defined in cgminer.c */
#ifdef USE_USBUTILS
extern pthread_mutex_t cgusb_lock;
extern pthread_mutex_t cgusbres_lock;
extern cglock_t cgusb_fd_lock;
#endif
extern cglock_t control_lock;
extern pthread_mutex_t hash_lock;
extern pthread_mutex_t console_lock;
extern cglock_t ch_lock;
extern pthread_rwlock_t mining_thr_lock;
extern pthread_rwlock_t devices_lock;
extern pthread_mutex_t restart_lock;
extern pthread_cond_t restart_cond;
extern void clear_stratum_shares(struct pool *pool);
extern void set_target(unsigned char *dest_target, double diff);
extern int restart_wait(struct thr_info *thr, unsigned int mstime);
extern void kill_work(void);
extern void reinit_device(struct cgpu_info *cgpu);
/* ADL (AMD Display Library) GPU monitoring/tuning */
#ifdef HAVE_ADL
extern bool gpu_stats(int gpu, float *temp, int *engineclock, int *memclock, float *vddc, int *activity, int *fanspeed, int *fanpercent, int *powertune);
extern int set_fanspeed(int gpu, int iFanSpeed);
extern int set_vddc(int gpu, float fVddc);
extern int set_engineclock(int gpu, int iEngineClock);
extern int set_memoryclock(int gpu, int iMemoryClock);
#endif
extern void api(int thr_id);
/* Pool management */
extern struct pool *current_pool(void);
extern int enabled_pools;
extern void get_intrange(char *arg, int *val1, int *val2);
extern bool detect_stratum(struct pool *pool, char *url);
extern void print_summary(void);
extern void adjust_quota_gcd(void);
extern struct pool *add_pool(void);
extern bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass);
#define MAX_GPUDEVICES 16
#define MAX_DEVICES 4096
/* GPU intensity bounds; scrypt needs a different range than sha256 */
#define MIN_SHA_INTENSITY -10
#define MIN_SHA_INTENSITY_STR "-10"
#define MAX_SHA_INTENSITY 14
#define MAX_SHA_INTENSITY_STR "14"
#define MIN_SCRYPT_INTENSITY 8
#define MIN_SCRYPT_INTENSITY_STR "8"
#define MAX_SCRYPT_INTENSITY 20
#define MAX_SCRYPT_INTENSITY_STR "20"
#ifdef USE_SCRYPT
#define MIN_INTENSITY (opt_scrypt ? MIN_SCRYPT_INTENSITY : MIN_SHA_INTENSITY)
#define MIN_INTENSITY_STR (opt_scrypt ? MIN_SCRYPT_INTENSITY_STR : MIN_SHA_INTENSITY_STR)
#define MAX_INTENSITY (opt_scrypt ? MAX_SCRYPT_INTENSITY : MAX_SHA_INTENSITY)
#define MAX_INTENSITY_STR (opt_scrypt ? MAX_SCRYPT_INTENSITY_STR : MAX_SHA_INTENSITY_STR)
#define MAX_GPU_INTENSITY MAX_SCRYPT_INTENSITY
#else
#define MIN_INTENSITY MIN_SHA_INTENSITY
#define MIN_INTENSITY_STR MIN_SHA_INTENSITY_STR
#define MAX_INTENSITY MAX_SHA_INTENSITY
#define MAX_INTENSITY_STR MAX_SHA_INTENSITY_STR
#define MAX_GPU_INTENSITY MAX_SHA_INTENSITY
#endif
/* Runtime device/thread state, defined in cgminer.c */
extern bool hotplug_mode;
extern int hotplug_time;
extern struct list_head scan_devices;
extern int nDevs;
extern int num_processors;
extern int hw_errors;
extern bool use_syslog;
extern bool opt_quiet;
extern struct thr_info *control_thr;
extern struct thr_info **mining_thr;
extern struct cgpu_info gpus[MAX_GPUDEVICES];
extern int gpu_threads;
#ifdef USE_SCRYPT
extern bool opt_scrypt;
#else
#define opt_scrypt (0)
#endif
extern double total_secs;
extern int mining_threads;
extern int total_devices;
extern int zombie_devs;
extern struct cgpu_info **devices;
extern int total_pools;
extern struct pool **pools;
extern struct strategies strategies[];
extern enum pool_strategy pool_strategy;
extern int opt_rotate_period;
/* Global share/hashrate statistics */
extern double total_rolling;
extern double total_mhashes_done;
extern unsigned int new_blocks;
extern unsigned int found_blocks;
  990. extern int total_accepted, total_rejected, total_diff1;;
extern int total_getworks, total_stale, total_discarded;
extern double total_diff_accepted, total_diff_rejected, total_diff_stale;
extern unsigned int local_work;
extern unsigned int total_go, total_ro;
extern const int opt_cutofftemp;
extern int opt_log_interval;
extern unsigned long long global_hashrate;
/* Current best-known block/difficulty state */
extern char current_hash[68];
extern double current_diff;
extern uint64_t best_diff;
extern struct timeval block_timeval;
extern char *workpadding;
/* A pooled curl handle, kept on a per-pool ring (curlring) for reuse. */
struct curl_ent {
	CURL *curl;		/* the libcurl easy handle */
	struct list_head node;	/* linkage in the pool's curlring */
	struct timeval tv;	/* last time this handle was used */
};
/* Disabled needs to be the lowest enum as a freshly calloced value will then
 * equal disabled */
enum pool_enable {
	POOL_DISABLED,
	POOL_ENABLED,
	POOL_REJECTING,	/* pool is up but rejecting our shares */
};
/* Per-pool copy of the latest stratum mining.notify job parameters. */
struct stratum_work {
	char *job_id;
	char *prev_hash;
	unsigned char **merkle_bin;	/* array of 'merkles' 32-byte branches */
	char *bbversion;
	char *nbit;
	char *ntime;
	bool clean;		/* true if old jobs should be discarded */
	size_t cb_len;		/* total coinbase length */
	size_t header_len;
	int merkles;		/* number of merkle branches */
	double diff;		/* share difficulty for this job */
};
/* Stratum socket receive buffer sizing */
#define RBUFSIZE 8192
#define RECVSIZE (RBUFSIZE - 4)
/* Everything cgminer knows about one configured pool: credentials,
 * statistics, transport state (getwork/stratum/GBT) and its locks. */
struct pool {
	int pool_no;		/* index assigned at add_pool() time */
	int prio;		/* priority for failover strategy */
	/* Share accounting */
	int accepted, rejected;
	int seq_rejects;	/* consecutive rejects, for pool disabling */
	int seq_getfails;
	int solved;
	int diff1;
	char diff[8];		/* display string of current difficulty */
	/* Quota-based load balancing */
	int quota;
	int quota_gcd;
	int quota_used;
	int works;
	double diff_accepted;
	double diff_rejected;
	double diff_stale;
	/* Status flags */
	bool submit_fail;
	bool idle;
	bool lagging;
	bool probed;
	enum pool_enable enabled;
	bool submit_old;
	bool removed;
	bool lp_started;
	char *hdr_path;
	char *lp_url;
	/* Failure/occasion counters */
	unsigned int getwork_requested;
	unsigned int stale_shares;
	unsigned int discarded_work;
	unsigned int getfail_occasions;
	unsigned int remotefail_occasions;
	struct timeval tv_idle;
	double utility;
	int last_shares, shares;
	/* RPC credentials and proxy settings */
	char *rpc_req;
	char *rpc_url;
	char *rpc_userpass;
	char *rpc_user, *rpc_pass;
	proxytypes_t rpc_proxytype;
	char *rpc_proxy;
	/* pool_lock guards the counters above; data_lock guards work data */
	pthread_mutex_t pool_lock;
	cglock_t data_lock;
	struct thread_q *submit_q;
	struct thread_q *getwork_q;
	pthread_t longpoll_thread;
	pthread_t test_thread;
	bool testing;
	/* Pooled curl handles (see struct curl_ent) */
	int curls;
	pthread_cond_t cr_cond;
	struct list_head curlring;
	time_t last_share_time;
	double last_share_diff;
	uint64_t best_diff;
	struct cgminer_stats cgminer_stats;
	struct cgminer_pool_stats cgminer_pool_stats;
	/* The last block this particular pool knows about */
	char prev_block[32];
	/* Stratum variables */
	char *stratum_url;
	char *stratum_port;
	struct addrinfo stratum_hints;
	SOCKETTYPE sock;
	char *sockbuf;
	size_t sockbuf_size;
	char *sockaddr_url; /* stripped url used for sockaddr */
	char *sockaddr_proxy_url;
	char *sockaddr_proxy_port;
	char *nonce1;		/* extranonce1 as hex string */
	unsigned char *nonce1bin;
	size_t n1_len;
	uint32_t nonce2;	/* next extranonce2 to use */
	int n2size;
	char *sessionid;
	bool has_stratum;
	bool stratum_active;
	bool stratum_init;
	bool stratum_notify;
	struct stratum_work swork;
	pthread_t stratum_sthread;
	pthread_t stratum_rthread;
	pthread_mutex_t stratum_lock;
	struct thread_q *stratum_q;
	int sshares; /* stratum shares submitted waiting on response */
	/* GBT variables */
	bool has_gbt;
	cglock_t gbt_lock;
	unsigned char previousblockhash[32];
	unsigned char gbt_target[32];
	char *coinbasetxn;
	char *longpollid;
	char *gbt_workid;
	int gbt_expires;
	uint32_t gbt_version;
	uint32_t curtime;
	uint32_t gbt_bits;
	unsigned char *txn_hashes;
	int gbt_txns;
	int coinbase_len;
	/* Shared by both stratum & GBT */
	unsigned char *coinbase;
	int nonce2_offset;
	unsigned char header_bin[128];
	int merkle_offset;
	struct timeval tv_lastwork;
};
/* Single-character tags recording how a work item was obtained */
#define GETWORK_MODE_TESTPOOL 'T'
#define GETWORK_MODE_POOL 'P'
#define GETWORK_MODE_LP 'L'
#define GETWORK_MODE_BENCHMARK 'B'
#define GETWORK_MODE_STRATUM 'S'
#define GETWORK_MODE_GBT 'G'
/* One unit of mining work: 80-byte header (padded to 128), its midstate,
 * target, and all bookkeeping for rolling, cloning and share submission. */
struct work {
	unsigned char data[128];	/* padded block header */
	unsigned char midstate[32];
	unsigned char target[32];	/* share target */
	unsigned char hash[32];		/* hash of a found share */
#ifdef USE_SCRYPT
	unsigned char device_target[32];
#endif
	double device_diff;
	uint64_t share_diff;
	/* ntime-rolling state */
	int rolls;
	int drv_rolllimit; /* How much the driver can roll ntime */
	uint32_t nonce; /* For devices that hash sole work */
	struct thr_info *thr;
	int thr_id;
	struct pool *pool;
	struct timeval tv_staged;
	/* Lifecycle flags */
	bool mined;
	bool clone;
	bool cloned;
	int rolltime;
	bool longpoll;
	bool stale;
	bool mandatory;
	bool block;		/* this work solved a block */
	/* Stratum-specific fields */
	bool stratum;
	char *job_id;
	uint32_t nonce2;
	size_t nonce2_len;
	char *ntime;
	double sdiff;
	char *nonce1;
	/* GBT-specific fields */
	bool gbt;
	char *coinbase;
	int gbt_txns;
	unsigned int work_block;
	int id;			/* unique id, key for the hash table below */
	UT_hash_handle hh;	/* uthash handle for queued-work lookup */
	double work_difficulty;
	// Allow devices to identify work if multiple sub-devices
	int subid;
	// Allow devices to flag work for their own purposes
	bool devflag;
	// Allow devices to timestamp work for their own purposes
	struct timeval tv_stamp;
	/* Timing for latency/expiry calculations */
	struct timeval tv_getwork;
	struct timeval tv_getwork_reply;
	struct timeval tv_cloned;
	struct timeval tv_work_start;
	struct timeval tv_work_found;
	char getwork_mode;	/* one of the GETWORK_MODE_* tags above */
};
#ifdef USE_MODMINER
/* Per-FPGA runtime state for the ModMiner driver. */
struct modminer_fpga_state {
	bool work_running;
	struct work running_work;	/* copy of the work being hashed */
	struct timeval tv_workstart;
	uint32_t hashes;
	char next_work_cmd[46];		/* pre-built command for next work */
	char fpgaid;
	/* Thermal/clocking management */
	bool overheated;
	bool new_work;
	/* Share quality tracking used for dynamic clock adjustment */
	uint32_t shares;
	uint32_t shares_last_hw;
	uint32_t hw_errors;
	uint32_t shares_to_good;
	uint32_t timeout_fail;
	uint32_t success_more;
	struct timeval last_changed;
	struct timeval last_nonce;
	struct timeval first_work;
	bool death_stage_one;
	/* Temperature-reading protocol quirks */
	bool tried_two_byte_temp;
	bool one_byte_temp;
};
#endif
  1217. #define TAILBUFSIZ 64
  1218. #define tailsprintf(buf, bufsiz, fmt, ...) do { \
  1219. char tmp13[TAILBUFSIZ]; \
  1220. size_t len13, buflen = strlen(buf); \
  1221. snprintf(tmp13, sizeof(tmp13), fmt, ##__VA_ARGS__); \
  1222. len13 = strlen(tmp13); \
  1223. if ((buflen + len13) >= bufsiz) \
  1224. quit(1, "tailsprintf buffer overflow in %s %s line %d", __FILE__, __func__, __LINE__); \
  1225. strcat(buf, tmp13); \
  1226. } while (0)
extern void get_datestamp(char *, size_t, struct timeval *);
extern void inc_hw_errors(struct thr_info *thr);
/* Nonce testing and share submission */
extern bool test_nonce(struct work *work, uint32_t nonce);
extern bool test_nonce_diff(struct work *work, uint32_t nonce, double diff);
extern void submit_tested_work(struct thr_info *thr, struct work *work);
extern bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce);
extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce,
				 int noffset);
/* Work acquisition and queued-work management (double-underscore variants
 * expect the caller to already hold the relevant lock) */
extern struct work *get_work(struct thr_info *thr, const int thr_id);
extern void __add_queued(struct cgpu_info *cgpu, struct work *work);
extern struct work *get_queued(struct cgpu_info *cgpu);
extern void add_queued(struct cgpu_info *cgpu, struct work *work);
extern struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id);
extern struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern void __work_completed(struct cgpu_info *cgpu, struct work *work);
extern void work_completed(struct cgpu_info *cgpu, struct work *work);
extern struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern void hash_driver_work(struct thr_info *mythr);
extern void hash_queued_work(struct thr_info *mythr);
/* Curses logging window helpers */
extern void _wlog(const char *str);
extern void _wlogprint(const char *str);
extern int curses_int(const char *query);
extern char *curses_input(const char *query);
extern void kill_work(void);
extern void switch_pools(struct pool *selected);
extern void discard_work(struct work *work);
extern void remove_pool(struct pool *pool);
extern void write_config(FILE *fcfg);
extern void zero_bestshare(void);
extern void zero_stats(void);
extern void default_save_file(char *filename);
extern bool log_curses_only(int prio, const char *datetime, const char *str);
extern void clear_logwin(void);
extern void logwin_update(void);
extern bool pool_tclear(struct pool *pool, bool *var);
/* Generic blocking thread queue */
extern struct thread_q *tq_new(void);
extern void tq_free(struct thread_q *tq);
extern bool tq_push(struct thread_q *tq, void *data);
extern void *tq_pop(struct thread_q *tq, const struct timespec *abstime);
extern void tq_freeze(struct thread_q *tq);
extern void tq_thaw(struct thread_q *tq);
extern bool successful_connect;
extern void adl(void);
extern void app_restart(void);
/* Work lifetime: clean frees internals, free releases the work itself */
extern void clean_work(struct work *work);
extern void free_work(struct work *work);
extern struct work *copy_work_noffset(struct work *base_work, int noffset);
#define copy_work(work_in) copy_work_noffset(work_in, 0)
extern struct thr_info *get_thread(int thr_id);
extern struct cgpu_info *get_devices(int id);
/* Type tag for an api_data value; selects how the 'data' pointer in
 * struct api_data is interpreted and formatted by the API output code. */
enum api_data_type {
	API_ESCAPE,	/* string that needs JSON escaping */
	API_STRING,
	API_CONST,	/* string constant, never freed */
	API_UINT8,
	API_UINT16,
	API_INT,
	API_UINT,
	API_UINT32,
	API_UINT64,
	API_DOUBLE,
	API_ELAPSED,	/* double formatted as elapsed seconds */
	API_BOOL,
	API_TIMEVAL,
	API_TIME,
	API_MHS,	/* hashrate in MH/s */
	API_MHTOTAL,
	API_TEMP,
	API_UTILITY,
	API_FREQ,
	API_VOLTS,
	API_HS,		/* hashrate in H/s */
	API_DIFF,
	API_PERCENT
};
/* One name/value pair in a doubly-linked list of API reply fields,
 * built up with the api_add_* helpers below. */
struct api_data {
	enum api_data_type type;	/* how to interpret/format 'data' */
	char *name;
	void *data;
	bool data_was_malloc;	/* true if 'data' must be freed with the node */
	struct api_data *prev;
	struct api_data *next;
};
/* Append a typed name/value pair to an api_data list; when copy_data is
 * true the value is duplicated, otherwise the caller's pointer is stored. */
extern struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_string(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data);
extern struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data);
extern struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data);
extern struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data);
extern struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data);
extern struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data);
extern struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data);
extern struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data);
extern struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data);
extern struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data);
extern struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_mhstotal(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data);
extern struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data);
extern struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data);
#endif /* __MINER_H__ */