driver-bitforce.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427
  1. /*
  2. * Copyright 2012 Luke Dashjr
  3. * Copyright 2012 Con Kolivas
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the Free
  7. * Software Foundation; either version 3 of the License, or (at your option)
  8. * any later version. See COPYING for more details.
  9. */
  10. #include <limits.h>
  11. #include <pthread.h>
  12. #include <stdio.h>
  13. #include <strings.h>
  14. #include <sys/time.h>
  15. #include <unistd.h>
  16. #include "config.h"
  17. #include "fpgautils.h"
  18. #include "miner.h"
  19. #define BITFORCE_SLEEP_MS 2000
  20. #define BITFORCE_TIMEOUT_MS 15000
  21. #define BITFORCE_CHECK_INTERVAL_MS 10
  22. #define WORK_CHECK_INTERVAL_MS 50
  23. #define MAX_START_DELAY_US 100000
  24. struct device_api bitforce_api;
  25. #define BFopen(devpath) serial_open(devpath, 0, -1, true)
  26. static void BFgets(char *buf, size_t bufLen, int fd)
  27. {
  28. do
  29. --bufLen;
  30. while (likely(bufLen && read(fd, buf, 1) && (buf++)[0] != '\n'))
  31. ;
  32. buf[0] = '\0';
  33. }
  34. static ssize_t BFwrite(int fd, const void *buf, ssize_t bufLen)
  35. {
  36. if ((bufLen) != write(fd, buf, bufLen)) {
  37. applog(LOG_ERR, "BFL: Error writing: %s", buf);
  38. return 0;
  39. } else
  40. return bufLen;
  41. }
  42. #define BFclose(fd) close(fd)
  43. static bool bitforce_detect_one(const char *devpath)
  44. {
  45. char *s;
  46. char pdevbuf[0x100];
  47. applog(LOG_DEBUG, "BFL: Attempting to open %s", devpath);
  48. int fdDev = BFopen(devpath);
  49. if (unlikely(fdDev == -1)) {
  50. applog(LOG_ERR, "BFL: Failed to open %s", devpath);
  51. return false;
  52. }
  53. BFwrite(fdDev, "ZGX", 3);
  54. BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
  55. if (unlikely(!pdevbuf[0])) {
  56. applog(LOG_ERR, "BFL: Error reading (ZGX)");
  57. return 0;
  58. }
  59. BFclose(fdDev);
  60. if (unlikely(!strstr(pdevbuf, "SHA256"))) {
  61. applog(LOG_ERR, "BFL: Didn't recognise BitForce on %s", devpath);
  62. return false;
  63. }
  64. // We have a real BitForce!
  65. struct cgpu_info *bitforce;
  66. bitforce = calloc(1, sizeof(*bitforce));
  67. bitforce->api = &bitforce_api;
  68. bitforce->device_path = strdup(devpath);
  69. bitforce->deven = DEV_ENABLED;
  70. bitforce->threads = 1;
  71. bitforce->sleep_ms = BITFORCE_SLEEP_MS;
  72. if (likely((!memcmp(pdevbuf, ">>>ID: ", 7)) && (s = strstr(pdevbuf + 3, ">>>"))))
  73. {
  74. s[0] = '\0';
  75. bitforce->name = strdup(pdevbuf + 7);
  76. }
  77. mutex_init(&bitforce->device_mutex);
  78. return add_cgpu(bitforce);
  79. }
  80. static char bitforce_detect_auto()
  81. {
  82. return
  83. serial_autodetect_udev (bitforce_detect_one, "BitFORCE*SHA256") ?:
  84. serial_autodetect_devserial(bitforce_detect_one, "BitFORCE_SHA256") ?:
  85. 0;
  86. }
/* Top-level detection hook: scan user-configured serial devices, falling
 * back to the auto-detection routine above. */
static void bitforce_detect()
{
	serial_detect_auto(bitforce_api.dname, bitforce_detect_one, bitforce_detect_auto);
}
  91. static void get_bitforce_statline_before(char *buf, struct cgpu_info *bitforce)
  92. {
  93. float gt = bitforce->temp;
  94. if (gt > 0)
  95. tailsprintf(buf, "%5.1fC ", gt);
  96. else
  97. tailsprintf(buf, " ", gt);
  98. tailsprintf(buf, " | ");
  99. }
  100. static bool bitforce_thread_prepare(struct thr_info *thr)
  101. {
  102. struct cgpu_info *bitforce = thr->cgpu;
  103. struct timeval now;
  104. int fdDev = BFopen(bitforce->device_path);
  105. if (unlikely(-1 == fdDev)) {
  106. applog(LOG_ERR, "BFL%i: Failed to open %s", bitforce->device_id, bitforce->device_path);
  107. return false;
  108. }
  109. bitforce->device_fd = fdDev;
  110. applog(LOG_INFO, "BFL%i: Opened %s", bitforce->device_id, bitforce->device_path);
  111. gettimeofday(&now, NULL);
  112. get_datestamp(bitforce->init, &now);
  113. return true;
  114. }
  115. void bitforce_init(struct cgpu_info *bitforce)
  116. {
  117. int fdDev = bitforce->device_fd;
  118. char *devpath = bitforce->device_path;
  119. char pdevbuf[0x100];
  120. char *s;
  121. applog(LOG_WARNING, "BFL%i: Re-initalizing", bitforce->device_id);
  122. mutex_lock(&bitforce->device_mutex);
  123. if (fdDev)
  124. BFclose(fdDev);
  125. bitforce->device_fd = 0;
  126. fdDev = BFopen(devpath);
  127. if (unlikely(fdDev == -1)) {
  128. applog(LOG_ERR, "BFL%i: Failed to open %s", bitforce->device_id, devpath);
  129. mutex_unlock(&bitforce->device_mutex);
  130. return;
  131. }
  132. BFwrite(fdDev, "ZGX", 3);
  133. BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
  134. if (unlikely(!pdevbuf[0])) {
  135. applog(LOG_ERR, "BFL%i: Error reading (ZGX)", bitforce->device_id);
  136. mutex_unlock(&bitforce->device_mutex);
  137. return;
  138. }
  139. if (unlikely(!strstr(pdevbuf, "SHA256"))) {
  140. applog(LOG_ERR, "BFL%i: Didn't recognise BitForce on %s returned: %s", bitforce->device_id, devpath, pdevbuf);
  141. mutex_unlock(&bitforce->device_mutex);
  142. return;
  143. }
  144. if (likely((!memcmp(pdevbuf, ">>>ID: ", 7)) && (s = strstr(pdevbuf + 3, ">>>"))))
  145. {
  146. s[0] = '\0';
  147. bitforce->name = strdup(pdevbuf + 7);
  148. }
  149. bitforce->device_fd = fdDev;
  150. mutex_unlock(&bitforce->device_mutex);
  151. }
/* Poll the device temperature with the ZLX command and cache it in
 * bitforce->temp.  If it exceeds the configured cutoff, move the device
 * to DEV_RECOVER and record a thermal-cutoff event.
 * Returns false if the device isn't open or the read failed; true
 * otherwise (including when the reply isn't a parseable TEMP line). */
static bool bitforce_get_temp(struct cgpu_info *bitforce)
{
	int fdDev = bitforce->device_fd;
	char pdevbuf[0x100];
	char *s;

	if (!fdDev)
		return false;

	/* Serialize the request/response pair against the work-submission path. */
	mutex_lock(&bitforce->device_mutex);
	BFwrite(fdDev, "ZLX", 3);
	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
	mutex_unlock(&bitforce->device_mutex);
	if (unlikely(!pdevbuf[0])) {
		applog(LOG_ERR, "BFL%i: Error reading (ZLX)", bitforce->device_id);
		bitforce->temp = 0;
		return false;
	}
	/* Expected reply shape: "TEMP...: <float>" */
	if ((!strncasecmp(pdevbuf, "TEMP", 4)) && (s = strchr(pdevbuf + 4, ':'))) {
		float temp = strtof(s + 1, NULL);
		if (temp > 0) {
			bitforce->temp = temp;
			if (temp > bitforce->cutofftemp) {
				applog(LOG_WARNING, "BFL%i: Hit thermal cutoff limit, disabling!", bitforce->device_id);
				bitforce->deven = DEV_RECOVER;
				bitforce->device_last_not_well = time(NULL);
				bitforce->device_not_well_reason = REASON_DEV_THERMAL_CUTOFF;
				bitforce->dev_thermal_cutoff_count++;
			}
		}
	}
	return true;
}
/* Hand one unit of work to the device: query readiness with ZDX, then
 * transmit the 60-byte payload ">>>>>>>>" + 32-byte midstate + 12 bytes
 * of block-header tail + ">>>>>>>>".  Returns true when the work was
 * accepted (or the device reported it is busy throttling), false on any
 * communication error. */
static bool bitforce_send_work(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	int fdDev = bitforce->device_fd;
	char pdevbuf[0x100];
	/* Template payload; the digit run is a placeholder overwritten below. */
	unsigned char ob[61] = ">>>>>>>>12345678901234567890123456789012123456789012>>>>>>>>";
	char *s;

	if (!fdDev)
		return false;

	mutex_lock(&bitforce->device_mutex);
	BFwrite(fdDev, "ZDX", 3);
	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
	if (unlikely(!pdevbuf[0])) {
		applog(LOG_ERR, "BFL%i: Error reading (ZDX)", bitforce->device_id);
		mutex_unlock(&bitforce->device_mutex);
		return false;
	}
	/* 'B' = busy/throttling: report success without queueing new work. */
	if (pdevbuf[0] == 'B'){
		applog(LOG_WARNING, "BFL%i: Throttling", bitforce->device_id);
		mutex_unlock(&bitforce->device_mutex);
		return true;
	}
	else if (unlikely(pdevbuf[0] != 'O' || pdevbuf[1] != 'K')) {
		applog(LOG_ERR, "BFL%i: ZDX reports: %s", bitforce->device_id, pdevbuf);
		mutex_unlock(&bitforce->device_mutex);
		return false;
	}
	/* Splice the real midstate and data tail into the template. */
	memcpy(ob + 8, work->midstate, 32);
	memcpy(ob + 8 + 32, work->data + 64, 12);
	/* 60 bytes: the template's trailing NUL is not transmitted. */
	BFwrite(fdDev, ob, 60);
	if (opt_debug) {
		s = bin2hex(ob + 8, 44);
		applog(LOG_DEBUG, "BFL%i: block data: %s", bitforce->device_id, s);
		free(s);
	}
	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
	mutex_unlock(&bitforce->device_mutex);
	if (unlikely(!pdevbuf[0])) {
		applog(LOG_ERR, "BFL%i: Error reading (block data)", bitforce->device_id);
		return false;
	}
	if (unlikely(pdevbuf[0] != 'O' || pdevbuf[1] != 'K')) {
		applog(LOG_ERR, "BFL%i: block data reports: %s", bitforce->device_id, pdevbuf);
		return false;
	}
	return true;
}
/* Poll the device with ZFX until hashing finishes (reply no longer starts
 * with 'B'), then parse and submit any nonces found.  Returns 0xffffffff
 * when the nonce range was fully scanned, 1 on idle/timeout/odd reply,
 * or 0 on a communication failure. */
static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	int fdDev = bitforce->device_fd;
	char pdevbuf[0x100];
	char *pnoncebuf;
	uint32_t nonce;

	if (!fdDev)
		return 0;

	while (bitforce->wait_ms < BITFORCE_TIMEOUT_MS) {
		mutex_lock(&bitforce->device_mutex);
		BFwrite(fdDev, "ZFX", 3);
		BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
		mutex_unlock(&bitforce->device_mutex);
		if (unlikely(!pdevbuf[0])) {
			applog(LOG_ERR, "BFL%i: Error reading (ZFX)", bitforce->device_id);
			return 0;
		}
		/* 'B' = still busy hashing; anything else is a final reply. */
		if (pdevbuf[0] != 'B')
			break;
		usleep(BITFORCE_CHECK_INTERVAL_MS*1000);
		bitforce->wait_ms += BITFORCE_CHECK_INTERVAL_MS;
	}
	if (bitforce->wait_ms >= BITFORCE_TIMEOUT_MS) {
		applog(LOG_ERR, "BFL%i: took longer than 15s", bitforce->device_id);
		bitforce->device_last_not_well = time(NULL);
		bitforce->device_not_well_reason = REASON_DEV_OVER_HEAT;
		bitforce->dev_over_heat_count++;
		return 1;
	} else if (pdevbuf[0] == 'N') {/* Hashing complete (NONCE-FOUND or NO-NONCE) */
		/* Simple timing adjustment: nudge sleep_ms toward the device's
		 * observed completion time so future waits start closer to done. */
		if (bitforce->wait_ms > (bitforce->sleep_ms + WORK_CHECK_INTERVAL_MS))
			bitforce->sleep_ms += WORK_CHECK_INTERVAL_MS;
		else if (bitforce->wait_ms == bitforce->sleep_ms)
			bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS;
	}
	applog(LOG_DEBUG, "BFL%i: waited %dms until %s", bitforce->device_id, bitforce->wait_ms, pdevbuf);
	work->blk.nonce = 0xffffffff;
	/* "NO-NONCE" carries '-' at index 2. */
	if (pdevbuf[2] == '-')
		return 0xffffffff; /* No valid nonce found */
	else if (pdevbuf[0] == 'I')
		return 1; /* Device idle */
	else if (strncasecmp(pdevbuf, "NONCE-FOUND", 11)) {
		applog(LOG_WARNING, "BFL%i: result reports: %s", bitforce->device_id, pdevbuf);
		return 1;
	}
	/* Assumes 8-hex-digit nonces, comma-separated, starting at offset 12
	 * (just past "NONCE-FOUND:") — per the device's reply format. */
	pnoncebuf = &pdevbuf[12];
	while (1) {
		hex2bin((void*)&nonce, pnoncebuf, 4);
#ifndef __BIG_ENDIAN__
		nonce = swab32(nonce);
#endif
		submit_nonce(thr, work, nonce);
		if (pnoncebuf[8] != ',')
			break;
		pnoncebuf += 9;
	}
	return 0xffffffff;
}
  289. static void bitforce_shutdown(struct thr_info *thr)
  290. {
  291. struct cgpu_info *bitforce = thr->cgpu;
  292. BFclose(bitforce->device_fd);
  293. bitforce->device_fd = 0;
  294. }
/* Re-enable hook: re-open and re-probe the device.
 * NOTE(review): "biforce" (sic) — the misspelled name is wired into the
 * device_api table, so it is kept unchanged. */
static void biforce_thread_enable(struct thr_info *thr)
{
	struct cgpu_info *bitforce = thr->cgpu;
	bitforce_init(bitforce);
}
  300. extern bool opt_submit_stale;
/* Main per-work driver loop: submit the work, wait out the estimated
 * hashing time (polling for work restarts unless stale submission is
 * acceptable), then collect results.  Returns the hash-count estimate
 * from bitforce_get_result, or 1 ("no hashes") on discard/comms error. */
static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint64_t __maybe_unused max_nonce)
{
	struct cgpu_info *bitforce = thr->cgpu;
	bool submit_old = work->pool->submit_old;
	bitforce->wait_ms = 0;
	uint64_t ret;
	ret = bitforce_send_work(thr, work);
	if(!opt_submit_stale || !submit_old) {
		/* Sleep in small slices so a work restart can abandon this unit early. */
		while (bitforce->wait_ms < bitforce->sleep_ms) {
			usleep(WORK_CHECK_INTERVAL_MS*1000);
			bitforce->wait_ms += WORK_CHECK_INTERVAL_MS;
			if (work_restart[thr->id].restart) {
				applog(LOG_DEBUG, "BFL%i: Work restart, discarding after %dms", bitforce->device_id, bitforce->wait_ms);
				return 1; //we have discarded all work; equivalent to 0 hashes done.
			}
		}
	} else {
		/* Stale results are acceptable: sleep the whole interval at once. */
		usleep(bitforce->sleep_ms*1000);
		bitforce->wait_ms = bitforce->sleep_ms;
	}
	if (ret)
		ret = bitforce_get_result(thr, work);
	if (!ret) {
		/* Either the send or the result collection failed: log a comms error. */
		ret = 1;
		applog(LOG_ERR, "BFL%i: Comms error", bitforce->device_id);
		bitforce->device_last_not_well = time(NULL);
		bitforce->device_not_well_reason = REASON_DEV_NOSTART;
		bitforce->dev_nostart_count++;
	}
	return ret;
}
/* Stats hook: the only per-poll statistic this driver reports is the
 * device temperature. */
static bool bitforce_get_stats(struct cgpu_info *bitforce)
{
	return bitforce_get_temp(bitforce);
}
  336. static bool bitforce_thread_init(struct thr_info *thr)
  337. {
  338. struct cgpu_info *bitforce = thr->cgpu;
  339. unsigned int wait;
  340. /* Pause each new thread a random time between 0-100ms
  341. so the devices aren't making calls all at the same time. */
  342. wait = (rand() * MAX_START_DELAY_US)/RAND_MAX;
  343. applog(LOG_DEBUG, "BFL%i: Delaying start by %dms", bitforce->device_id, wait/1000);
  344. usleep(wait);
  345. return true;
  346. }
/* Driver vtable registered with the core miner. */
struct device_api bitforce_api = {
	.dname = "bitforce",
	.name = "BFL",
	.api_detect = bitforce_detect,
	.reinit_device = bitforce_init,
	.get_statline_before = get_bitforce_statline_before,
	.get_stats = bitforce_get_stats,
	.thread_prepare = bitforce_thread_prepare,
	.thread_init = bitforce_thread_init,
	.scanhash = bitforce_scanhash,
	.thread_shutdown = bitforce_shutdown,
	.thread_enable = biforce_thread_enable /* (sic: "biforce") */
};