driver-avalonmm.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641
  1. /*
  2. * Copyright 2014 Luke Dashjr
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License as published by the Free
  6. * Software Foundation; either version 3 of the License, or (at your option)
  7. * any later version. See COPYING for more details.
  8. */
  9. #include "config.h"
  10. #include <stdbool.h>
  11. #include <stdint.h>
  12. #include <stdlib.h>
  13. #include <string.h>
  14. #include <unistd.h>
  15. #include <utlist.h>
  16. #include "deviceapi.h"
  17. #include "logging.h"
  18. #include "lowlevel.h"
  19. #include "lowl-vcom.h"
  20. #include "miner.h"
  21. #include "util.h"
  22. #include "work2d.h"
// Maximum number of MM modules that can be daisy-chained on one serial port
#define AVALONMM_MAX_MODULES 4
// Largest coinbase transaction the firmware will accept
#define AVALONMM_MAX_COINBASE_SIZE (6 * 1024)
// Largest merkle branch depth the firmware will accept
#define AVALONMM_MAX_MERKLES 20
// Cap on per-nonce difficulty so shares keep flowing
#define AVALONMM_MAX_NONCE_DIFF 0x20
// Must be a power of two
#define AVALONMM_CACHED_JOBS 2
// Firmware reports nonces offset by this constant; subtract before use
#define AVALONMM_NONCE_OFFSET 0x180
BFG_REGISTER_DRIVER(avalonmm_drv)
static const struct bfg_set_device_definition avalonmm_set_device_funcs[];
// Each packet carries a fixed 0x20-byte payload...
#define AVALONMM_PKT_DATA_SIZE 0x20
// ...plus "AV", command, index, count (5 bytes) and a 16-bit CRC (2 bytes)
#define AVALONMM_PKT_SIZE (AVALONMM_PKT_DATA_SIZE + 7)
// Host-to-device command opcodes (byte 2 of each packet)
enum avalonmm_cmd {
	AMC_DETECT   = 0x0a,  // probe for a module with a given id
	AMC_NEW_JOB  = 0x0b,  // job metadata: coinbase length, offsets, merkle count
	AMC_JOB_ID   = 0x0c,  // 32-bit job identifier
	AMC_COINBASE = 0x0d,  // raw coinbase transaction (sent chunked)
	AMC_MERKLES  = 0x0e,  // merkle branch hashes (sent chunked)
	AMC_BLKHDR   = 0x0f,  // 0x80-byte padded block header template
	AMC_POLL     = 0x10,  // request status/nonce replies from a module
	AMC_TARGET   = 0x11,  // 0x20-byte share target
	AMC_START    = 0x13,  // fan/voltage/clock config + xnonce2 range; starts hashing
};
// Device-to-host reply opcodes (byte 2 of each packet)
enum avalonmm_reply {
	AMR_NONCE      = 0x17,  // a found nonce with its jobid/xnonce2/module id
	AMR_STATUS     = 0x18,  // temperatures, fans, actual clock, voltage
	AMR_DETECT_ACK = 0x19,  // answer to AMC_DETECT; carries the module id
};
  50. static
  51. bool avalonmm_write_cmd(const int fd, const enum avalonmm_cmd cmd, const void *data, size_t datasz)
  52. {
  53. uint8_t packets = ((datasz + AVALONMM_PKT_DATA_SIZE - 1) / AVALONMM_PKT_DATA_SIZE) ?: 1;
  54. uint8_t pkt[AVALONMM_PKT_SIZE] = {'A', 'V', cmd, 1, packets};
  55. uint16_t crc;
  56. ssize_t r;
  57. while (true)
  58. {
  59. size_t copysz = AVALONMM_PKT_DATA_SIZE;
  60. if (datasz < copysz)
  61. {
  62. copysz = datasz;
  63. memset(&pkt[5 + copysz], '\0', AVALONMM_PKT_DATA_SIZE - copysz);
  64. }
  65. if (copysz)
  66. memcpy(&pkt[5], data, copysz);
  67. crc = crc16xmodem(&pkt[5], AVALONMM_PKT_DATA_SIZE);
  68. pk_u16be(pkt, 5 + AVALONMM_PKT_DATA_SIZE, crc);
  69. r = write(fd, pkt, sizeof(pkt));
  70. if (opt_dev_protocol)
  71. {
  72. char hex[(sizeof(pkt) * 2) + 1];
  73. bin2hex(hex, pkt, sizeof(pkt));
  74. applog(LOG_DEBUG, "DEVPROTO fd=%d SEND: %s => %d", fd, hex, (int)r);
  75. }
  76. if (sizeof(pkt) != r)
  77. return false;
  78. datasz -= copysz;
  79. if (!datasz)
  80. break;
  81. data += copysz;
  82. ++pkt[3];
  83. }
  84. return true;
  85. }
  86. static
  87. ssize_t avalonmm_read(const int fd, const int logprio, enum avalonmm_reply *out_reply, void * const bufp, size_t bufsz)
  88. {
  89. uint8_t *buf = bufp;
  90. uint8_t pkt[AVALONMM_PKT_SIZE];
  91. uint8_t packets = 0, got = 0;
  92. uint16_t good_crc, actual_crc;
  93. ssize_t r;
  94. while (true)
  95. {
  96. r = serial_read(fd, pkt, sizeof(pkt));
  97. if (opt_dev_protocol)
  98. {
  99. if (r >= 0)
  100. {
  101. char hex[(r * 2) + 1];
  102. bin2hex(hex, pkt, r);
  103. applog(LOG_DEBUG, "DEVPROTO fd=%d RECV: %s", fd, hex);
  104. }
  105. else
  106. applog(LOG_DEBUG, "DEVPROTO fd=%d RECV (%d)", fd, (int)r);
  107. }
  108. if (r != sizeof(pkt))
  109. return -1;
  110. if (memcmp(pkt, "AV", 2))
  111. applogr(-1, logprio, "%s: bad header", __func__);
  112. good_crc = crc16xmodem(&pkt[5], AVALONMM_PKT_DATA_SIZE);
  113. actual_crc = upk_u16le(pkt, 5 + AVALONMM_PKT_DATA_SIZE);
  114. if (good_crc != actual_crc)
  115. applogr(-1, logprio, "%s: bad CRC (good=%04x actual=%04x)", __func__, good_crc, actual_crc);
  116. *out_reply = pkt[2];
  117. if (!got)
  118. {
  119. if (pkt[3] != 1)
  120. applogr(-1, logprio, "%s: first packet is not index 1", __func__);
  121. ++got;
  122. packets = pkt[4];
  123. }
  124. else
  125. {
  126. if (pkt[3] != ++got)
  127. applogr(-1, logprio, "%s: packet %d is not index %d", __func__, got, got);
  128. if (pkt[4] != packets)
  129. applogr(-1, logprio, "%s: packet %d total packet count is %d rather than original value of %d", __func__, got, pkt[4], packets);
  130. }
  131. if (bufsz)
  132. {
  133. if (likely(bufsz > AVALONMM_PKT_DATA_SIZE))
  134. {
  135. memcpy(buf, &pkt[5], AVALONMM_PKT_DATA_SIZE);
  136. bufsz -= AVALONMM_PKT_DATA_SIZE;
  137. buf += AVALONMM_PKT_DATA_SIZE;
  138. }
  139. else
  140. {
  141. memcpy(buf, &pkt[5], bufsz);
  142. bufsz = 0;
  143. }
  144. }
  145. if (got == packets)
  146. break;
  147. }
  148. return (((ssize_t)got) * AVALONMM_PKT_DATA_SIZE);
  149. }
  150. static
  151. bool avalonmm_detect_one(const char * const devpath)
  152. {
  153. uint8_t buf[AVALONMM_PKT_DATA_SIZE] = {0};
  154. enum avalonmm_reply reply;
  155. const int fd = serial_open(devpath, 0, 1, true);
  156. struct cgpu_info *prev_cgpu = NULL;
  157. if (fd == -1)
  158. applogr(false, LOG_DEBUG, "%s: Failed to open %s", __func__, devpath);
  159. for (int i = 0; i < AVALONMM_MAX_MODULES; ++i)
  160. {
  161. pk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4, i);
  162. avalonmm_write_cmd(fd, AMC_DETECT, buf, AVALONMM_PKT_DATA_SIZE);
  163. }
  164. while (avalonmm_read(fd, LOG_DEBUG, &reply, NULL, 0) > 0)
  165. {
  166. if (reply != AMR_DETECT_ACK)
  167. continue;
  168. int moduleno = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
  169. struct cgpu_info * const cgpu = malloc(sizeof(*cgpu));
  170. *cgpu = (struct cgpu_info){
  171. .drv = &avalonmm_drv,
  172. .device_path = prev_cgpu ? prev_cgpu->device_path : strdup(devpath),
  173. .device_data = (void*)(intptr_t)moduleno,
  174. .set_device_funcs = avalonmm_set_device_funcs,
  175. .deven = DEV_ENABLED,
  176. .procs = 1,
  177. .threads = prev_cgpu ? 0 : 1,
  178. };
  179. add_cgpu_slave(cgpu, prev_cgpu);
  180. prev_cgpu = cgpu;
  181. }
  182. serial_close(fd);
  183. return prev_cgpu;
  184. }
static
// Lowlevel probe hook: route VCOM (serial) candidates to avalonmm_detect_one.
bool avalonmm_lowl_probe(const struct lowlevel_device_info * const info)
{
	return vcom_lowl_probe_wrapper(info, avalonmm_detect_one);
}
// One job uploaded to the chain, retained so returned nonces can be
// matched back to the work that produced them
struct avalonmm_job {
	struct stratum_work swork;    // snapshot of the pool's stratum work
	uint32_t jobid;               // id sent via AMC_JOB_ID
	struct timeval tv_prepared;   // when the job was built
	double nonce_diff;            // difficulty credited per returned nonce
};
// State shared by every module on one serial chain (stored in device_data)
struct avalonmm_chain_state {
	uint32_t xnonce1;                              // reserved 2D-work extranonce1
	struct avalonmm_job *jobs[AVALONMM_CACHED_JOBS]; // ring of recent jobs, keyed by jobid % AVALONMM_CACHED_JOBS
	uint32_t next_jobid;                           // next id to assign (wraps)
	uint32_t clock_desired;                        // configured clock in MHz; 0 = not yet known
};
// Per-module state (stored in thr->cgpu_data)
struct avalonmm_module_state {
	unsigned module_id;     // id on the chain, as reported by AMC_DETECT
	uint16_t temp[2];       // last reported temperatures
	uint32_t clock_actual;  // clock the module reports it is running at
};
static struct cgpu_info *avalonmm_dev_for_module_id(struct cgpu_info *, uint32_t);
static bool avalonmm_poll_once(struct cgpu_info *, int64_t *);
static
// Thread init for the master device: open the port, allocate chain-wide
// state, reserve a 2D-work extranonce1, wire per-module state into every
// processor, and determine an initial clock frequency (from config or by
// polling the first module).  Returns false on any unrecoverable failure.
bool avalonmm_init(struct thr_info * const master_thr)
{
	struct cgpu_info * const master_dev = master_thr->cgpu, *dev = NULL;
	const char * const devpath = master_dev->device_path;
	const int fd = serial_open(devpath, 115200, 1, true);
	uint8_t buf[AVALONMM_PKT_DATA_SIZE] = {0};
	int64_t module_id;
	master_dev->device_fd = fd;
	if (unlikely(fd == -1))
		applogr(false, LOG_ERR, "%s: Failed to initialise", master_dev->dev_repr);
	struct avalonmm_chain_state * const chain = malloc(sizeof(*chain));
	*chain = (struct avalonmm_chain_state){
		.xnonce1 = 0,
	};
	work2d_init();
	if (!reserve_work2d_(&chain->xnonce1))
	{
		applog(LOG_ERR, "%s: Failed to reserve 2D work", master_dev->dev_repr);
		free(chain);
		serial_close(fd);
		return false;
	}
	// Attach chain state to every processor and module state to each
	// distinct device's first thread
	for_each_managed_proc(proc, master_dev)
	{
		if (dev == proc->device)
			continue;  // only once per device
		dev = proc->device;
		struct thr_info * const thr = proc->thr[0];
		struct avalonmm_module_state * const module = malloc(sizeof(*module));
		*module = (struct avalonmm_module_state){
			.module_id = (intptr_t)dev->device_data,
		};
		proc->device_data = chain;
		thr->cgpu_data = module;
	}
	dev = NULL;
	// Apply --set defaults (may populate chain->clock_desired via set_clock)
	for_each_managed_proc(proc, master_dev)
	{
		cgpu_set_defaults(proc);
		proc->status = LIFE_INIT2;
	}
	if (!chain->clock_desired)
	{
		// Get a reasonable default frequency
		dev = master_dev;
		struct thr_info * const thr = dev->thr[0];
		struct avalonmm_module_state * const module = thr->cgpu_data;
resend:
		pk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4, module->module_id);
		avalonmm_write_cmd(fd, AMC_POLL, buf, AVALONMM_PKT_DATA_SIZE);
		while (avalonmm_poll_once(master_dev, &module_id))
		{
			if (module_id != module->module_id)
				continue;  // reply from some other module
			if (module->clock_actual)
			{
				// Adopt whatever the module is already running at
				chain->clock_desired = module->clock_actual;
				break;
			}
			else
				goto resend;  // module answered but no clock yet; poll again
		}
	}
	if (likely(chain->clock_desired))
		applog(LOG_DEBUG, "%s: Frequency is initialised with %d MHz", master_dev->dev_repr, chain->clock_desired);
	else
		applogr(false, LOG_ERR, "%s: No frequency detected, please use --set %s@%s:clock=MHZ", master_dev->dev_repr, master_dev->drv->dname, devpath);
	return true;
}
  279. static
  280. bool avalonmm_send_swork(const int fd, struct avalonmm_chain_state * const chain, const struct stratum_work * const swork, uint32_t jobid, double *out_nonce_diff)
  281. {
  282. uint8_t buf[AVALONMM_PKT_DATA_SIZE];
  283. bytes_t coinbase = BYTES_INIT;
  284. int coinbase_len = bytes_len(&swork->coinbase);
  285. if (coinbase_len > AVALONMM_MAX_COINBASE_SIZE)
  286. return false;
  287. if (swork->merkles > AVALONMM_MAX_MERKLES)
  288. return false;
  289. pk_u32be(buf, 0, coinbase_len);
  290. const size_t xnonce2_offset = swork->nonce2_offset + work2d_pad_xnonce_size(swork) + work2d_xnonce1sz;
  291. pk_u32be(buf, 4, xnonce2_offset);
  292. pk_u32be(buf, 8, 4); // extranonce2 size, but only 4 is supported - smaller sizes are handled by limiting the range
  293. pk_u32be(buf, 0x0c, 0x24); // merkle_offset, always 0x24 for Bitcoin
  294. pk_u32be(buf, 0x10, swork->merkles);
  295. pk_u32be(buf, 0x14, 1); // diff? poorly defined
  296. pk_u32be(buf, 0x18, 0); // pool number - none of its business
  297. if (!avalonmm_write_cmd(fd, AMC_NEW_JOB, buf, 0x1c))
  298. return false;
  299. double nonce_diff = target_diff(swork->target);
  300. if (nonce_diff >= AVALONMM_MAX_NONCE_DIFF)
  301. set_target_to_pdiff(buf, nonce_diff = AVALONMM_MAX_NONCE_DIFF);
  302. else
  303. memcpy(buf, swork->target, 0x20);
  304. *out_nonce_diff = nonce_diff;
  305. if (!avalonmm_write_cmd(fd, AMC_TARGET, buf, 0x20))
  306. return false;
  307. pk_u32be(buf, 0, jobid);
  308. if (!avalonmm_write_cmd(fd, AMC_JOB_ID, buf, 4))
  309. return false;
  310. // Need to add extranonce padding and extranonce2
  311. bytes_cpy(&coinbase, &swork->coinbase);
  312. uint8_t *cbp = bytes_buf(&coinbase);
  313. cbp += swork->nonce2_offset;
  314. work2d_pad_xnonce(cbp, swork, false);
  315. cbp += work2d_pad_xnonce_size(swork);
  316. memcpy(cbp, &chain->xnonce1, work2d_xnonce1sz);
  317. cbp += work2d_xnonce1sz;
  318. if (!avalonmm_write_cmd(fd, AMC_COINBASE, bytes_buf(&coinbase), bytes_len(&coinbase)))
  319. return false;
  320. if (!avalonmm_write_cmd(fd, AMC_MERKLES, bytes_buf(&swork->merkle_bin), bytes_len(&swork->merkle_bin)))
  321. return false;
  322. uint8_t header_bin[0x80];
  323. memcpy(&header_bin[ 0], swork->header1, 0x24);
  324. memset(&header_bin[0x24], '\0', 0x20); // merkle root
  325. pk_u32be(header_bin, 0x44, swork->ntime);
  326. memcpy(&header_bin[0x48], swork->diffbits, 4);
  327. memset(&header_bin[0x4c], '\0', 4); // nonce
  328. memcpy(&header_bin[0x50], bfg_workpadding_bin, 0x30);
  329. if (!avalonmm_write_cmd(fd, AMC_BLKHDR, header_bin, sizeof(header_bin)))
  330. return false;
  331. // Avalon MM cannot handle xnonce2_size other than 4, and works in big endian, so we use a range to ensure the following bytes match
  332. const int fixed_mm_xnonce2_bytes = (work2d_xnonce2sz >= 4) ? 0 : (4 - work2d_xnonce2sz);
  333. uint8_t mm_xnonce2_start[4];
  334. uint32_t xnonce2_range;
  335. memset(mm_xnonce2_start, '\0', 4);
  336. cbp += work2d_xnonce2sz;
  337. for (int i = 1; i <= fixed_mm_xnonce2_bytes; ++i)
  338. mm_xnonce2_start[fixed_mm_xnonce2_bytes - i] = cbp++[0];
  339. if (fixed_mm_xnonce2_bytes > 0)
  340. xnonce2_range = (1 << (8 * work2d_xnonce2sz)) - 1;
  341. else
  342. xnonce2_range = 0xffffffff;
  343. pk_u32be(buf, 0, 80); // fan speed %
  344. uint16_t voltcfg = ((uint16_t)bitflip8((0x78 - /*deci-milli-volts*/6625 / 125) << 1 | 1)) << 8;
  345. pk_u32be(buf, 4, voltcfg);
  346. pk_u32be(buf, 8, chain->clock_desired);
  347. memcpy(&buf[0xc], mm_xnonce2_start, 4);
  348. pk_u32be(buf, 0x10, xnonce2_range);
  349. if (!avalonmm_write_cmd(fd, AMC_START, buf, 0x14))
  350. return false;
  351. return true;
  352. }
static
// Release a cached job: free the stratum_work internals, then the job itself.
void avalonmm_free_job(struct avalonmm_job * const mmjob)
{
	stratum_work_clean(&mmjob->swork);
	free(mmjob);
}
static
// Build a new job from the pool's current stratum work, upload it to the
// chain, and insert it into the cached-jobs ring (evicting whichever job
// previously occupied its slot).  Returns false if the upload failed.
bool avalonmm_update_swork_from_pool(struct cgpu_info * const master_dev, struct pool * const pool)
{
	struct avalonmm_chain_state * const chain = master_dev->device_data;
	const int fd = master_dev->device_fd;
	struct avalonmm_job *mmjob = malloc(sizeof(*mmjob));
	*mmjob = (struct avalonmm_job){
		.jobid = chain->next_jobid,
	};
	// Snapshot the pool's work under its read lock
	cg_rlock(&pool->data_lock);
	stratum_work_cpy(&mmjob->swork, &pool->swork);
	cg_runlock(&pool->data_lock);
	timer_set_now(&mmjob->tv_prepared);
	// Our copy is private; detach it from the pool's lock
	mmjob->swork.data_lock_p = NULL;
	if (!avalonmm_send_swork(fd, chain, &mmjob->swork, mmjob->jobid, &mmjob->nonce_diff))
	{
		avalonmm_free_job(mmjob);
		return false;
	}
	applog(LOG_DEBUG, "%s: Upload of job id %08lx complete", master_dev->dev_repr, (unsigned long)mmjob->jobid);
	++chain->next_jobid;
	// Replace the ring slot this jobid maps to
	struct avalonmm_job **jobentry = &chain->jobs[mmjob->jobid % AVALONMM_CACHED_JOBS];
	if (*jobentry)
		avalonmm_free_job(*jobentry);
	*jobentry = mmjob;
	return true;
}
  386. static
  387. struct cgpu_info *avalonmm_dev_for_module_id(struct cgpu_info * const master_dev, const uint32_t module_id)
  388. {
  389. struct cgpu_info *dev = NULL;
  390. for_each_managed_proc(proc, master_dev)
  391. {
  392. if (dev == proc->device)
  393. continue;
  394. dev = proc->device;
  395. struct thr_info * const thr = dev->thr[0];
  396. struct avalonmm_module_state * const module = thr->cgpu_data;
  397. if (module->module_id == module_id)
  398. return dev;
  399. }
  400. return NULL;
  401. }
static
// Read and dispatch a single reply from the chain.  On AMR_STATUS, update
// the module's temperatures/clock; on AMR_NONCE, validate the job id and
// submit the share.  *out_module_id is set to the replying module's id,
// or -1 if the reply carried none.  Returns false only on a read failure.
bool avalonmm_poll_once(struct cgpu_info * const master_dev, int64_t *out_module_id)
{
	struct avalonmm_chain_state * const chain = master_dev->device_data;
	const int fd = master_dev->device_fd;
	uint8_t buf[AVALONMM_PKT_DATA_SIZE];
	enum avalonmm_reply reply;
	*out_module_id = -1;
	if (avalonmm_read(fd, LOG_ERR, &reply, buf, sizeof(buf)) < 0)
		return false;
	switch (reply)
	{
		case AMR_STATUS:
		{
			// Module id lives in the last 4 bytes of every reply payload
			const uint32_t module_id = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
			struct cgpu_info * const dev = avalonmm_dev_for_module_id(master_dev, module_id);
			if (unlikely(!dev))
			{
				struct thr_info * const master_thr = master_dev->thr[0];
				applog(LOG_ERR, "%s: %s for unknown module id %lu", master_dev->dev_repr, "Status", (unsigned long)module_id);
				inc_hw_errors_only(master_thr);
				break;
			}
			*out_module_id = module_id;
			struct thr_info * const thr = dev->thr[0];
			struct avalonmm_module_state * const module = thr->cgpu_data;
			module->temp[0] = upk_u16be(buf, 0);
			module->temp[1] = upk_u16be(buf, 2);
#if 0
			module->fan [0] = upk_u16be(buf, 4);
			module->fan [1] = upk_u16be(buf, 6);
#endif
			module->clock_actual = upk_u32be(buf, 8);
#if 0
			module->voltage = upk_u32be(buf, 0x0c);
#endif
			dev->temp = max(module->temp[0], module->temp[1]);
			break;
		}
		case AMR_NONCE:
		{
			// MM always uses a 4-byte big-endian xnonce2; skip the fixed prefix
			const int fixed_mm_xnonce2_bytes = (work2d_xnonce2sz >= 4) ? 0 : (4 - work2d_xnonce2sz);
			const uint8_t * const backward_xnonce2 = &buf[8 + fixed_mm_xnonce2_bytes];
			// The firmware reports nonces offset by a fixed constant
			const uint32_t nonce = upk_u32be(buf, 0x10) - AVALONMM_NONCE_OFFSET;
			const uint32_t jobid = upk_u32be(buf, 0x14);
			const uint32_t module_id = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
			struct cgpu_info * const dev = avalonmm_dev_for_module_id(master_dev, module_id);
			if (unlikely(!dev))
			{
				struct thr_info * const master_thr = master_dev->thr[0];
				applog(LOG_ERR, "%s: %s for unknown module id %lu", master_dev->dev_repr, "Nonce", (unsigned long)module_id);
				inc_hw_errors_only(master_thr);
				break;
			}
			*out_module_id = module_id;
			struct thr_info * const thr = dev->thr[0];
			// A jobid is valid only if it falls inside the window of the
			// last AVALONMM_CACHED_JOBS ids; the two branches handle the
			// window straddling the uint32 wrap-around point
			bool invalid_jobid = false;
			if (unlikely((uint32_t)(chain->next_jobid - AVALONMM_CACHED_JOBS) > chain->next_jobid))
				// Jobs wrap around
				invalid_jobid = (jobid < chain->next_jobid - AVALONMM_CACHED_JOBS && jobid >= chain->next_jobid);
			else
				invalid_jobid = (jobid < chain->next_jobid - AVALONMM_CACHED_JOBS || jobid >= chain->next_jobid);
			if (unlikely(invalid_jobid))
			{
				applog(LOG_ERR, "%s: Bad job id %08lx", dev->dev_repr, (unsigned long)jobid);
				inc_hw_errors_only(thr);
				break;
			}
			struct avalonmm_job * const mmjob = chain->jobs[jobid % AVALONMM_CACHED_JOBS];
			// Un-reverse the device's big-endian xnonce2 byte order
			uint8_t xnonce2[work2d_xnonce2sz];
			for (int i = 0; i < work2d_xnonce2sz; ++i)
				xnonce2[i] = backward_xnonce2[(work2d_xnonce2sz - 1) - i];
			work2d_submit_nonce(thr, &mmjob->swork, &mmjob->tv_prepared, xnonce2, chain->xnonce1, nonce, mmjob->swork.ntime, NULL, mmjob->nonce_diff);
			hashes_done2(thr, mmjob->nonce_diff * 0x100000000, NULL);
			break;
		}
	}
	return true;
}
  481. static
  482. void avalonmm_poll(struct cgpu_info * const master_dev, int n)
  483. {
  484. int64_t dummy;
  485. while (n > 0)
  486. {
  487. if (avalonmm_poll_once(master_dev, &dummy))
  488. --n;
  489. }
  490. }
static
// Main mining loop for the master thread: upload the current pool's work,
// then poll every module for status/nonces until a work restart or a pool
// change forces a new upload.
void avalonmm_minerloop(struct thr_info * const master_thr)
{
	struct cgpu_info * const master_dev = master_thr->cgpu;
	const int fd = master_dev->device_fd;
	struct pool *nextpool = current_pool(), *pool = NULL;
	uint8_t buf[AVALONMM_PKT_DATA_SIZE] = {0};
	while (likely(!master_dev->shutdown))
	{
		master_thr->work_restart = false;
		if (!pool_has_usable_swork(nextpool))
			; // FIXME
		else
		if (avalonmm_update_swork_from_pool(master_dev, nextpool))
			pool = nextpool;
		// Poll until restart is requested or a different usable pool appears
		while (likely(!(master_thr->work_restart || ((nextpool = current_pool()) != pool && pool_has_usable_swork(nextpool)))))
		{
			cgsleep_ms(10);
			struct cgpu_info *dev = NULL;
			int n = 0;
			// Send one AMC_POLL per distinct module, counting them
			for_each_managed_proc(proc, master_dev)
			{
				if (dev == proc->device)
					continue;
				dev = proc->device;
				struct thr_info * const thr = dev->thr[0];
				struct avalonmm_module_state * const module = thr->cgpu_data;
				pk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4, module->module_id);
				avalonmm_write_cmd(fd, AMC_POLL, buf, AVALONMM_PKT_DATA_SIZE);
				++n;
			}
			// Collect one reply per poll request just sent
			avalonmm_poll(master_dev, n);
		}
	}
}
  526. static
  527. const char *avalonmm_set_clock(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
  528. {
  529. struct avalonmm_chain_state * const chain = proc->device_data;
  530. const int nv = atoi(newvalue);
  531. if (nv < 0)
  532. return "Invalid clock";
  533. chain->clock_desired = nv;
  534. return NULL;
  535. }
// --set options accepted by this driver
static const struct bfg_set_device_definition avalonmm_set_device_funcs[] = {
	{"clock", avalonmm_set_clock, "clock frequency"},
	{NULL},
};
// Driver registration: probe via serial VCOM, single master-thread minerloop
struct device_drv avalonmm_drv = {
	.dname = "avalonmm",
	.name = "AVM",
	.lowl_probe = avalonmm_lowl_probe,
	.thread_init = avalonmm_init,
	.minerloop = avalonmm_minerloop,
};