driver-knc-spi-fpga.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713
  1. /* cgminer driver for KnCminer Jupiter */
  2. #include <stdlib.h>
  3. #include <assert.h>
  4. #include <fcntl.h>
  5. #include <limits.h>
  6. #include <unistd.h>
  7. #include <sys/ioctl.h>
  8. #include <linux/types.h>
  9. #include <linux/spi/spidev.h>
  10. #include "logging.h"
  11. #include "miner.h"
  12. #define MAX_SPIS 1
  13. #define MAX_BYTES_IN_SPI_XSFER 4096
  14. /* /dev/spidevB.C, where B = bus, C = chipselect */
  15. #define SPI_DEVICE_TEMPLATE "/dev/spidev%d.%d"
  16. #define SPI_MODE (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
  17. #define SPI_BITS_PER_WORD 32
  18. #define SPI_MAX_SPEED 3000000
  19. #define SPI_DELAY_USECS 0
  20. /* Max number of ASICs permitted on one SPI device */
  21. #define MAX_ASICS 6
  22. /* How many hardware errors in a row before disabling the core */
  23. #define HW_ERR_LIMIT 10
  24. #define DISA_ERR_LIMIT 3
  25. #define MAX_ACTIVE_WORKS (192 * 2 * 6 * 2)
  26. #define WORK_MIDSTATE_WORDS 8
  27. #define WORK_DATA_WORDS 3
  28. #define WORK_STALE_US 60000000
  29. /* Keep core disabled for no longer than 15 minutes */
  30. #define CORE_DISA_PERIOD_US (15 * 60 * 1000000)
/* State for one opened /dev/spidevB.C device. Field layout mirrors the
 * values programmed via the SPI_IOC_* ioctls in spi_new(). */
struct spidev_context {
	int fd;		/* open file descriptor, -1 when not open */
	uint32_t speed;	/* max clock in Hz (SPI_MAX_SPEED) */
	uint16_t delay;	/* inter-transfer delay in usecs (SPI_DELAY_USECS) */
	uint8_t mode;	/* SPI mode bits (SPI_MODE) */
	uint8_t bits;	/* word size in bits (SPI_BITS_PER_WORD) */
};
/* One work request as transmitted to the FPGA over SPI.
 * The first 32-bit word is a packed command header; bitfield declaration
 * order is flipped under big-endian so the on-wire layout stays the same. */
struct spi_request {
#define	CMD_NOP		0
#define	CMD_GET_VERSION	1
#define	CMD_SUBMIT_WORK	2
#define	CMD_FLUSH_QUEUE	3

#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t cmd		:4;	/* one of the CMD_* codes above */
	uint32_t rsvd		:1;	/* set to zero */
	uint32_t queue_id	:12;	/* target queue (only #0 is used) */
	uint32_t work_id	:15;	/* driver-assigned id, masked by WORK_ID_MASK */
#else
	uint32_t work_id	:15;
	uint32_t queue_id	:12;
	uint32_t rsvd		:1;	/* set to zero */
	uint32_t cmd		:4;
#endif
#define	WORK_ID_MASK	0x7FFF	/* work_id is 15 bits wide */

	uint32_t midstate[WORK_MIDSTATE_WORDS];	/* SHA-256 midstate, word-reversed */
	uint32_t data[WORK_DATA_WORDS];		/* last 3 words of block header, word-reversed */
};
/* One response record received from the FPGA. As with spi_request, the
 * bitfield order is flipped under big-endian to keep the wire layout fixed. */
struct spi_response {
#define	RESPONSE_TYPE_NOP		0
#define	RESPONSE_TYPE_NONCE_FOUND	1
#define	RESPONSE_TYPE_WORK_DONE		2

#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t type		:2;	/* one of the RESPONSE_TYPE_* codes */
	uint32_t asic		:3;	/* reporting ASIC index (0..MAX_ASICS-1) */
	uint32_t queue_id	:12;	/* queue the work came from */
	uint32_t work_id	:15;	/* echoes the id from spi_request */
#else
	uint32_t work_id	:15;
	uint32_t queue_id	:12;
	uint32_t asic		:3;
	uint32_t type		:2;
#endif
	uint32_t nonce;	/* candidate nonce (valid for NONCE_FOUND) */
	uint32_t core;	/* reporting core within the ASIC */
};
/* How many requests fit into a single SPI transfer of MAX_BYTES_IN_SPI_XSFER. */
#define MAX_REQUESTS_IN_BATCH	( MAX_BYTES_IN_SPI_XSFER / \
				  sizeof(struct spi_request) \
				)
/* Shared transmit buffer; one full batch of requests per transfer. */
static struct spi_request spi_txbuf[MAX_REQUESTS_IN_BATCH];

/* Responses share the same transfer size minus the 12-byte rx header
 * (the three leading uint32_t words of struct spi_rx_t). */
#define MAX_RESPONSES_IN_BATCH	( (sizeof(spi_txbuf) - 12) / \
				  sizeof(struct spi_response) \
				)
/* Layout of the full SPI receive buffer: a 12-byte status header followed
 * by a batch of response records. Bitfield order flips on big-endian. */
struct spi_rx_t {
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t rsvd_1			:31;
	uint32_t response_queue_full	:1;	/* FPGA response queue overflow flag */
#else
	uint32_t response_queue_full	:1;
	uint32_t rsvd_1			:31;
#endif
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t rsvd_2			:16;
	uint32_t works_accepted		:16;	/* how many tx requests the FPGA took */
#else
	uint32_t works_accepted		:16;
	uint32_t rsvd_2			:16;
#endif
	uint32_t rsvd_3;
	struct spi_response responses[MAX_RESPONSES_IN_BATCH];
};

/* Shared receive buffer for all SPI transfers. */
static struct spi_rx_t spi_rxbuf;

/* Driver descriptor, defined at the bottom of this file. */
struct device_drv knc_drv;
/* A work item tracked by the driver, either queued for submission or
 * active on the device, together with its id and submission time. */
struct active_work {
	struct work *work;	/* cgminer work; NULL for an empty slot */
	uint32_t work_id;	/* id sent to / echoed by the FPGA */
	struct timeval begin;	/* when the work went active (for staleness) */
};
/* Record of a core that was disabled after repeated HW errors,
 * queued for re-enabling once CORE_DISA_PERIOD_US has elapsed. */
struct core_disa_data {
	struct timeval disa_begin;	/* when the core was disabled */
	uint8_t asic;			/* ASIC index */
	uint8_t core;			/* core index within the ASIC */
};
/* Per-device driver state: SPI handle, work id generator, and three ring
 * buffers (queued works, active works, disabled cores). */
struct knc_state {
	struct spidev_context *ctx;	/* SPI device handle */
	int devices;			/* number of detected ASICs */
	uint32_t salt;			/* random value XORed into work ids */
	uint32_t next_work_id;		/* monotonically increasing id counter */

	/* read - last read item, next is at (read + 1) mod BUFSIZE
	 * write - next write item, last written at (write - 1) mod BUFSIZE
	 * When buffer is empty, read + 1 == write
	 * Buffer full condition: read == write
	 */
	int read_q, write_q;		/* queued_fifo indices */
#define KNC_QUEUED_BUFFER_SIZE	(MAX_REQUESTS_IN_BATCH + 1)
	struct active_work queued_fifo[KNC_QUEUED_BUFFER_SIZE];

	int read_a, write_a;		/* active_fifo indices */
#define KNC_ACTIVE_BUFFER_SIZE	(MAX_ACTIVE_WORKS + 1)
	struct active_work active_fifo[KNC_ACTIVE_BUFFER_SIZE];

	/* Per-core bookkeeping, indexed by (asic * 256 + core). */
	uint8_t hwerrs[MAX_ASICS * 256];	/* consecutive HW errors */
	uint8_t disa_cnt[MAX_ASICS * 256];	/* how many times core was disabled */
	uint32_t hwerr_work_id[MAX_ASICS * 256];/* last work id that caused a HW error */

	int read_d, write_d;		/* disa_cores_fifo indices */
#define KNC_DISA_CORES_SIZE	(MAX_ASICS * 256)
	struct core_disa_data disa_cores_fifo[KNC_DISA_CORES_SIZE];
};
/* True when the queued-works ring buffer is full (read == write). */
static inline bool knc_queued_fifo_full(struct knc_state *knc)
{
	return (knc->read_q == knc->write_q);
}
/* True when the active-works ring buffer is full (read == write). */
static inline bool knc_active_fifo_full(struct knc_state *knc)
{
	return (knc->read_a == knc->write_a);
}
  144. static inline void knc_queued_fifo_inc_idx(int *idx)
  145. {
  146. if (unlikely(*idx >= ((int)KNC_QUEUED_BUFFER_SIZE - 1)))
  147. *idx = 0;
  148. else
  149. ++(*idx);
  150. }
  151. static inline void knc_active_fifo_inc_idx(int *idx)
  152. {
  153. if (unlikely(*idx >= (KNC_ACTIVE_BUFFER_SIZE - 1)))
  154. *idx = 0;
  155. else
  156. ++(*idx);
  157. }
  158. static inline void knc_disa_cores_fifo_inc_idx(int *idx)
  159. {
  160. if (unlikely(*idx >= (KNC_DISA_CORES_SIZE - 1)))
  161. *idx = 0;
  162. else
  163. ++(*idx);
  164. }
  165. /* Find SPI device with index idx, init it */
  166. static struct spidev_context * spi_new(int idx)
  167. {
  168. struct spidev_context *ctx;
  169. char dev_fname[PATH_MAX];
  170. if(NULL == (ctx = malloc(sizeof(struct spidev_context)))) {
  171. applog(LOG_ERR, "KnC spi: Out of memory");
  172. goto l_exit_error;
  173. }
  174. ctx->mode = SPI_MODE;
  175. ctx->bits = SPI_BITS_PER_WORD;
  176. ctx->speed = SPI_MAX_SPEED;
  177. ctx->delay = SPI_DELAY_USECS;
  178. ctx->fd = -1;
  179. sprintf(dev_fname, SPI_DEVICE_TEMPLATE,
  180. idx, /* bus */
  181. 0 /* chipselect */
  182. );
  183. if (0 > (ctx->fd = open(dev_fname, O_RDWR))) {
  184. applog(LOG_ERR, "KnC spi: Can not open SPI device %s: %m",
  185. dev_fname);
  186. goto l_free_exit_error;
  187. }
  188. /*
  189. * spi mode
  190. */
  191. if (0 > ioctl(ctx->fd, SPI_IOC_WR_MODE, &ctx->mode))
  192. goto l_ioctl_error;
  193. if (0 > ioctl(ctx->fd, SPI_IOC_RD_MODE, &ctx->mode))
  194. goto l_ioctl_error;
  195. /*
  196. * bits per word
  197. */
  198. if (0 > ioctl(ctx->fd, SPI_IOC_WR_BITS_PER_WORD, &ctx->bits))
  199. goto l_ioctl_error;
  200. if (0 > ioctl(ctx->fd, SPI_IOC_RD_BITS_PER_WORD, &ctx->bits))
  201. goto l_ioctl_error;
  202. /*
  203. * max speed hz
  204. */
  205. if (0 > ioctl(ctx->fd, SPI_IOC_WR_MAX_SPEED_HZ, &ctx->speed))
  206. goto l_ioctl_error;
  207. if (0 > ioctl(ctx->fd, SPI_IOC_RD_MAX_SPEED_HZ, &ctx->speed))
  208. goto l_ioctl_error;
  209. applog(LOG_INFO, "KnC spi: device %s uses mode %hhu, bits %hhu, speed %u",
  210. dev_fname, ctx->mode, ctx->bits, ctx->speed);
  211. return ctx;
  212. l_ioctl_error:
  213. applog(LOG_ERR, "KnC spi: ioctl error on SPI device %s: %m", dev_fname);
  214. // l_close_free_exit_error:
  215. close(ctx->fd);
  216. l_free_exit_error:
  217. free(ctx);
  218. l_exit_error:
  219. return NULL;
  220. }
  221. static void spi_free(struct spidev_context *ctx)
  222. {
  223. if (NULL == ctx)
  224. return;
  225. close(ctx->fd);
  226. free(ctx);
  227. }
  228. static int spi_transfer(struct spidev_context *ctx, uint8_t *txbuf,
  229. uint8_t *rxbuf, int len)
  230. {
  231. int ret;
  232. struct spi_ioc_transfer xfr;
  233. memset(rxbuf, 0xff, len);
  234. ret = len;
  235. xfr.tx_buf = (unsigned long)txbuf;
  236. xfr.rx_buf = (unsigned long)rxbuf;
  237. xfr.len = len;
  238. xfr.speed_hz = ctx->speed;
  239. xfr.delay_usecs = ctx->delay;
  240. xfr.bits_per_word = ctx->bits;
  241. xfr.cs_change = 0;
  242. xfr.pad = 0;
  243. if (1 > (ret = ioctl(ctx->fd, SPI_IOC_MESSAGE(1), &xfr))) {
  244. applog(LOG_ERR, "KnC spi xfer: ioctl error on SPI device: %m");
  245. }
  246. return ret;
  247. }
  248. static void disable_core(uint8_t asic, uint8_t core)
  249. {
  250. char str[256];
  251. snprintf(str, sizeof(str), "i2cset -y 2 0x2%hhu %hhu 0", asic, core);
  252. if (0 != WEXITSTATUS(system(str)))
  253. applog(LOG_ERR, "KnC: system call failed");
  254. }
  255. static void enable_core(uint8_t asic, uint8_t core)
  256. {
  257. char str[256];
  258. snprintf(str, sizeof(str), "i2cset -y 2 0x2%hhu %hhu 1", asic, core);
  259. if (0 != WEXITSTATUS(system(str)))
  260. applog(LOG_ERR, "KnC: system call failed");
  261. }
  262. static int64_t timediff(const struct timeval *a, const struct timeval *b)
  263. {
  264. struct timeval diff;
  265. timersub(a, b, &diff);
  266. return diff.tv_sec * 1000000 + diff.tv_usec;
  267. }
/* Re-enable the oldest disabled core once its cool-down has expired.
 *
 * Looks only at the head of disa_cores_fifo; at most one core is
 * re-enabled per call (the fifo is ordered by disable time, so if the
 * head has not expired, none of the later entries have either).
 */
static void knc_check_disabled_cores(struct knc_state *knc)
{
	int next_read_d;
	struct timeval now;
	int64_t us;
	struct core_disa_data *core;
	int cidx;

	next_read_d = knc->read_d;
	knc_disa_cores_fifo_inc_idx(&next_read_d);
	if (next_read_d == knc->write_d)
		return;	/* queue empty */

	core = &knc->disa_cores_fifo[next_read_d];
	gettimeofday(&now, NULL);
	us = timediff(&now, &core->disa_begin);
	/* us < 0 guards against clock going backwards (gettimeofday is not
	 * monotonic); treat that the same as "not yet expired". */
	if ((us >= 0) && (us < CORE_DISA_PERIOD_US))
		return;	/* latest disabled core still not expired */

	cidx = core->asic * 256 + core->core;
	enable_core(core->asic, core->core);
	knc->hwerrs[cidx] = 0;	/* give the core a clean error count */
	applog(LOG_NOTICE,
	       "KnC: core %u-%u was enabled back from disabled state",
	       core->asic, core->core);
	knc->read_d = next_read_d;	/* consume the fifo entry */
}
  292. static void knc_work_from_queue_to_spi(struct knc_state *knc,
  293. struct active_work *q_work,
  294. struct spi_request *spi_req)
  295. {
  296. uint32_t *buf_from, *buf_to;
  297. int i;
  298. spi_req->cmd = CMD_SUBMIT_WORK;
  299. spi_req->queue_id = 0; /* at the moment we have one and only queue #0 */
  300. spi_req->work_id = (knc->next_work_id ^ knc->salt) & WORK_ID_MASK;
  301. q_work->work_id = spi_req->work_id;
  302. ++(knc->next_work_id);
  303. buf_to = spi_req->midstate;
  304. buf_from = (uint32_t *)q_work->work->midstate;
  305. for (i = 0; i < WORK_MIDSTATE_WORDS; ++i)
  306. buf_to[i] = le32toh(buf_from[8 - i - 1]);
  307. buf_to = spi_req->data;
  308. buf_from = (uint32_t *)&(q_work->work->data[16 * 4]);
  309. for (i = 0; i < WORK_DATA_WORDS; ++i)
  310. buf_to[i] = le32toh(buf_from[3 - i - 1]);
  311. }
/* Digest one SPI receive buffer.
 *
 * Three phases:
 *   1. Move rxbuf->works_accepted items from queued_fifo to active_fifo
 *      (the FPGA has taken that many of the requests we sent).
 *   2. Walk the response records: match each NONCE_FOUND / WORK_DONE to
 *      an active work by work_id, submitting nonces and retiring done or
 *      stale works. HW-error bookkeeping may disable misbehaving cores.
 *   3. Return completed-work count scaled by 2^32 (full nonce range per
 *      completed work), as cgminer's hash accounting expects.
 *
 * thr may be NULL (flush path); nonces are then not submitted.
 */
static int64_t knc_process_response(struct thr_info *thr, struct cgpu_info *cgpu,
				    struct spi_rx_t *rxbuf, int __maybe_unused num)
{
	struct knc_state *knc = cgpu->knc_state;
	struct work *work;
	int64_t us;
	int submitted, completed, i, num_sent;
	int next_read_q, next_read_a;
	struct timeval now;

	/* How many requests were in the tx batch (ring-buffer distance). */
	if (knc->write_q > knc->read_q)
		num_sent = knc->write_q - knc->read_q - 1;
	else
		num_sent =
		    knc->write_q + KNC_QUEUED_BUFFER_SIZE - knc->read_q - 1;

	/* Actually process SPI response */
	if (rxbuf->works_accepted) {
		applog(LOG_DEBUG, "KnC spi: raw response %08X %08X",
		       ((uint32_t *)rxbuf)[0], ((uint32_t *)rxbuf)[1]);
		applog(LOG_DEBUG,
		       "KnC spi: response, accepted %u (from %u), full %u",
		       rxbuf->works_accepted, num_sent,
		       rxbuf->response_queue_full);
	}

	/* move works_accepted number of items from queued_fifo to active_fifo */
	gettimeofday(&now, NULL);
	submitted = 0;
	for (i = 0; i < rxbuf->works_accepted; ++i) {
		next_read_q = knc->read_q;
		knc_queued_fifo_inc_idx(&next_read_q);
		/* Stop early if queued_fifo underflows or active_fifo is full. */
		if ((next_read_q == knc->write_q) || knc_active_fifo_full(knc))
			break;
		memcpy(&knc->active_fifo[knc->write_a],
		       &knc->queued_fifo[next_read_q],
		       sizeof(struct active_work));
		knc->active_fifo[knc->write_a].begin = now;
		knc->queued_fifo[next_read_q].work = NULL;
		knc->read_q = next_read_q;
		knc_active_fifo_inc_idx(&knc->write_a);
		++submitted;
	}
	if (submitted != rxbuf->works_accepted)
		applog(LOG_ERR,
		       "KnC: accepted by FPGA %u works, but only %d submitted",
		       rxbuf->works_accepted, submitted);

	/* check for completed works and calculated nonces */
	gettimeofday(&now, NULL);
	completed = 0;
	for (i = 0; i < (int)MAX_RESPONSES_IN_BATCH; ++i)
	{
		if ( (rxbuf->responses[i].type != RESPONSE_TYPE_NONCE_FOUND) &&
		     (rxbuf->responses[i].type != RESPONSE_TYPE_WORK_DONE)
		   )
			continue;
		applog(LOG_DEBUG, "KnC spi: raw response %08X %08X",
		       ((uint32_t *)&rxbuf->responses[i])[0],
		       ((uint32_t *)&rxbuf->responses[i])[1]);
		applog(LOG_DEBUG, "KnC spi: response, T:%u C:%u-%u Q:%u W:%u",
		       rxbuf->responses[i].type,
		       rxbuf->responses[i].asic, rxbuf->responses[i].core,
		       rxbuf->responses[i].queue_id,
		       rxbuf->responses[i].work_id);

		/* Find active work with matching ID */
		next_read_a = knc->read_a;
		knc_active_fifo_inc_idx(&next_read_a);
		while (next_read_a != knc->write_a) {
			if (knc->active_fifo[next_read_a].work_id ==
			    rxbuf->responses[i].work_id)
				break;
			/* check for stale works */
			us = timediff(&now,
				      &knc->active_fifo[next_read_a].begin);
			if ((us < 0) || (us >= WORK_STALE_US)) {
				applog(LOG_DEBUG,
				       "KnC spi: remove stale work %u",
				       knc->active_fifo[next_read_a].work_id);
				work = knc->active_fifo[next_read_a].work;
				knc_active_fifo_inc_idx(&knc->read_a);
				work_completed(cgpu, work);
				/* Compact: move the new head slot into the
				 * freed position so the scan can continue. */
				if (next_read_a != knc->read_a)
					memcpy(&(knc->active_fifo[next_read_a]),
					       &(knc->active_fifo[knc->read_a]),
					       sizeof(struct active_work));
				knc->active_fifo[knc->read_a].work = NULL;
			}
			knc_active_fifo_inc_idx(&next_read_a);
		}
		if (next_read_a == knc->write_a)
			continue;	/* no matching active work; response dropped */

		applog(LOG_DEBUG, "KnC spi: response work %u found",
		       rxbuf->responses[i].work_id);
		work = knc->active_fifo[next_read_a].work;

		if (rxbuf->responses[i].type == RESPONSE_TYPE_NONCE_FOUND) {
			if (NULL != thr) {
				int cidx = rxbuf->responses[i].asic * 256 +
					   rxbuf->responses[i].core;
				if (submit_nonce(thr, work,
						 rxbuf->responses[i].nonce)) {
					/* Good nonce: reset the core's error state. */
					if (cidx < (int)sizeof(knc->hwerrs)) {
						knc->hwerrs[cidx] = 0;
						knc->disa_cnt[cidx] = 0;
						knc->hwerr_work_id[cidx] = 0xFFFFFFFF;
					}
				} else {
					/* HW error: count at most one error per
					 * work id per core. */
					if ((cidx < (int)sizeof(knc->hwerrs)) &&
					    (knc->hwerr_work_id[cidx] != rxbuf->responses[i].work_id)) {
						knc->hwerr_work_id[cidx] = rxbuf->responses[i].work_id;
						if (++(knc->hwerrs[cidx]) >= HW_ERR_LIMIT) {
							struct core_disa_data *core;

							core = &knc->disa_cores_fifo[knc->write_d];
							core->disa_begin = now;
							core->asic = rxbuf->responses[i].asic;
							core->core = rxbuf->responses[i].core;
							disable_core(core->asic, core->core);
							if (++(knc->disa_cnt[cidx]) >= DISA_ERR_LIMIT) {
								/* NOTE(review): the fifo index is NOT advanced here,
								 * so the core is never queued for re-enabling —
								 * this appears to be how "permanent" is implemented. */
								applog(LOG_WARNING,
								       "KnC: core %u-%u was disabled permanently", core->asic, core->core);
							} else {
								applog(LOG_WARNING,
								       "KnC: core %u-%u was disabled due to %u HW errors in a row",
								       core->asic, core->core, HW_ERR_LIMIT);
								knc_disa_cores_fifo_inc_idx(&knc->write_d);
							}
						}
					}
				};
			}
			continue;	/* NONCE_FOUND does not retire the work */
		}

		/* Work completed */
		knc_active_fifo_inc_idx(&knc->read_a);
		work_completed(cgpu, work);
		/* Compact the ring: move the head slot into the freed position. */
		if (next_read_a != knc->read_a)
			memcpy(&(knc->active_fifo[next_read_a]),
			       &(knc->active_fifo[knc->read_a]),
			       sizeof(struct active_work));
		knc->active_fifo[knc->read_a].work = NULL;
		++completed;
	}
	/* One completed work == full 2^32 nonce range scanned. */
	return ((uint64_t)completed) * 0x100000000UL;
}
  452. /* Send flush command via SPI */
  453. static int _internal_knc_flush_fpga(struct knc_state *knc)
  454. {
  455. int len;
  456. spi_txbuf[0].cmd = CMD_FLUSH_QUEUE;
  457. spi_txbuf[0].queue_id = 0; /* at the moment we have one and only queue #0 */
  458. len = spi_transfer(knc->ctx, (uint8_t *)spi_txbuf,
  459. (uint8_t *)&spi_rxbuf, sizeof(struct spi_request));
  460. if (len != sizeof(struct spi_request))
  461. return -1;
  462. len /= sizeof(struct spi_response);
  463. return len;
  464. }
  465. static bool knc_detect_one(struct spidev_context *ctx)
  466. {
  467. /* Scan device for ASICs */
  468. int chip_id;
  469. int devices = 0;
  470. for (chip_id = 0; chip_id < MAX_ASICS; ++chip_id) {
  471. /* TODO: perform the ASIC test/detection */
  472. ++devices;
  473. }
  474. if (!devices) {
  475. applog(LOG_INFO, "SPI detected, but not KnCminer ASICs");
  476. return false;
  477. }
  478. applog(LOG_INFO, "Found a KnC miner with %d ASICs", devices);
  479. struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
  480. struct knc_state *knc = calloc(1, sizeof(*knc));
  481. if (!cgpu || !knc) {
  482. applog(LOG_ERR, "KnC miner detected, but failed to allocate memory");
  483. return false;
  484. }
  485. knc->ctx = ctx;
  486. knc->devices = devices;
  487. knc->read_q = 0;
  488. knc->write_q = 1;
  489. knc->read_a = 0;
  490. knc->write_a = 1;
  491. knc->read_d = 0;
  492. knc->write_d = 1;
  493. knc->salt = rand();
  494. memset(knc->hwerr_work_id, 0xFF, sizeof(knc->hwerr_work_id));
  495. _internal_knc_flush_fpga(knc);
  496. cgpu->drv = &knc_drv;
  497. cgpu->name = "KnCminer";
  498. cgpu->threads = 1; // .. perhaps our number of devices?
  499. cgpu->knc_state = knc;
  500. add_cgpu(cgpu);
  501. return true;
  502. }
  503. // http://www.concentric.net/~Ttwang/tech/inthash.htm
  504. static unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
  505. {
  506. a=a-b; a=a-c; a=a^(c >> 13);
  507. b=b-c; b=b-a; b=b^(a << 8);
  508. c=c-a; c=c-b; c=c^(b >> 13);
  509. a=a-b; a=a-c; a=a^(c >> 12);
  510. b=b-c; b=b-a; b=b^(a << 16);
  511. c=c-a; c=c-b; c=c^(b >> 5);
  512. a=a-b; a=a-c; a=a^(c >> 3);
  513. b=b-c; b=b-a; b=b^(a << 10);
  514. c=c-a; c=c-b; c=c^(b >> 15);
  515. return c;
  516. }
  517. /* Probe devices and register with add_cgpu */
  518. void knc_detect(bool __maybe_unused hotplug)
  519. {
  520. int idx;
  521. srand(mix(clock(), time(NULL), getpid()));
  522. /* Loop through all possible SPI interfaces */
  523. for (idx = 0; idx < MAX_SPIS; ++idx) {
  524. struct spidev_context *ctx = spi_new(idx + 1);
  525. if (ctx != NULL) {
  526. if (!knc_detect_one(ctx))
  527. spi_free(ctx);
  528. }
  529. }
  530. }
/* Main polling entry point (drv.scanwork).
 *
 * Sends the entire queued_fifo as one SPI batch, then processes whatever
 * the FPGA sent back. Return value is the number of nonces that have
 * been checked since the previous call, scaled by 2^32 per completed
 * work; -1 on SPI transfer failure.
 */
static int64_t knc_scanwork(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->knc_state;
	int len, num;
	int next_read_q;

	applog(LOG_DEBUG, "KnC running scanwork");

	/* Re-enable any core whose disable period has expired. */
	knc_check_disabled_cores(knc);

	/* Prepare tx buffer */
	memset(spi_txbuf, 0, sizeof(spi_txbuf));
	num = 0;
	next_read_q = knc->read_q;
	knc_queued_fifo_inc_idx(&next_read_q);
	while (next_read_q != knc->write_q) {
		knc_work_from_queue_to_spi(knc, &knc->queued_fifo[next_read_q],
					   &spi_txbuf[num]);
		knc_queued_fifo_inc_idx(&next_read_q);
		++num;
	}
	/* knc->read_q is advanced in knc_process_response, not here */

	len = spi_transfer(knc->ctx, (uint8_t *)spi_txbuf,
			   (uint8_t *)&spi_rxbuf, sizeof(spi_txbuf));
	if (len != sizeof(spi_rxbuf))
		return -1;

	/* Express transferred length in response units for process_response. */
	len /= sizeof(struct spi_response);

	applog(LOG_DEBUG, "KnC spi: %d works in request", num);
	return knc_process_response(thr, cgpu, &spi_rxbuf, len);
}
  562. static bool knc_queue_full(struct cgpu_info *cgpu)
  563. {
  564. struct knc_state *knc = cgpu->knc_state;
  565. struct work *work;
  566. int queue_full = true;
  567. applog(LOG_DEBUG, "KnC running queue full");
  568. while (!knc_queued_fifo_full(knc)) {
  569. work = get_queued(cgpu);
  570. if (!work) {
  571. queue_full = false;
  572. break;
  573. }
  574. knc->queued_fifo[knc->write_q].work = work;
  575. knc_queued_fifo_inc_idx(&(knc->write_q));
  576. }
  577. return queue_full;
  578. }
/* drv.flush_work callback: abandon all in-flight work.
 *
 * Returns every queued and active work to cgminer via work_completed(),
 * then tells the FPGA to flush its own queue and processes any responses
 * that came back with the flush reply (thr is NULL there, so no nonces
 * are submitted on this path).
 */
static void knc_flush_work(struct cgpu_info *cgpu)
{
	struct knc_state *knc = cgpu->knc_state;
	struct work *work;
	int len;
	int next_read_q, next_read_a;

	applog(LOG_ERR, "KnC running flushwork");

	/* Drain queued works */
	next_read_q = knc->read_q;
	knc_queued_fifo_inc_idx(&next_read_q);
	while (next_read_q != knc->write_q) {
		work = knc->queued_fifo[next_read_q].work;
		work_completed(cgpu, work);
		knc->queued_fifo[next_read_q].work = NULL;
		knc->read_q = next_read_q;
		knc_queued_fifo_inc_idx(&next_read_q);
	}

	/* Drain active works */
	next_read_a = knc->read_a;
	knc_active_fifo_inc_idx(&next_read_a);
	while (next_read_a != knc->write_a) {
		work = knc->active_fifo[next_read_a].work;
		work_completed(cgpu, work);
		knc->active_fifo[next_read_a].work = NULL;
		knc->read_a = next_read_a;
		knc_active_fifo_inc_idx(&next_read_a);
	}

	/* Flush the device-side queue and digest whatever it returned. */
	len = _internal_knc_flush_fpga(knc);
	if (len > 0)
		knc_process_response(NULL, cgpu, &spi_rxbuf, len);
}
/* cgminer driver registration table for KnCminer devices. */
struct device_drv knc_drv = {
	.drv_id = DRIVER_knc,
	.dname = "KnCminer",
	.name = "KnC",
	.drv_detect = knc_detect,	// Probe for devices, add with add_cgpu
	.hash_work = hash_queued_work,	// use cgminer's queued-work loop
	.scanwork = knc_scanwork,
	.queue_full = knc_queue_full,
	.flush_work = knc_flush_work,
};