driver-knc-spi-fpga.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762
/*
 * cgminer driver for KnCminer devices
 *
 * Copyright 2013 Con Kolivas <kernel@kolivas.org>
 * Copyright 2013 KnCminer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
  12. #include <stdlib.h>
  13. #include <assert.h>
  14. #include <fcntl.h>
  15. #include <limits.h>
  16. #include <unistd.h>
  17. #include <sys/ioctl.h>
  18. #include <linux/types.h>
  19. #include <linux/spi/spidev.h>
  20. #include "logging.h"
  21. #include "miner.h"
  22. #define MAX_SPIS 1
  23. #define MAX_BYTES_IN_SPI_XSFER 4096
  24. /* /dev/spidevB.C, where B = bus, C = chipselect */
  25. #define SPI_DEVICE_TEMPLATE "/dev/spidev%d.%d"
  26. #define SPI_MODE (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
  27. #define SPI_BITS_PER_WORD 32
  28. #define SPI_MAX_SPEED 3000000
  29. #define SPI_DELAY_USECS 0
  30. /* Max number of ASICs permitted on one SPI device */
  31. #define MAX_ASICS 6
  32. /* How many hardware errors in a row before disabling the core */
  33. #define HW_ERR_LIMIT 10
  34. #define DISA_ERR_LIMIT 3
  35. #define MAX_ACTIVE_WORKS (192 * 2 * 6 * 2)
  36. #define WORK_MIDSTATE_WORDS 8
  37. #define WORK_DATA_WORDS 3
  38. #define WORK_STALE_US 60000000
  39. /* Keep core disabled for no longer than 15 minutes */
  40. #define CORE_DISA_PERIOD_US (15 * 60 * 1000000)
/* State for one open /dev/spidev node. */
struct spidev_context {
	int fd;		/* open file descriptor, -1 until opened */
	uint32_t speed;	/* max transfer speed, Hz */
	uint16_t delay;	/* inter-transfer delay, usecs */
	uint8_t mode;	/* SPI mode bits (CPHA/CPOL/CS polarity) */
	uint8_t bits;	/* bits per word */
};
/* One work request sent to the FPGA.  The bit-field layout mirrors the
 * wire format, hence the endian-dependent ordering of the header word. */
struct spi_request {
#define CMD_NOP		0
#define CMD_GET_VERSION	1
#define CMD_SUBMIT_WORK	2
#define CMD_FLUSH_QUEUE	3
#define WORK_ID_MASK	0x7FFF
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t cmd	 :4;
	uint32_t rsvd	 :1; /* set to zero */
	uint32_t queue_id :12;
	uint32_t work_id :15;
#else
	uint32_t work_id :15;
	uint32_t queue_id :12;
	uint32_t rsvd	 :1; /* set to zero */
	uint32_t cmd	 :4;
#endif
	uint32_t midstate[WORK_MIDSTATE_WORDS];	/* SHA-256 midstate words */
	uint32_t data[WORK_DATA_WORDS];		/* remaining block-header words */
};
/* One response slot clocked back from the FPGA; same endian-dependent
 * header word convention as struct spi_request. */
struct spi_response {
#define RESPONSE_TYPE_NOP		0
#define RESPONSE_TYPE_NONCE_FOUND	1
#define RESPONSE_TYPE_WORK_DONE		2
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t type	 :2;
	uint32_t asic	 :3;
	uint32_t queue_id :12;
	uint32_t work_id :15;
#else
	uint32_t work_id :15;
	uint32_t queue_id :12;
	uint32_t asic	 :3;
	uint32_t type	 :2;
#endif
	uint32_t nonce;	/* candidate nonce (valid for NONCE_FOUND) */
	uint32_t core;	/* core index within the ASIC */
};
/* Max whole requests that fit in one SPI transfer */
#define MAX_REQUESTS_IN_BATCH	( MAX_BYTES_IN_SPI_XSFER / \
				  sizeof(struct spi_request) \
				)
static struct spi_request spi_txbuf[MAX_REQUESTS_IN_BATCH];

/* The rx buffer is the same size as the tx buffer; 12 bytes are taken
 * by the status header (two status words + rsvd_3) below. */
#define MAX_RESPONSES_IN_BATCH	( (sizeof(spi_txbuf) - 12) / \
				  sizeof(struct spi_response) \
				)
/* Layout of the buffer received from the FPGA on every transfer. */
struct spi_rx_t {
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t rsvd_1			:31;
	uint32_t response_queue_full	:1;
#else
	uint32_t response_queue_full	:1;
	uint32_t rsvd_1			:31;
#endif
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	uint32_t rsvd_2		:16;
	uint32_t works_accepted	:16;	/* how many tx requests were taken */
#else
	uint32_t works_accepted	:16;	/* how many tx requests were taken */
	uint32_t rsvd_2		:16;
#endif
	uint32_t rsvd_3;
	struct spi_response responses[MAX_RESPONSES_IN_BATCH];
};
static struct spi_rx_t spi_rxbuf;
/* A work in flight: the cgminer work plus the wire-level work_id and
 * the time it entered the active FIFO (used for staleness checks). */
struct active_work {
	struct work *work;
	uint32_t work_id;
	struct timeval begin;
};

/* Record of a core that was disabled after repeated HW errors. */
struct core_disa_data {
	struct timeval disa_begin;	/* when the core was disabled */
	uint8_t asic;
	uint8_t core;
};
/* Per-device driver state.  All three FIFOs below follow the ring-buffer
 * convention documented at read_q/write_q. */
struct knc_state {
	struct spidev_context *ctx;	/* SPI device handle */
	int devices;			/* number of ASICs detected */
	uint32_t salt;			/* XORed into work_ids (randomized at detect) */
	uint32_t next_work_id;
	/* read - last read item, next is at (read + 1) mod BUFSIZE
	 * write - next write item, last written at (write - 1) mod BUFSIZE
	 * When buffer is empty, read + 1 == write
	 * Buffer full condition: read == write
	 */
	int read_q, write_q;
#define KNC_QUEUED_BUFFER_SIZE	(MAX_REQUESTS_IN_BATCH + 1)
	struct active_work queued_fifo[KNC_QUEUED_BUFFER_SIZE];
	int read_a, write_a;
#define KNC_ACTIVE_BUFFER_SIZE	(MAX_ACTIVE_WORKS + 1)
	struct active_work active_fifo[KNC_ACTIVE_BUFFER_SIZE];
	/* Per-core bookkeeping, indexed by (asic * 256 + core) */
	uint8_t hwerrs[MAX_ASICS * 256];	/* consecutive HW errors */
	uint8_t disa_cnt[MAX_ASICS * 256];	/* times the core was disabled */
	uint32_t hwerr_work_id[MAX_ASICS * 256];	/* last work_id that errored */
	int read_d, write_d;
#define KNC_DISA_CORES_SIZE	(MAX_ASICS * 256)
	struct core_disa_data disa_cores_fifo[KNC_DISA_CORES_SIZE];
	pthread_mutex_t lock;	/* guards all FIFOs and counters above */
};
  146. static inline bool knc_queued_fifo_full(struct knc_state *knc)
  147. {
  148. return (knc->read_q == knc->write_q);
  149. }
  150. static inline bool knc_active_fifo_full(struct knc_state *knc)
  151. {
  152. return (knc->read_a == knc->write_a);
  153. }
  154. static inline void knc_queued_fifo_inc_idx(int *idx)
  155. {
  156. if (unlikely(*idx >= ((int)KNC_QUEUED_BUFFER_SIZE - 1)))
  157. *idx = 0;
  158. else
  159. ++(*idx);
  160. }
  161. static inline void knc_active_fifo_inc_idx(int *idx)
  162. {
  163. if (unlikely(*idx >= (KNC_ACTIVE_BUFFER_SIZE - 1)))
  164. *idx = 0;
  165. else
  166. ++(*idx);
  167. }
  168. static inline void knc_disa_cores_fifo_inc_idx(int *idx)
  169. {
  170. if (unlikely(*idx >= (KNC_DISA_CORES_SIZE - 1)))
  171. *idx = 0;
  172. else
  173. ++(*idx);
  174. }
  175. /* Find SPI device with index idx, init it */
  176. static struct spidev_context *spi_new(int idx)
  177. {
  178. struct spidev_context *ctx;
  179. char dev_fname[PATH_MAX];
  180. if (NULL == (ctx = malloc(sizeof(struct spidev_context)))) {
  181. applog(LOG_ERR, "KnC spi: Out of memory");
  182. goto l_exit_error;
  183. }
  184. ctx->mode = SPI_MODE;
  185. ctx->bits = SPI_BITS_PER_WORD;
  186. ctx->speed = SPI_MAX_SPEED;
  187. ctx->delay = SPI_DELAY_USECS;
  188. ctx->fd = -1;
  189. sprintf(dev_fname, SPI_DEVICE_TEMPLATE,
  190. idx, /* bus */
  191. 0 /* chipselect */
  192. );
  193. if (0 > (ctx->fd = open(dev_fname, O_RDWR))) {
  194. applog(LOG_ERR, "KnC spi: Can not open SPI device %s: %m",
  195. dev_fname);
  196. goto l_free_exit_error;
  197. }
  198. /*
  199. * spi mode
  200. */
  201. if (0 > ioctl(ctx->fd, SPI_IOC_WR_MODE, &ctx->mode))
  202. goto l_ioctl_error;
  203. if (0 > ioctl(ctx->fd, SPI_IOC_RD_MODE, &ctx->mode))
  204. goto l_ioctl_error;
  205. /*
  206. * bits per word
  207. */
  208. if (0 > ioctl(ctx->fd, SPI_IOC_WR_BITS_PER_WORD, &ctx->bits))
  209. goto l_ioctl_error;
  210. if (0 > ioctl(ctx->fd, SPI_IOC_RD_BITS_PER_WORD, &ctx->bits))
  211. goto l_ioctl_error;
  212. /*
  213. * max speed hz
  214. */
  215. if (0 > ioctl(ctx->fd, SPI_IOC_WR_MAX_SPEED_HZ, &ctx->speed))
  216. goto l_ioctl_error;
  217. if (0 > ioctl(ctx->fd, SPI_IOC_RD_MAX_SPEED_HZ, &ctx->speed))
  218. goto l_ioctl_error;
  219. applog(LOG_INFO, "KnC spi: device %s uses mode %hhu, bits %hhu, speed %u",
  220. dev_fname, ctx->mode, ctx->bits, ctx->speed);
  221. return ctx;
  222. l_ioctl_error:
  223. applog(LOG_ERR, "KnC spi: ioctl error on SPI device %s: %m", dev_fname);
  224. close(ctx->fd);
  225. l_free_exit_error:
  226. free(ctx);
  227. l_exit_error:
  228. return NULL;
  229. }
  230. static void spi_free(struct spidev_context *ctx)
  231. {
  232. if (NULL == ctx)
  233. return;
  234. close(ctx->fd);
  235. free(ctx);
  236. }
  237. static int spi_transfer(struct spidev_context *ctx, uint8_t *txbuf,
  238. uint8_t *rxbuf, int len)
  239. {
  240. struct spi_ioc_transfer xfr;
  241. int ret;
  242. memset(rxbuf, 0xff, len);
  243. ret = len;
  244. xfr.tx_buf = (unsigned long)txbuf;
  245. xfr.rx_buf = (unsigned long)rxbuf;
  246. xfr.len = len;
  247. xfr.speed_hz = ctx->speed;
  248. xfr.delay_usecs = ctx->delay;
  249. xfr.bits_per_word = ctx->bits;
  250. xfr.cs_change = 0;
  251. xfr.pad = 0;
  252. if (1 > (ret = ioctl(ctx->fd, SPI_IOC_MESSAGE(1), &xfr)))
  253. applog(LOG_ERR, "KnC spi xfer: ioctl error on SPI device: %m");
  254. return ret;
  255. }
  256. static void disable_core(uint8_t asic, uint8_t core)
  257. {
  258. char str[256];
  259. snprintf(str, sizeof(str), "i2cset -y 2 0x2%hhu %hhu 0", asic, core);
  260. if (0 != WEXITSTATUS(system(str)))
  261. applog(LOG_ERR, "KnC: system call failed");
  262. }
  263. static void enable_core(uint8_t asic, uint8_t core)
  264. {
  265. char str[256];
  266. snprintf(str, sizeof(str), "i2cset -y 2 0x2%hhu %hhu 1", asic, core);
  267. if (0 != WEXITSTATUS(system(str)))
  268. applog(LOG_ERR, "KnC: system call failed");
  269. }
  270. static int64_t timediff(const struct timeval *a, const struct timeval *b)
  271. {
  272. struct timeval diff;
  273. timersub(a, b, &diff);
  274. return diff.tv_sec * 1000000 + diff.tv_usec;
  275. }
/* Re-enable the oldest disabled core once its cool-down period
 * (CORE_DISA_PERIOD_US) has expired.  At most one core is processed per
 * call, in the order the cores were disabled. */
static void knc_check_disabled_cores(struct knc_state *knc)
{
	struct core_disa_data *core;
	int next_read_d, cidx;
	struct timeval now;
	int64_t us;

	next_read_d = knc->read_d;
	knc_disa_cores_fifo_inc_idx(&next_read_d);
	if (next_read_d == knc->write_d)
		return;	/* queue empty */

	core = &knc->disa_cores_fifo[next_read_d];
	gettimeofday(&now, NULL);
	us = timediff(&now, &core->disa_begin);
	if ((us >= 0) && (us < CORE_DISA_PERIOD_US))
		return;	/* latest disabled core still not expired */

	cidx = core->asic * 256 + core->core;
	enable_core(core->asic, core->core);
	knc->hwerrs[cidx] = 0;	/* give the core a fresh error budget */
	applog(LOG_NOTICE,
	       "KnC: core %u-%u was enabled back from disabled state",
	       core->asic, core->core);
	knc->read_d = next_read_d;	/* consume the FIFO entry last */
}
  299. static void knc_work_from_queue_to_spi(struct knc_state *knc,
  300. struct active_work *q_work,
  301. struct spi_request *spi_req)
  302. {
  303. uint32_t *buf_from, *buf_to;
  304. int i;
  305. spi_req->cmd = CMD_SUBMIT_WORK;
  306. spi_req->queue_id = 0; /* at the moment we have one and only queue #0 */
  307. spi_req->work_id = (knc->next_work_id ^ knc->salt) & WORK_ID_MASK;
  308. q_work->work_id = spi_req->work_id;
  309. ++(knc->next_work_id);
  310. buf_to = spi_req->midstate;
  311. buf_from = (uint32_t *)q_work->work->midstate;
  312. for (i = 0; i < WORK_MIDSTATE_WORDS; ++i)
  313. buf_to[i] = le32toh(buf_from[8 - i - 1]);
  314. buf_to = spi_req->data;
  315. buf_from = (uint32_t *)&(q_work->work->data[16 * 4]);
  316. for (i = 0; i < WORK_DATA_WORDS; ++i)
  317. buf_to[i] = le32toh(buf_from[3 - i - 1]);
  318. }
/* Process one SPI reply buffer:
 *  1. move works_accepted items from the queued FIFO to the active FIFO,
 *  2. walk every response slot, submitting found nonces and retiring
 *     completed or stale works,
 *  3. maintain per-core HW-error counters and the disabled-cores FIFO.
 * thr may be NULL (flush path); then nonces are not submitted.
 * Caller must hold knc->lock.
 * Returns successful nonces * 2^32 as the hash-count credit. */
static int64_t knc_process_response(struct thr_info *thr, struct cgpu_info *cgpu,
				    struct spi_rx_t *rxbuf)
{
	struct knc_state *knc = cgpu->device_data;
	int submitted, successful, i, num_sent;
	int next_read_q, next_read_a;
	struct timeval now;
	struct work *work;
	int64_t us;

	/* Number of requests we sent in the preceding transfer
	 * (current depth of the queued FIFO) */
	num_sent = knc->write_q - knc->read_q - 1;
	if (knc->write_q <= knc->read_q)
		num_sent += KNC_QUEUED_BUFFER_SIZE;

	/* Actually process SPI response */
	if (rxbuf->works_accepted) {
		applog(LOG_DEBUG, "KnC spi: raw response %08X %08X",
		       ((uint32_t *)rxbuf)[0], ((uint32_t *)rxbuf)[1]);
		applog(LOG_DEBUG,
		       "KnC spi: response, accepted %u (from %u), full %u",
		       rxbuf->works_accepted, num_sent,
		       rxbuf->response_queue_full);
	}
	/* move works_accepted number of items from queued_fifo to active_fifo */
	gettimeofday(&now, NULL);
	submitted = 0;
	for (i = 0; i < rxbuf->works_accepted; ++i) {
		next_read_q = knc->read_q;
		knc_queued_fifo_inc_idx(&next_read_q);
		if ((next_read_q == knc->write_q) || knc_active_fifo_full(knc))
			break;
		memcpy(&knc->active_fifo[knc->write_a],
		       &knc->queued_fifo[next_read_q],
		       sizeof(struct active_work));
		knc->active_fifo[knc->write_a].begin = now;
		knc->queued_fifo[next_read_q].work = NULL;
		knc->read_q = next_read_q;
		knc_active_fifo_inc_idx(&knc->write_a);
		++submitted;
	}
	if (submitted != rxbuf->works_accepted) {
		applog(LOG_ERR,
		       "KnC: accepted by FPGA %u works, but only %d submitted",
		       rxbuf->works_accepted, submitted);
	}

	/* check for completed works and calculated nonces */
	gettimeofday(&now, NULL);
	successful = 0;
	for (i = 0; i < (int)MAX_RESPONSES_IN_BATCH; ++i) {
		if ((rxbuf->responses[i].type != RESPONSE_TYPE_NONCE_FOUND) &&
		    (rxbuf->responses[i].type != RESPONSE_TYPE_WORK_DONE))
			continue;
		applog(LOG_DEBUG, "KnC spi: raw response %08X %08X",
		       ((uint32_t *)&rxbuf->responses[i])[0],
		       ((uint32_t *)&rxbuf->responses[i])[1]);
		applog(LOG_DEBUG, "KnC spi: response, T:%u C:%u-%u Q:%u W:%u",
		       rxbuf->responses[i].type,
		       rxbuf->responses[i].asic, rxbuf->responses[i].core,
		       rxbuf->responses[i].queue_id,
		       rxbuf->responses[i].work_id);
		/* Find active work with matching ID */
		next_read_a = knc->read_a;
		knc_active_fifo_inc_idx(&next_read_a);
		while (next_read_a != knc->write_a) {
			if (knc->active_fifo[next_read_a].work_id ==
			    rxbuf->responses[i].work_id)
				break;
			/* check for stale works */
			us = timediff(&now,
				      &knc->active_fifo[next_read_a].begin);
			if ((us < 0) || (us >= WORK_STALE_US)) {
				applog(LOG_DEBUG,
				       "KnC spi: remove stale work %u",
				       knc->active_fifo[next_read_a].work_id);
				work = knc->active_fifo[next_read_a].work;
				knc_active_fifo_inc_idx(&knc->read_a);
				work_completed(cgpu, work);
				/* compact: move the old FIFO head into the
				 * slot freed by the stale entry */
				if (next_read_a != knc->read_a) {
					memcpy(&(knc->active_fifo[next_read_a]),
					       &(knc->active_fifo[knc->read_a]),
					       sizeof(struct active_work));
				}
				knc->active_fifo[knc->read_a].work = NULL;
			}
			knc_active_fifo_inc_idx(&next_read_a);
		}
		if (next_read_a == knc->write_a)
			continue;	/* matching work_id not found */
		applog(LOG_DEBUG, "KnC spi: response work %u found",
		       rxbuf->responses[i].work_id);
		work = knc->active_fifo[next_read_a].work;

		if (rxbuf->responses[i].type == RESPONSE_TYPE_NONCE_FOUND) {
			if (NULL != thr) {
				int cidx = rxbuf->responses[i].asic * 256 +
					   rxbuf->responses[i].core;
				if (submit_nonce(thr, work,
						 rxbuf->responses[i].nonce)) {
					/* good nonce: reset this core's
					 * error bookkeeping */
					if (cidx < (int)sizeof(knc->hwerrs)) {
						knc->hwerrs[cidx] = 0;
						knc->disa_cnt[cidx] = 0;
						knc->hwerr_work_id[cidx] = 0xFFFFFFFF;
					}
					successful++;
				} else {
					/* HW error: counted at most once per
					 * work_id per core */
					if ((cidx < (int)sizeof(knc->hwerrs)) &&
					    (knc->hwerr_work_id[cidx] != rxbuf->responses[i].work_id)) {
						knc->hwerr_work_id[cidx] = rxbuf->responses[i].work_id;
						if (++(knc->hwerrs[cidx]) >= HW_ERR_LIMIT) {
							struct core_disa_data *core;

							core = &knc->disa_cores_fifo[knc->write_d];
							core->disa_begin = now;
							core->asic = rxbuf->responses[i].asic;
							core->core = rxbuf->responses[i].core;
							disable_core(core->asic, core->core);
							if (++(knc->disa_cnt[cidx]) >= DISA_ERR_LIMIT) {
								/* NOTE(review): write_d is NOT advanced here,
								 * so the core is never re-enabled — presumably
								 * intentional ("permanently"); confirm. */
								applog(LOG_WARNING,
								       "KnC: core %u-%u was disabled permanently", core->asic, core->core);
							} else {
								applog(LOG_WARNING,
								       "KnC: core %u-%u was disabled due to %u HW errors in a row",
								       core->asic, core->core, HW_ERR_LIMIT);
								knc_disa_cores_fifo_inc_idx(&knc->write_d);
							}
						}
					}
				};
			}
			continue;	/* NONCE_FOUND does not retire the work */
		}
		/* Work completed */
		knc_active_fifo_inc_idx(&knc->read_a);
		work_completed(cgpu, work);
		if (next_read_a != knc->read_a) {
			memcpy(&(knc->active_fifo[next_read_a]),
			       &(knc->active_fifo[knc->read_a]),
			       sizeof(struct active_work));
		}
		knc->active_fifo[knc->read_a].work = NULL;
	}

	return ((uint64_t)successful) * 0x100000000UL;
}
/* Send flush command via SPI */
static int _internal_knc_flush_fpga(struct knc_state *knc)
{
	int len;

	spi_txbuf[0].cmd = CMD_FLUSH_QUEUE;
	spi_txbuf[0].queue_id = 0;	/* at the moment we have one and only queue #0 */
	len = spi_transfer(knc->ctx, (uint8_t *)spi_txbuf,
			   (uint8_t *)&spi_rxbuf, sizeof(struct spi_request));
	if (len != sizeof(struct spi_request))
		return -1;	/* short or failed transfer */

	/* NOTE(review): converts the transferred byte count into a count of
	 * spi_response slots; since len equals sizeof(struct spi_request)
	 * here, the result is a fixed constant — verify against the FPGA
	 * protocol that this is the intended return value. */
	len /= sizeof(struct spi_response);
	return len;
}
  471. static bool knc_detect_one(struct spidev_context *ctx)
  472. {
  473. /* Scan device for ASICs */
  474. int chip_id, devices = 0;
  475. struct cgpu_info *cgpu;
  476. struct knc_state *knc;
  477. for (chip_id = 0; chip_id < MAX_ASICS; ++chip_id) {
  478. /* TODO: perform the ASIC test/detection */
  479. ++devices;
  480. }
  481. if (!devices) {
  482. applog(LOG_INFO, "SPI detected, but not KnCminer ASICs");
  483. return false;
  484. }
  485. applog(LOG_INFO, "Found a KnC miner with %d ASICs", devices);
  486. cgpu = calloc(1, sizeof(*cgpu));
  487. knc = calloc(1, sizeof(*knc));
  488. if (!cgpu || !knc) {
  489. applog(LOG_ERR, "KnC miner detected, but failed to allocate memory");
  490. return false;
  491. }
  492. knc->ctx = ctx;
  493. knc->devices = devices;
  494. knc->read_q = 0;
  495. knc->write_q = 1;
  496. knc->read_a = 0;
  497. knc->write_a = 1;
  498. knc->read_d = 0;
  499. knc->write_d = 1;
  500. knc->salt = rand();
  501. mutex_init(&knc->lock);
  502. memset(knc->hwerr_work_id, 0xFF, sizeof(knc->hwerr_work_id));
  503. _internal_knc_flush_fpga(knc);
  504. cgpu->drv = &knc_drv;
  505. cgpu->name = "KnCminer";
  506. cgpu->threads = 1; // .. perhaps our number of devices?
  507. cgpu->device_data = knc;
  508. add_cgpu(cgpu);
  509. return true;
  510. }
  511. // http://www.concentric.net/~Ttwang/tech/inthash.htm
  512. static unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
  513. {
  514. a = a - b; a = a - c; a = a ^ (c >> 13);
  515. b = b - c; b = b - a; b = b ^ (a << 8);
  516. c = c - a; c = c - b; c = c ^ (b >> 13);
  517. a = a - b; a = a - c; a = a ^ (c >> 12);
  518. b = b - c; b = b - a; b = b ^ (a << 16);
  519. c = c - a; c = c - b; c = c ^ (b >> 5);
  520. a = a - b; a = a - c; a = a ^ (c >> 3);
  521. b = b - c; b = b - a; b = b ^ (a << 10);
  522. c = c - a; c = c - b; c = c ^ (b >> 15);
  523. return c;
  524. }
  525. /* Probe devices and register with add_cgpu */
  526. void knc_detect(bool __maybe_unused hotplug)
  527. {
  528. int idx;
  529. srand(mix(clock(), time(NULL), getpid()));
  530. /* Loop through all possible SPI interfaces */
  531. for (idx = 0; idx < MAX_SPIS; ++idx) {
  532. struct spidev_context *ctx = spi_new(idx + 1);
  533. if (ctx != NULL) {
  534. if (!knc_detect_one(ctx))
  535. spi_free(ctx);
  536. }
  537. }
  538. }
/* return value is number of nonces that have been checked since
 * previous call
 */
static int64_t knc_scanwork(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	int len, num, next_read_q;
	int64_t ret;

	applog(LOG_DEBUG, "KnC running scanwork");

	/* Re-enable any core whose disable period has expired */
	knc_check_disabled_cores(knc);

	/* Prepare tx buffer */
	memset(spi_txbuf, 0, sizeof(spi_txbuf));
	num = 0;

	mutex_lock(&knc->lock);
	/* Pack every pending (not yet accepted) queued work into the batch */
	next_read_q = knc->read_q;
	knc_queued_fifo_inc_idx(&next_read_q);
	while (next_read_q != knc->write_q) {
		knc_work_from_queue_to_spi(knc, &knc->queued_fifo[next_read_q],
					   &spi_txbuf[num]);
		knc_queued_fifo_inc_idx(&next_read_q);
		++num;
	}
	/* knc->read_q is advanced in knc_process_response, not here */

	len = spi_transfer(knc->ctx, (uint8_t *)spi_txbuf,
			   (uint8_t *)&spi_rxbuf, sizeof(spi_txbuf));
	if (len != sizeof(spi_rxbuf)) {
		ret = -1;	/* SPI transfer failed or was short */
		goto out_unlock;
	}

	applog(LOG_DEBUG, "KnC spi: %d works in request", num);

	ret = knc_process_response(thr, cgpu, &spi_rxbuf);
out_unlock:
	mutex_unlock(&knc->lock);
	return ret;
}
  575. static bool knc_queue_full(struct cgpu_info *cgpu)
  576. {
  577. struct knc_state *knc = cgpu->device_data;
  578. int queue_full = false;
  579. struct work *work;
  580. applog(LOG_DEBUG, "KnC running queue full");
  581. mutex_lock(&knc->lock);
  582. if (knc_queued_fifo_full(knc)) {
  583. queue_full = true;
  584. goto out_unlock;
  585. }
  586. work = get_queued(cgpu);
  587. if (!work)
  588. goto out_unlock;
  589. knc->queued_fifo[knc->write_q].work = work;
  590. knc_queued_fifo_inc_idx(&(knc->write_q));
  591. if (knc_queued_fifo_full(knc))
  592. queue_full = true;
  593. out_unlock:
  594. mutex_unlock(&knc->lock);
  595. return queue_full;
  596. }
/* Driver flush_work hook: retire every queued and active work, then
 * tell the FPGA to drop its own queue.  Responses returned by the flush
 * command are still processed, with thr == NULL so no nonces are
 * submitted. */
static void knc_flush_work(struct cgpu_info *cgpu)
{
	struct knc_state *knc = cgpu->device_data;
	int len, next_read_q, next_read_a;
	struct work *work;

	applog(LOG_ERR, "KnC running flushwork");

	mutex_lock(&knc->lock);
	/* Drain queued works */
	next_read_q = knc->read_q;
	knc_queued_fifo_inc_idx(&next_read_q);
	while (next_read_q != knc->write_q) {
		work = knc->queued_fifo[next_read_q].work;
		work_completed(cgpu, work);
		knc->queued_fifo[next_read_q].work = NULL;
		knc->read_q = next_read_q;
		knc_queued_fifo_inc_idx(&next_read_q);
	}
	/* Drain active works */
	next_read_a = knc->read_a;
	knc_active_fifo_inc_idx(&next_read_a);
	while (next_read_a != knc->write_a) {
		work = knc->active_fifo[next_read_a].work;
		work_completed(cgpu, work);
		knc->active_fifo[next_read_a].work = NULL;
		knc->read_a = next_read_a;
		knc_active_fifo_inc_idx(&next_read_a);
	}
	/* Ask the FPGA to flush its queue as well */
	len = _internal_knc_flush_fpga(knc);
	if (len > 0)
		knc_process_response(NULL, cgpu, &spi_rxbuf);
	mutex_unlock(&knc->lock);
}
/* cgminer driver registration table for KnC devices */
struct device_drv knc_drv = {
	.drv_id = DRIVER_knc,
	.dname = "KnCminer",
	.name = "KnC",
	.drv_detect = knc_detect,	// Probe for devices, add with add_cgpu
	.hash_work = hash_queued_work,
	.scanwork = knc_scanwork,
	.queue_full = knc_queue_full,
	.flush_work = knc_flush_work,
};