/*
 * cgminer driver for KnCminer devices
 *
 * Copyright 2014 KnCminer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
#include <stdlib.h>
#include <assert.h>
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/types.h>
#include <linux/spi/spidev.h>
#include <zlib.h>

#include "deviceapi.h"
#include "logging.h"
#include "miner.h"

#include "knc-asic/knc-transport.h"
#include "knc-asic/knc-asic.h"
#define MAX_ASICS 6
#define DIES_PER_ASIC 4
#define MAX_CORES_PER_DIE 360
#define WORKS_PER_CORE 3

/* A core reporting more than CORE_ERROR_LIMIT errors within one
 * CORE_ERROR_INTERVAL (seconds) is disabled for CORE_ERROR_DISABLE_TIME
 * seconds. CORE_SUBMIT_MIN_TIME is the minimum time in seconds between
 * work submissions to the same core; CORE_TIMEOUT is how long a core may
 * sit on queued work without progress before it is sent clean work.
 * SCAN_ADJUST_RANGE controls how forced work redistribution is staggered
 * across cores after a flush (see knc_scanwork). */
#define CORE_ERROR_LIMIT 30
#define CORE_ERROR_INTERVAL 30
#define CORE_ERROR_DISABLE_TIME (5*60)
#define CORE_SUBMIT_MIN_TIME 2
#define CORE_TIMEOUT 20
#define SCAN_ADJUST_RANGE 32
BFG_REGISTER_DRIVER(kncasic_drv)

static struct timeval now;

static const struct timeval core_check_interval = {
	CORE_ERROR_INTERVAL, 0
};
static const struct timeval core_disable_interval = {
	CORE_ERROR_DISABLE_TIME, 0
};
static const struct timeval core_submit_interval = {
	CORE_SUBMIT_MIN_TIME, 0
};
static const struct timeval core_timeout_interval = {
	CORE_TIMEOUT, 0
};
struct knc_die;
struct knc_core_state {
	int generation;
	int core;
	int coreid;
	struct knc_die *die;
	struct {
		int slot;
		struct work *work;
	} workslot[WORKS_PER_CORE]; /* active, next */
	int transfer_stamp;
	struct knc_report report;
	struct {
		int slot;
		uint32_t nonce;
	} last_nonce;
	uint32_t works;
	uint32_t shares;
	uint32_t errors;
	uint32_t completed;
	int last_slot;
	uint32_t errors_now;
	struct timeval disabled_until;
	struct timeval hold_work_until;
	struct timeval timeout;
	bool inuse;
};
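
/*
 * Work slot bookkeeping, as maintained by knc_core_process_report():
 *   workslot[0] - work the core is currently hashing (active)
 *   workslot[1] - work the core has accepted as next
 *   workslot[2] - work submitted over SPI but not yet acknowledged
 * Slot numbers are the ASIC-side identifiers; slot 0 means "no work".
 */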
struct knc_state;
struct knc_die {
	int channel;
	int die;
	int version;
	int cores;
	struct knc_state *knc;
	struct knc_core_state *core;
};
#define MAX_SPI_SIZE (4096)
#define MAX_SPI_RESPONSES (MAX_SPI_SIZE / (2 + 4 + 1 + 1 + 1 + 4))
#define MAX_SPI_MESSAGE (128)
#define KNC_SPI_BUFFERS (3)
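
/* MAX_SPI_RESPONSES bounds the number of queued messages per buffer by
 * dividing the buffer size by what appears to be the smallest framed
 * message (control, header, CRC, ACK and padding bytes; compare the
 * msglen calculation in knc_transfer() below). */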
struct knc_state {
	struct cgpu_info *cgpu;
	void *ctx;
	int generation; /* work/block generation, incremented on each flush invalidating older works */
	int dies;
	struct knc_die die[MAX_ASICS*DIES_PER_ASIC];
	int cores;
	int scan_adjust;
	int startup;

	/* Statistics */
	uint64_t shares; /* diff1 shares reported by hardware */
	uint64_t works; /* Work units submitted */
	uint64_t completed; /* Work units completed */
	uint64_t errors; /* Hardware & communication errors */
	struct timeval next_error_interval;
	/* End of statistics */

	/* SPI communications thread */
	pthread_mutex_t spi_qlock; /* SPI queue status lock */
	struct thr_info spi_thr; /* SPI I/O thread */
	pthread_cond_t spi_qcond; /* SPI queue change wakeup */

	struct knc_spi_buffer {
		enum {
			KNC_SPI_IDLE = 0,
			KNC_SPI_PENDING,
			KNC_SPI_DONE
		} state;
		int size;
		uint8_t txbuf[MAX_SPI_SIZE];
		uint8_t rxbuf[MAX_SPI_SIZE];
		int responses;
		struct knc_spi_response {
			int request_length;
			int response_length;
			enum {
				KNC_UNKNOWN = 0,
				KNC_NO_RESPONSE,
				KNC_SETWORK,
				KNC_REPORT,
				KNC_INFO
			} type;
			struct knc_core_state *core;
			uint32_t data;
			int offset;
		} response_info[MAX_SPI_RESPONSES];
	} spi_buffer[KNC_SPI_BUFFERS];
	int send_buffer;
	int read_buffer;
	int send_buffer_count;
	int read_buffer_count;
	/* end SPI thread */

	/* Do not add anything below here!! core[] must be last */
	struct knc_core_state core[];
};
int opt_knc_device_bus = -1;
char *knc_log_file = NULL;
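
/*
 * SPI transfer pipeline: KNC_SPI_BUFFERS buffers are used as a ring.
 * The miner thread fills spi_buffer[send_buffer] via knc_transfer() and
 * marks it KNC_SPI_PENDING in knc_flush()/knc_sync(); the dedicated SPI
 * thread below performs the transfers in order and marks buffers
 * KNC_SPI_DONE; knc_process_responses() then consumes them from
 * read_buffer and recycles them as KNC_SPI_IDLE. send_buffer_count and
 * read_buffer_count are free-running counters used as transfer stamps.
 */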
static void *knc_spi(void *thr_data)
{
	struct cgpu_info *cgpu = thr_data;
	struct knc_state *knc = cgpu->device_data;
	int buffer = 0;

	pthread_mutex_lock(&knc->spi_qlock);
	while (!cgpu->shutdown) {
		int this_buffer = buffer;
		while (knc->spi_buffer[buffer].state != KNC_SPI_PENDING && !cgpu->shutdown)
			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
		pthread_mutex_unlock(&knc->spi_qlock);
		if (cgpu->shutdown)
			return NULL;
		knc_trnsp_transfer(knc->ctx, knc->spi_buffer[buffer].txbuf, knc->spi_buffer[buffer].rxbuf, knc->spi_buffer[buffer].size);
		buffer += 1;
		if (buffer >= KNC_SPI_BUFFERS)
			buffer = 0;
		pthread_mutex_lock(&knc->spi_qlock);
		knc->spi_buffer[this_buffer].state = KNC_SPI_DONE;
		pthread_cond_signal(&knc->spi_qcond);
	}
	pthread_mutex_unlock(&knc->spi_qlock);
	return NULL;
}
static void knc_process_responses(struct thr_info *thr);

static void knc_flush(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];

	if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) {
		pthread_mutex_lock(&knc->spi_qlock);
		buffer->state = KNC_SPI_PENDING;
		pthread_cond_signal(&knc->spi_qcond);
		knc->send_buffer += 1;
		knc->send_buffer_count += 1;
		if (knc->send_buffer >= KNC_SPI_BUFFERS)
			knc->send_buffer = 0;
		buffer = &knc->spi_buffer[knc->send_buffer];
		/* Block for SPI to finish a transfer if all buffers are busy */
		while (buffer->state == KNC_SPI_PENDING) {
			applog(LOG_DEBUG, "KnC: SPI buffer full (%d), waiting for SPI thread", buffer->responses);
			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
		}
		pthread_mutex_unlock(&knc->spi_qlock);
	}
	knc_process_responses(thr);
}
static void knc_sync(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
	int sent = 0;

	pthread_mutex_lock(&knc->spi_qlock);
	if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) {
		buffer->state = KNC_SPI_PENDING;
		pthread_cond_signal(&knc->spi_qcond);
		knc->send_buffer += 1;
		knc->send_buffer_count += 1;
		if (knc->send_buffer >= KNC_SPI_BUFFERS)
			knc->send_buffer = 0;
		sent = 1;
	}
	/* Wait until the most recently queued buffer has been transferred */
	int prev_buffer = knc->send_buffer - 1;
	if (prev_buffer < 0)
		prev_buffer = KNC_SPI_BUFFERS - 1;
	buffer = &knc->spi_buffer[prev_buffer];
	while (buffer->state == KNC_SPI_PENDING)
		pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
	pthread_mutex_unlock(&knc->spi_qlock);

	/* Buffers still awaiting response processing, for the log line below */
	int pending = knc->send_buffer - knc->read_buffer;
	if (pending <= 0)
		pending += KNC_SPI_BUFFERS;
	pending -= 1 - sent;
	applog(LOG_INFO, "KnC: sync %d pending buffers", pending);

	knc_process_responses(thr);
}
static void knc_transfer(struct thr_info *thr, struct knc_core_state *core, int request_length, uint8_t *request, int response_length, int response_type, uint32_t data)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
	/* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */
	int msglen = 2 + max(request_length, 4 + response_length) + 4 + 1 + 3;

	if (buffer->size + msglen > MAX_SPI_SIZE || buffer->responses >= MAX_SPI_RESPONSES) {
		applog(LOG_INFO, "KnC: SPI buffer sent, %d messages %d bytes", buffer->responses, buffer->size);
		knc_flush(thr);
		buffer = &knc->spi_buffer[knc->send_buffer];
	}

	struct knc_spi_response *response_info = &buffer->response_info[buffer->responses];
	buffer->responses++;
	response_info->offset = buffer->size;
	response_info->type = response_type;
	response_info->request_length = request_length;
	response_info->response_length = response_length;
	response_info->core = core;
	response_info->data = data;
	buffer->size = knc_prepare_transfer(buffer->txbuf, buffer->size, MAX_SPI_SIZE, core->die->channel, request_length, request, response_length);
}
static int knc_transfer_stamp(struct knc_state *knc)
{
	return knc->send_buffer_count;
}

static int knc_transfer_completed(struct knc_state *knc, int stamp)
{
	/* signed delta math, counter wrap OK */
	return (int)(knc->read_buffer_count - stamp) >= 1;
}
static bool knc_detect_one(void *ctx)
{
	/* Scan device for ASICs */
	int channel, die, cores = 0, core;
	struct cgpu_info *cgpu;
	struct knc_state *knc;
	struct knc_die_info die_info[MAX_ASICS][DIES_PER_ASIC];

	memset(die_info, 0, sizeof(die_info));

	/* Send GETINFO to each die to detect if it is usable */
	for (channel = 0; channel < MAX_ASICS; channel++) {
		if (!knc_trnsp_asic_detect(ctx, channel))
			continue;
		for (die = 0; die < DIES_PER_ASIC; die++) {
			if (knc_detect_die(ctx, channel, die, &die_info[channel][die]) == 0)
				cores += die_info[channel][die].cores;
		}
	}

	if (!cores) {
		applog(LOG_NOTICE, "no KnCminer cores found");
		return false;
	}

	applog(LOG_ERR, "Found a KnC miner with %d cores", cores);

	cgpu = calloc(1, sizeof(*cgpu));
	knc = calloc(1, sizeof(*knc) + cores * sizeof(struct knc_core_state));
	if (!cgpu || !knc) {
		applog(LOG_ERR, "KnC miner detected, but failed to allocate memory");
		/* free(NULL) is a no-op, so this releases whichever allocation succeeded */
		free(cgpu);
		free(knc);
		return false;
	}
	knc->cgpu = cgpu;
	knc->ctx = ctx;
	knc->generation = 1;

	/* Index all cores */
	int dies = 0;
	cores = 0;
	struct knc_core_state *pcore = knc->core;
	for (channel = 0; channel < MAX_ASICS; channel++) {
		for (die = 0; die < DIES_PER_ASIC; die++) {
			if (die_info[channel][die].cores) {
				knc->die[dies].channel = channel;
				knc->die[dies].die = die;
				knc->die[dies].version = die_info[channel][die].version;
				knc->die[dies].cores = die_info[channel][die].cores;
				knc->die[dies].core = pcore;
				knc->die[dies].knc = knc;
				for (core = 0; core < knc->die[dies].cores; core++) {
					knc->die[dies].core[core].die = &knc->die[dies];
					knc->die[dies].core[core].core = core;
				}
				cores += knc->die[dies].cores;
				pcore += knc->die[dies].cores;
				dies++;
			}
		}
	}
	for (core = 0; core < cores; core++)
		knc->core[core].coreid = core;
	knc->dies = dies;
	knc->cores = cores;
	knc->startup = 2;

	cgpu->drv = &kncasic_drv;
	cgpu->name = "KnCminer";
	cgpu->threads = 1;
	cgpu->device_data = knc;

	pthread_mutex_init(&knc->spi_qlock, NULL);
	pthread_cond_init(&knc->spi_qcond, NULL);
	if (thr_info_create(&knc->spi_thr, NULL, knc_spi, (void *)cgpu)) {
		applog(LOG_ERR, "%s%i: SPI thread create failed",
		       cgpu->drv->name, cgpu->device_id);
		free(cgpu);
		free(knc);
		return false;
	}

	add_cgpu(cgpu);
	return true;
}
/* Probe devices and register with add_cgpu */
static
bool kncasic_detect_one(const char * const devpath)
{
	void *ctx = knc_trnsp_new(devpath);
	if (ctx != NULL) {
		if (!knc_detect_one(ctx))
			knc_trnsp_free(ctx);
		else
			return true;
	}
	return false;
}

static
int kncasic_detect_auto(void)
{
	return knc_detect_one(NULL) ? 1 : 0;
}

static
void kncasic_detect(void)
{
	generic_detect(&kncasic_drv, kncasic_detect_one, kncasic_detect_auto, GDF_REQUIRE_DNAME | GDF_DEFAULT_NOAUTO);
}
/* Core helper functions */
static int knc_core_hold_work(struct knc_core_state *core)
{
	return timercmp(&core->hold_work_until, &now, >);
}

static int knc_core_has_work(struct knc_core_state *core)
{
	int i;
	for (i = 0; i < WORKS_PER_CORE; i++) {
		if (core->workslot[i].slot > 0)
			return true;
	}
	return false;
}

static int knc_core_need_work(struct knc_core_state *core)
{
	return !knc_core_hold_work(core) && !core->workslot[1].work && !core->workslot[2].work;
}

static int knc_core_disabled(struct knc_core_state *core)
{
	return timercmp(&core->disabled_until, &now, >);
}

static int _knc_core_next_slot(struct knc_core_state *core)
{
	/* Avoid slot #0 and #15. #0 is "no work assigned" and #15 is seen on bad cores */
	int slot = core->last_slot + 1;
	if (slot >= 15)
		slot = 1;
	core->last_slot = slot;
	return slot;
}

static bool knc_core_slot_busy(struct knc_core_state *core, int slot)
{
	if (slot == core->report.active_slot)
		return true;
	if (slot == core->report.next_slot)
		return true;
	int i;
	for (i = 0; i < WORKS_PER_CORE; i++) {
		if (slot == core->workslot[i].slot)
			return true;
	}
	return false;
}

static int knc_core_next_slot(struct knc_core_state *core)
{
	int slot;
	do
		slot = _knc_core_next_slot(core);
	while (knc_core_slot_busy(core, slot));
	return slot;
}
static void knc_core_failure(struct knc_core_state *core)
{
	core->errors++;
	core->errors_now++;
	core->die->knc->errors++;
	if (knc_core_disabled(core))
		return;
	if (core->errors_now > CORE_ERROR_LIMIT) {
		applog(LOG_ERR, "KnC: %d.%d.%d disabled for %d seconds due to repeated hardware errors",
		       core->die->channel, core->die->die, core->core,
		       (int)core_disable_interval.tv_sec);
		timeradd(&now, &core_disable_interval, &core->disabled_until);
	}
}
static
void knc_core_handle_nonce(struct thr_info *thr, struct knc_core_state *core, int slot, uint32_t nonce)
{
	int i;

	if (!slot)
		return;
	core->last_nonce.slot = slot;
	core->last_nonce.nonce = nonce;
	if (core->die->knc->startup)
		return;
	for (i = 0; i < WORKS_PER_CORE; i++) {
		if (slot == core->workslot[i].slot && core->workslot[i].work) {
			applog(LOG_INFO, "KnC: %d.%d.%d found nonce %08x", core->die->channel, core->die->die, core->core, nonce);
			if (submit_nonce(thr, core->workslot[i].work, nonce)) {
				/* Good share */
				core->shares++;
				core->die->knc->shares++;
				/* This core is useful. Ignore any errors */
				core->errors_now = 0;
			} else {
				applog(LOG_INFO, "KnC: %d.%d.%d hwerror nonce %08x", core->die->channel, core->die->die, core->core, nonce);
				/* Bad share */
				knc_core_failure(core);
			}
		}
	}
}
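
/*
 * Report processing. A report carries up to KNC_NONCES_PER_REPORT nonce
 * entries, newest first as implied by the reverse replay below: the
 * forward scan stops at the first entry already seen (matching
 * last_nonce), then the remaining entries are replayed in reverse so
 * nonces are submitted oldest first and last_nonce ends at the newest.
 * The slot fields are then reconciled against our workslot[] bookkeeping
 * to detect completed, switched and accepted work.
 */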
static int knc_core_process_report(struct thr_info *thr, struct knc_core_state *core, uint8_t *response)
{
	struct knc_report *report = &core->report;
	knc_decode_report(response, report, core->die->version);
	bool had_event = false;

	applog(LOG_DEBUG, "KnC %d.%d.%d: Process report %d %d(%d) / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, report->next_state, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);

	int n;
	for (n = 0; n < KNC_NONCES_PER_REPORT; n++) {
		if (report->nonce[n].slot < 0)
			break;
		if (core->last_nonce.slot == report->nonce[n].slot && core->last_nonce.nonce == report->nonce[n].nonce)
			break;
	}
	while (n-- > 0)
		knc_core_handle_nonce(thr, core, report->nonce[n].slot, report->nonce[n].nonce);

	if (report->active_slot && core->workslot[0].slot != report->active_slot) {
		had_event = true;
		applog(LOG_INFO, "KnC: New work on %d.%d.%d, %d %d / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
		/* Core switched to next work */
		if (core->workslot[0].work) {
			core->die->knc->completed++;
			core->completed++;
			applog(LOG_INFO, "KnC: Work completed on core %d.%d.%d!", core->die->channel, core->die->die, core->core);
			free_work(core->workslot[0].work);
		}
		core->workslot[0] = core->workslot[1];
		core->workslot[1].work = NULL;
		core->workslot[1].slot = -1;

		/* or did it switch directly to pending work? */
		if (report->active_slot == core->workslot[2].slot) {
			applog(LOG_INFO, "KnC: New work on %d.%d.%d, %d %d %d %d (pending)", core->die->channel, core->die->die, core->core, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
			if (core->workslot[0].work)
				free_work(core->workslot[0].work);
			core->workslot[0] = core->workslot[2];
			core->workslot[2].work = NULL;
			core->workslot[2].slot = -1;
		}
	}

	if (report->next_state && core->workslot[2].slot > 0 && (core->workslot[2].slot == report->next_slot || report->next_slot == -1)) {
		had_event = true;
		applog(LOG_INFO, "KnC: Accepted work on %d.%d.%d, %d %d %d %d (pending)", core->die->channel, core->die->die, core->core, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
		/* core accepted next work */
		if (core->workslot[1].work)
			free_work(core->workslot[1].work);
		core->workslot[1] = core->workslot[2];
		core->workslot[2].work = NULL;
		core->workslot[2].slot = -1;
	}

	if (core->workslot[2].work && knc_transfer_completed(core->die->knc, core->transfer_stamp)) {
		had_event = true;
		applog(LOG_INFO, "KnC: Setwork failed on core %d.%d.%d?", core->die->channel, core->die->die, core->core);
		free_work(core->workslot[2].work);
		/* Clear the pointer too, or knc_core_need_work() would see stale
		 * pending work and the freed work could be referenced again */
		core->workslot[2].work = NULL;
		core->workslot[2].slot = -1;
	}

	if (had_event)
		applog(LOG_INFO, "KnC: Exit report on %d.%d.%d, %d %d / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);

	return 0;
}
static void knc_process_responses(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->read_buffer];

	while (buffer->state == KNC_SPI_DONE) {
		int i;
		for (i = 0; i < buffer->responses; i++) {
			struct knc_spi_response *response_info = &buffer->response_info[i];
			uint8_t *rxbuf = &buffer->rxbuf[response_info->offset];
			struct knc_core_state *core = response_info->core;
			int status = knc_decode_response(rxbuf, response_info->request_length, &rxbuf, response_info->response_length);
			/* Invert KNC_ACCEPTED to simplify the logic below */
			if (response_info->type == KNC_SETWORK && !KNC_IS_ERROR(status))
				status ^= KNC_ACCEPTED;
			if (core->die->version != KNC_VERSION_JUPITER && status != 0) {
				applog(LOG_ERR, "KnC %d.%d.%d: Communication error (%x / %d)", core->die->channel, core->die->die, core->core, status, i);
				if (status == KNC_ACCEPTED) {
					/* Core refused our work vector. Likely out of sync. Reset it */
					core->inuse = false;
				}
				knc_core_failure(core);
			}
			switch (response_info->type) {
			case KNC_REPORT:
			case KNC_SETWORK:
				/* Should we handle a failed SETWORK explicitly? Or simply rely on the "next state not loaded" indication in reports? */
				knc_core_process_report(thr, core, rxbuf);
				break;
			}
		}

		buffer->state = KNC_SPI_IDLE;
		buffer->responses = 0;
		buffer->size = 0;
		knc->read_buffer += 1;
		knc->read_buffer_count += 1;
		if (knc->read_buffer >= KNC_SPI_BUFFERS)
			knc->read_buffer = 0;
		buffer = &knc->spi_buffer[knc->read_buffer];
	}
}
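
/*
 * Work submission. Jupiter cores take a bare setwork (optionally preceded
 * by a double halt to clear previously queued work) and give no response;
 * Neptune cores get the clean flag encoded in the setwork request and
 * return a status report, which is queued as a KNC_SETWORK response and
 * later reconciled in knc_core_process_report().
 */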
static int knc_core_send_work(struct thr_info *thr, struct knc_core_state *core, struct work *work, bool clean)
{
	struct knc_state *knc = core->die->knc;
	int request_length = 4 + 1 + 6*4 + 3*4 + 8*4;
	uint8_t request[request_length];
	int response_length = 1 + 1 + (1 + 4) * 5;

	int slot = knc_core_next_slot(core);
	if (slot < 0)
		goto error;
	applog(LOG_INFO, "KnC setwork%s %d.%d.%d = %d, %d %d / %d %d %d", clean ? " CLEAN" : "", core->die->channel, core->die->die, core->core, slot, core->report.active_slot, core->report.next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
	if (!clean && !knc_core_need_work(core))
		goto error;

	switch (core->die->version) {
	case KNC_VERSION_JUPITER:
		if (clean) {
			/* Double halt to get rid of any previous queued work */
			request_length = knc_prepare_jupiter_halt(request, core->die->die, core->core);
			knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
			knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
		}
		request_length = knc_prepare_jupiter_setwork(request, core->die->die, core->core, slot, work);
		knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
		break;
	case KNC_VERSION_NEPTUNE:
		request_length = knc_prepare_neptune_setwork(request, core->die->die, core->core, slot, work, clean);
		knc_transfer(thr, core, request_length, request, response_length, KNC_SETWORK, slot);
		break;
	default:
		goto error;
	}

	core->workslot[2].work = work;
	core->workslot[2].slot = slot;
	core->works++;
	core->die->knc->works++;
	core->transfer_stamp = knc_transfer_stamp(knc);
	core->inuse = true;

	timeradd(&now, &core_submit_interval, &core->hold_work_until);
	timeradd(&now, &core_timeout_interval, &core->timeout);

	return 0;

error:
	applog(LOG_INFO, "KnC: %d.%d.%d Failed to setwork (%d)",
	       core->die->channel, core->die->die, core->core, core->errors_now);
	knc_core_failure(core);
	free_work(work);
	return -1;
}
static int knc_core_request_report(struct thr_info *thr, struct knc_core_state *core)
{
	int request_length = 4;
	uint8_t request[request_length];
	int response_length = 1 + 1 + (1 + 4) * 5;

	applog(LOG_DEBUG, "KnC: %d.%d.%d Request report", core->die->channel, core->die->die, core->core);

	request_length = knc_prepare_report(request, core->die->die, core->core);

	switch (core->die->version) {
	case KNC_VERSION_JUPITER:
		response_length = 1 + 1 + (1 + 4);
		knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
		return 0;
	case KNC_VERSION_NEPTUNE:
		knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
		return 0;
	}

	/* Unknown core version falls through to the failure path */
	applog(LOG_INFO, "KnC: Failed to scan work report");
	knc_core_failure(core);
	return -1;
}
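
/*
 * Main per-pass flow: consume any finished SPI buffers, reset the
 * per-core error counters once per CORE_ERROR_INTERVAL, then walk all
 * cores, either queueing new work (forcing a clean restart for idle,
 * timed-out or flushed cores) or requesting a status report, and finally
 * hand the assembled SPI buffer to the transfer thread.
 */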
/* return value is number of nonces that have been checked since
 * previous call
 */
static int64_t knc_scanwork(struct thr_info *thr)
{
/* Which per-device counter the reported hashrate is accounted in */
#define KNC_COUNT_UNIT shares
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	uint32_t last_count = knc->KNC_COUNT_UNIT;

	applog(LOG_DEBUG, "KnC running scanwork");

	gettimeofday(&now, NULL);

	knc_trnsp_periodic_check(knc->ctx);

	int i;

	knc_process_responses(thr);

	if (timercmp(&knc->next_error_interval, &now, <)) {
		/* Reset hw error limiter every check interval */
		timeradd(&now, &core_check_interval, &knc->next_error_interval);
		for (i = 0; i < knc->cores; i++) {
			struct knc_core_state *core = &knc->core[i];
			core->errors_now = 0;
		}
	}

	for (i = 0; i < knc->cores; i++) {
		struct knc_core_state *core = &knc->core[i];
		bool clean = !core->inuse;
		if (knc_core_disabled(core))
			continue;
		if (core->generation != knc->generation) {
			applog(LOG_INFO, "KnC %d.%d.%d flush gen=%d/%d", core->die->channel, core->die->die, core->core, core->generation, knc->generation);
			/* clean set state, forget everything */
			int slot;
			for (slot = 0; slot < WORKS_PER_CORE; slot++) {
				if (core->workslot[slot].work) {
					free_work(core->workslot[slot].work);
					core->workslot[slot].work = NULL;
				}
				core->workslot[slot].slot = -1;
			}
			core->hold_work_until = now;
			core->generation = knc->generation;
		} else if (timercmp(&core->timeout, &now, <=) && (core->workslot[0].slot > 0 || core->workslot[1].slot > 0 || core->workslot[2].slot > 0)) {
			applog(LOG_ERR, "KnC %d.%d.%d timeout", core->die->channel, core->die->die, core->core);
			clean = true;
		}
		if (!knc_core_has_work(core))
			clean = true;
		if (core->workslot[0].slot < 0 && core->workslot[1].slot < 0 && core->workslot[2].slot < 0)
			clean = true;
		/* Stagger forced clean work across cores, one group per pass;
		 * a no-op once scan_adjust reaches SCAN_ADJUST_RANGE */
		if (i % SCAN_ADJUST_RANGE == knc->scan_adjust)
			clean = true;
		if ((knc_core_need_work(core) || clean) && !knc->startup) {
			struct work *work = get_work(thr);
			knc_core_send_work(thr, core, work, clean);
		} else {
			knc_core_request_report(thr, core);
		}
	}

	/* knc->startup delays initial work submission until we have had a chance to query all cores on their current status, to avoid slot number collisions with an earlier run */
	if (knc->startup)
		knc->startup--;
	else if (knc->scan_adjust < SCAN_ADJUST_RANGE)
		knc->scan_adjust++;

	knc_flush(thr);

	return (int64_t)(knc->KNC_COUNT_UNIT - last_count) * 0x100000000UL;
}
static void knc_flush_work(struct cgpu_info *cgpu)
{
	struct knc_state *knc = cgpu->device_data;

	applog(LOG_INFO, "KnC running flushwork");

	knc->generation++;
	knc->scan_adjust = 0;
	/* Skip generation 0 on wraparound; cores start out at generation 0 */
	if (!knc->generation)
		knc->generation++;
}
static void knc_zero_stats(struct cgpu_info *cgpu)
{
	int core;
	struct knc_state *knc = cgpu->device_data;

	knc->shares = 0;
	knc->completed = 0;
	knc->works = 0;
	knc->errors = 0;
	for (core = 0; core < knc->cores; core++) {
		knc->core[core].works = 0;
		knc->core[core].errors = 0;
		knc->core[core].shares = 0;
		knc->core[core].completed = 0;
	}
}
static struct api_data *knc_api_stats(struct cgpu_info *cgpu)
{
	struct knc_state *knc = cgpu->device_data;
	struct api_data *root = NULL;
	int core, n;
	char label[256];

	root = api_add_int(root, "dies", &knc->dies, 1);
	root = api_add_int(root, "cores", &knc->cores, 1);
	root = api_add_uint64(root, "shares", &knc->shares, 1);
	root = api_add_uint64(root, "works", &knc->works, 1);
	root = api_add_uint64(root, "completed", &knc->completed, 1);
	root = api_add_uint64(root, "errors", &knc->errors, 1);

	/* Active cores */
	int active = knc->cores;
	for (core = 0; core < knc->cores; core++) {
		if (knc_core_disabled(&knc->core[core]))
			active -= 1;
	}
	root = api_add_int(root, "active", &active, 1);

	/* Per ASIC/die data */
	for (n = 0; n < knc->dies; n++) {
		struct knc_die *die = &knc->die[n];

#define knc_api_die_string(name, value) do { \
		snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
		root = api_add_string(root, label, value, 1); \
	} while(0)
#define knc_api_die_int(name, value) do { \
		snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
		uint64_t v = value; \
		root = api_add_uint64(root, label, &v, 1); \
	} while(0)

		/* Model */
		{
			char *model = "?";
			switch (die->version) {
			case KNC_VERSION_JUPITER:
				model = "Jupiter";
				break;
			case KNC_VERSION_NEPTUNE:
				model = "Neptune";
				break;
			}
			knc_api_die_string("model", model);
			knc_api_die_int("cores", die->cores);
		}

		/* Core based stats */
		{
			uint64_t errors = 0;
			uint64_t shares = 0;
			uint64_t works = 0;
			uint64_t completed = 0;
			char coremap[die->cores + 1];

			for (core = 0; core < die->cores; core++) {
				coremap[core] = knc_core_disabled(&die->core[core]) ? '0' : '1';
				works += die->core[core].works;
				shares += die->core[core].shares;
				errors += die->core[core].errors;
				completed += die->core[core].completed;
			}
			coremap[die->cores] = '\0';
			knc_api_die_int("errors", errors);
			knc_api_die_int("shares", shares);
			knc_api_die_int("works", works);
			knc_api_die_int("completed", completed);
			knc_api_die_string("coremap", coremap);
		}
	}

	return root;
}
static
void hash_driver_work(struct thr_info * const thr)
{
	struct cgpu_info * const cgpu = thr->cgpu;
	struct device_drv * const drv = cgpu->drv;

	while (likely(!cgpu->shutdown))
	{
		int64_t hashes = drv->scanwork(thr);
		if (unlikely(!hashes_done2(thr, hashes, NULL)))
			break;
		if (unlikely(thr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(thr);
	}
}
struct device_drv kncasic_drv = {
	.dname = "kncasic",
	.name = "KNC",
	.drv_detect = kncasic_detect,
	.minerloop = hash_driver_work,
	.flush_work = knc_flush_work,
	.scanwork = knc_scanwork,
	.zero_stats = knc_zero_stats,
	.get_api_stats = knc_api_stats,
};