driver-kncasic.c

/*
 * cgminer driver for KnCminer devices
 *
 * Copyright 2014 KnCminer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include <stdlib.h>
#include <assert.h>
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/types.h>
#include <linux/spi/spidev.h>
#include <zlib.h>

#include "deviceapi.h"
#include "logging.h"
#include "miner.h"

#include "knc-asic/knc-transport.h"
#include "knc-asic/knc-asic.h"

#define MAX_ASICS               6
#define DIES_PER_ASIC           4
#define MAX_CORES_PER_DIE       360
#define WORKS_PER_CORE          3
#define CORE_ERROR_LIMIT        30
#define CORE_ERROR_INTERVAL     30
#define CORE_ERROR_DISABLE_TIME (5 * 60)
#define CORE_SUBMIT_MIN_TIME    2
#define CORE_TIMEOUT            20
#define SCAN_ADJUST_RANGE       32
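
/*
 * Tuning notes (derived from how these constants are used below): a core
 * that racks up more than CORE_ERROR_LIMIT hardware errors within one
 * CORE_ERROR_INTERVAL is taken out of service for CORE_ERROR_DISABLE_TIME
 * seconds.  SCAN_ADJUST_RANGE staggers the forced clean setworks after a
 * work restart so each scanwork pass refreshes only every 32nd core,
 * presumably to spread the SPI load.
 */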

BFG_REGISTER_DRIVER(kncasic_drv)

static struct timeval now;

static const struct timeval core_check_interval = {
    CORE_ERROR_INTERVAL, 0
};
static const struct timeval core_disable_interval = {
    CORE_ERROR_DISABLE_TIME, 0
};
static const struct timeval core_submit_interval = {
    CORE_SUBMIT_MIN_TIME, 0
};
static const struct timeval core_timeout_interval = {
    CORE_TIMEOUT, 0
};

struct knc_die;

struct knc_core_state {
    int generation;
    int core;
    int coreid;
    struct knc_die *die;
    struct {
        int slot;
        struct work *work;
    } workslot[WORKS_PER_CORE]; /* [0] active, [1] next, [2] pending */
    int transfer_stamp;
    struct knc_report report;
    struct {
        int slot;
        uint32_t nonce;
    } last_nonce;
    uint32_t works;
    uint32_t shares;
    uint32_t errors;
    uint32_t completed;
    int last_slot;
    uint32_t errors_now;
    struct timeval disabled_until;
    struct timeval hold_work_until;
    struct timeval timeout;
    bool inuse;
    struct cgpu_info *proc;
};

struct knc_state;

struct knc_die {
    int channel;
    int die;
    int version;
    int cores;
    struct knc_state *knc;
    struct knc_core_state *core;
};

#define MAX_SPI_SIZE      (4096)
#define MAX_SPI_RESPONSES (MAX_SPI_SIZE / (2 + 4 + 1 + 1 + 1 + 4))
#define MAX_SPI_MESSAGE   (128)
#define KNC_SPI_BUFFERS   (3)
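
/*
 * SPI traffic is pipelined through KNC_SPI_BUFFERS staging buffers used as
 * a ring: knc_transfer() appends messages to the current send buffer,
 * knc_flush() hands full buffers to the SPI thread, and
 * knc_process_responses() drains finished buffers in order.
 * MAX_SPI_RESPONSES appears to be sized for the minimum message length, so
 * the response_info[] table cannot overflow before txbuf does.
 */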
struct knc_state {
    struct cgpu_info *cgpu;
    void *ctx;
    int generation; /* work/block generation, incremented on each flush invalidating older works */
    int dies;
    struct knc_die die[MAX_ASICS * DIES_PER_ASIC];
    int cores;
    int scan_adjust;
    int startup;

    /* Statistics */
    uint64_t shares;    /* diff1 shares reported by hardware */
    uint64_t works;     /* Work units submitted */
    uint64_t completed; /* Work units completed */
    uint64_t errors;    /* Hardware & communication errors */
    struct timeval next_error_interval;
    /* End of statistics */

    /* SPI communications thread */
    pthread_mutex_t spi_qlock; /* SPI queue status lock */
    struct thr_info spi_thr;   /* SPI I/O thread */
    pthread_cond_t spi_qcond;  /* SPI queue change wakeup */

    struct knc_spi_buffer {
        enum {
            KNC_SPI_IDLE = 0,
            KNC_SPI_PENDING,
            KNC_SPI_DONE
        } state;
        int size;
        uint8_t txbuf[MAX_SPI_SIZE];
        uint8_t rxbuf[MAX_SPI_SIZE];
        int responses;
        struct knc_spi_response {
            int request_length;
            int response_length;
            enum {
                KNC_UNKNOWN = 0,
                KNC_NO_RESPONSE,
                KNC_SETWORK,
                KNC_REPORT,
                KNC_INFO
            } type;
            struct knc_core_state *core;
            uint32_t data;
            int offset;
        } response_info[MAX_SPI_RESPONSES];
    } spi_buffer[KNC_SPI_BUFFERS];
    int send_buffer;
    int read_buffer;
    int send_buffer_count;
    int read_buffer_count;
    /* end SPI thread */

    /* Do not add anything below here!! core[] must be last */
    struct knc_core_state core[];
};

int opt_knc_device_bus = -1;
char *knc_log_file = NULL;
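
/*
 * SPI I/O thread.  It sleeps on spi_qcond until the next ring buffer is
 * marked KNC_SPI_PENDING, runs the blocking transfer with the queue lock
 * dropped, then flags the buffer KNC_SPI_DONE and wakes anyone blocked in
 * knc_flush().  Buffers are serviced strictly in ring order.
 */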
static void *knc_spi(void *thr_data)
{
    struct cgpu_info *cgpu = thr_data;
    struct knc_state *knc = cgpu->device_data;
    int buffer = 0;

    pthread_mutex_lock(&knc->spi_qlock);
    while (!cgpu->shutdown) {
        int this_buffer = buffer;
        while (knc->spi_buffer[buffer].state != KNC_SPI_PENDING && !cgpu->shutdown)
            pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
        pthread_mutex_unlock(&knc->spi_qlock);
        if (cgpu->shutdown)
            return NULL;
        knc_trnsp_transfer(knc->ctx, knc->spi_buffer[buffer].txbuf, knc->spi_buffer[buffer].rxbuf, knc->spi_buffer[buffer].size);
        buffer += 1;
        if (buffer >= KNC_SPI_BUFFERS)
            buffer = 0;
        pthread_mutex_lock(&knc->spi_qlock);
        knc->spi_buffer[this_buffer].state = KNC_SPI_DONE;
        pthread_cond_signal(&knc->spi_qcond);
    }
    pthread_mutex_unlock(&knc->spi_qlock);
    return NULL;
}

static void knc_process_responses(struct thr_info *thr);
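
/*
 * Queue the current send buffer for transmission and advance the ring.
 * If the next buffer is still pending, block until the SPI thread has
 * finished with it, then pick up any completed responses.
 */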
static void knc_flush(struct thr_info *thr)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct knc_state *knc = cgpu->device_data;
    struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];

    if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) {
        pthread_mutex_lock(&knc->spi_qlock);
        buffer->state = KNC_SPI_PENDING;
        pthread_cond_signal(&knc->spi_qcond);
        knc->send_buffer += 1;
        knc->send_buffer_count += 1;
        if (knc->send_buffer >= KNC_SPI_BUFFERS)
            knc->send_buffer = 0;
        buffer = &knc->spi_buffer[knc->send_buffer];
        /* Block for SPI to finish a transfer if all buffers are busy */
        while (buffer->state == KNC_SPI_PENDING) {
            applog(LOG_DEBUG, "KnC: SPI buffer full (%d), waiting for SPI thread", buffer->responses);
            pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
        }
        pthread_mutex_unlock(&knc->spi_qlock);
    }

    knc_process_responses(thr);
}
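
/*
 * Append one request/response exchange to the current send buffer and
 * record where in the rx buffer the reply will land, so that
 * knc_process_responses() can decode it later.  Flushes first if either
 * the data area or the response_info[] table would overflow.
 */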
static void knc_transfer(struct thr_info *thr, struct knc_core_state *core, int request_length, uint8_t *request, int response_length, int response_type, uint32_t data)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct knc_state *knc = cgpu->device_data;
    struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
    /* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */
    int msglen = 2 + max(request_length, 4 + response_length) + 4 + 1 + 3;

    if (buffer->size + msglen > MAX_SPI_SIZE || buffer->responses >= MAX_SPI_RESPONSES) {
        applog(LOG_INFO, "KnC: SPI buffer sent, %d messages %d bytes", buffer->responses, buffer->size);
        knc_flush(thr);
        buffer = &knc->spi_buffer[knc->send_buffer];
    }

    struct knc_spi_response *response_info = &buffer->response_info[buffer->responses];
    buffer->responses++;
    response_info->offset = buffer->size;
    response_info->type = response_type;
    response_info->request_length = request_length;
    response_info->response_length = response_length;
    response_info->core = core;
    response_info->data = data;
    buffer->size = knc_prepare_transfer(buffer->txbuf, buffer->size, MAX_SPI_SIZE, core->die->channel, request_length, request, response_length);
}
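
/*
 * Transfer stamps.  send_buffer_count and read_buffer_count are
 * free-running counters; a core records the stamp of its setwork transfer
 * and knc_transfer_completed() later tells whether that buffer has been
 * sent and its responses processed.
 */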
static int knc_transfer_stamp(struct knc_state *knc)
{
    return knc->send_buffer_count;
}

static int knc_transfer_completed(struct knc_state *knc, int stamp)
{
    /* signed delta math, counter wrap OK */
    return (int)(knc->read_buffer_count - stamp) >= 1;
}

static bool knc_detect_one(void *ctx)
{
    /* Scan device for ASICs */
    int channel, die, cores = 0, core;
    struct cgpu_info *cgpu;
    struct knc_state *knc;
    struct knc_die_info die_info[MAX_ASICS][DIES_PER_ASIC];

    memset(die_info, 0, sizeof(die_info));

    /* Send GETINFO to each die to detect if it is usable */
    for (channel = 0; channel < MAX_ASICS; channel++) {
        if (!knc_trnsp_asic_detect(ctx, channel))
            continue;
        for (die = 0; die < DIES_PER_ASIC; die++) {
            if (knc_detect_die(ctx, channel, die, &die_info[channel][die]) == 0)
                cores += die_info[channel][die].cores;
        }
    }

    if (!cores) {
        applog(LOG_NOTICE, "no KnCminer cores found");
        return false;
    }

    applog(LOG_ERR, "Found a KnC miner with %d cores", cores);

    cgpu = calloc(1, sizeof(*cgpu));
    knc = calloc(1, sizeof(*knc) + cores * sizeof(struct knc_core_state));
    if (!cgpu || !knc) {
        applog(LOG_ERR, "KnC miner detected, but failed to allocate memory");
        free(cgpu);
        free(knc);
        return false;
    }
    knc->cgpu = cgpu;
    knc->ctx = ctx;
    knc->generation = 1;

    /* Index all cores */
    int dies = 0;
    cores = 0;
    struct knc_core_state *pcore = knc->core;
    for (channel = 0; channel < MAX_ASICS; channel++) {
        for (die = 0; die < DIES_PER_ASIC; die++) {
            if (die_info[channel][die].cores) {
                knc->die[dies].channel = channel;
                knc->die[dies].die = die;
                knc->die[dies].version = die_info[channel][die].version;
                knc->die[dies].cores = die_info[channel][die].cores;
                knc->die[dies].core = pcore;
                knc->die[dies].knc = knc;
                for (core = 0; core < knc->die[dies].cores; core++) {
                    knc->die[dies].core[core].die = &knc->die[dies];
                    knc->die[dies].core[core].core = core;
                }
                cores += knc->die[dies].cores;
                pcore += knc->die[dies].cores;
                dies++;
            }
        }
    }
    for (core = 0; core < cores; core++)
        knc->core[core].coreid = core;
    knc->dies = dies;
    knc->cores = cores;
    knc->startup = 2;

    cgpu->drv = &kncasic_drv;
    cgpu->name = "KnCminer";
    cgpu->procs = cores;
    cgpu->threads = 1;
    cgpu->device_data = knc;

    pthread_mutex_init(&knc->spi_qlock, NULL);
    pthread_cond_init(&knc->spi_qcond, NULL);
    if (thr_info_create(&knc->spi_thr, NULL, knc_spi, (void *)cgpu)) {
        applog(LOG_ERR, "%s%i: SPI thread create failed",
               cgpu->drv->name, cgpu->device_id);
        free(cgpu);
        free(knc);
        return false;
    }

    add_cgpu(cgpu);

    core = 0;
    for_each_managed_proc(proc, cgpu)
    {
        knc->core[core++].proc = proc;
    }

    return true;
}

/* Probe devices and register with add_cgpu */
static
bool kncasic_detect_one(const char * const devpath)
{
    void *ctx = knc_trnsp_new(devpath);

    if (ctx != NULL) {
        if (!knc_detect_one(ctx))
            knc_trnsp_free(ctx);
        else
            return true;
    }
    return false;
}

static
int kncasic_detect_auto(void)
{
    return knc_detect_one(NULL) ? 1 : 0;
}

static
void kncasic_detect(void)
{
    generic_detect(&kncasic_drv, kncasic_detect_one, kncasic_detect_auto, GDF_REQUIRE_DNAME | GDF_DEFAULT_NOAUTO);
}

/* Core helper functions */
static int knc_core_hold_work(struct knc_core_state *core)
{
    return timercmp(&core->hold_work_until, &now, >);
}

static int knc_core_has_work(struct knc_core_state *core)
{
    int i;
    for (i = 0; i < WORKS_PER_CORE; i++) {
        if (core->workslot[i].slot > 0)
            return true;
    }
    return false;
}

static int knc_core_need_work(struct knc_core_state *core)
{
    return !knc_core_hold_work(core) && !core->workslot[1].work && !core->workslot[2].work;
}

static int knc_core_disabled(struct knc_core_state *core)
{
    return timercmp(&core->disabled_until, &now, >);
}

static int _knc_core_next_slot(struct knc_core_state *core)
{
    /* Avoid slot #0 and #15. #0 is "no work assigned" and #15 is seen on bad cores */
    int slot = core->last_slot + 1;
    if (slot >= 15)
        slot = 1;
    core->last_slot = slot;
    return slot;
}

static bool knc_core_slot_busy(struct knc_core_state *core, int slot)
{
    if (slot == core->report.active_slot)
        return true;
    if (slot == core->report.next_slot)
        return true;
    int i;
    for (i = 0; i < WORKS_PER_CORE; i++) {
        if (slot == core->workslot[i].slot)
            return true;
    }
    return false;
}

static int knc_core_next_slot(struct knc_core_state *core)
{
    int slot;
    do
        slot = _knc_core_next_slot(core);
    while (knc_core_slot_busy(core, slot));
    return slot;
}

static void knc_core_failure(struct knc_core_state *core)
{
    core->errors++;
    core->errors_now++;
    core->die->knc->errors++;
    if (knc_core_disabled(core))
        return;
    if (core->errors_now > CORE_ERROR_LIMIT) {
        struct cgpu_info * const proc = core->proc;
        applog(LOG_ERR, "%"PRIpreprv" disabled for %ld seconds due to repeated hardware errors",
               proc->proc_repr, (long)core_disable_interval.tv_sec);
        timeradd(&now, &core_disable_interval, &core->disabled_until);
    }
}
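
/*
 * Match a reported nonce against the work in its slot and submit it
 * upstream.  During startup nonces are only recorded, not submitted, as
 * they may belong to work from a previous run.
 */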
static
void knc_core_handle_nonce(struct thr_info *thr, struct knc_core_state *core, int slot, uint32_t nonce)
{
    int i;
    if (!slot)
        return;
    core->last_nonce.slot = slot;
    core->last_nonce.nonce = nonce;
    if (core->die->knc->startup)
        return;
    for (i = 0; i < WORKS_PER_CORE; i++) {
        if (slot == core->workslot[i].slot && core->workslot[i].work) {
            struct cgpu_info * const proc = core->proc;
            struct thr_info * const corethr = proc->thr[0];
            applog(LOG_INFO, "%"PRIpreprv" found nonce %08x", proc->proc_repr, nonce);
            if (submit_nonce(corethr, core->workslot[i].work, nonce)) {
                /* Good share */
                core->shares++;
                core->die->knc->shares++;
                hashes_done2(corethr, 0x100000000, NULL);
                /* This core is useful. Ignore any errors */
                core->errors_now = 0;
            } else {
                applog(LOG_INFO, "%"PRIpreprv" hwerror nonce %08x", proc->proc_repr, nonce);
                /* Bad share */
                knc_core_failure(core);
            }
        }
    }
}
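
/*
 * Decode a status report and advance the work pipeline.  New nonces are
 * replayed oldest-first (the report seems to list the newest entry first,
 * which is how the last_nonce comparison deduplicates), then the
 * active/next/pending work slots are reconciled with what the core
 * reports it is hashing.
 */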
static int knc_core_process_report(struct thr_info *thr, struct knc_core_state *core, uint8_t *response)
{
    struct cgpu_info * const proc = core->proc;
    struct knc_report *report = &core->report;
    knc_decode_report(response, report, core->die->version);
    bool had_event = false;

    applog(LOG_DEBUG, "%"PRIpreprv": Process report %d %d(%d) / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, report->next_state, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);

    int n;
    for (n = 0; n < KNC_NONCES_PER_REPORT; n++) {
        if (report->nonce[n].slot < 0)
            break;
        if (core->last_nonce.slot == report->nonce[n].slot && core->last_nonce.nonce == report->nonce[n].nonce)
            break;
    }
    while (n-- > 0) {
        knc_core_handle_nonce(thr, core, report->nonce[n].slot, report->nonce[n].nonce);
    }

    if (report->active_slot && core->workslot[0].slot != report->active_slot) {
        had_event = true;
        applog(LOG_INFO, "%"PRIpreprv": New work %d %d / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
        /* Core switched to next work */
        if (core->workslot[0].work) {
            core->die->knc->completed++;
            core->completed++;
            applog(LOG_INFO, "%"PRIpreprv": Work completed!", proc->proc_repr);
            free_work(core->workslot[0].work);
        }
        core->workslot[0] = core->workslot[1];
        core->workslot[1].work = NULL;
        core->workslot[1].slot = -1;

        /* or did it switch directly to pending work? */
        if (report->active_slot == core->workslot[2].slot) {
            applog(LOG_INFO, "%"PRIpreprv": New work %d %d %d %d (pending)", proc->proc_repr, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
            if (core->workslot[0].work)
                free_work(core->workslot[0].work);
            core->workslot[0] = core->workslot[2];
            core->workslot[2].work = NULL;
            core->workslot[2].slot = -1;
        }
    }

    if (report->next_state && core->workslot[2].slot > 0 && (core->workslot[2].slot == report->next_slot || report->next_slot == -1)) {
        had_event = true;
        applog(LOG_INFO, "%"PRIpreprv": Accepted work %d %d %d %d (pending)", proc->proc_repr, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
        /* core accepted next work */
        if (core->workslot[1].work)
            free_work(core->workslot[1].work);
        core->workslot[1] = core->workslot[2];
        core->workslot[2].work = NULL;
        core->workslot[2].slot = -1;
    }

    if (core->workslot[2].work && knc_transfer_completed(core->die->knc, core->transfer_stamp)) {
        had_event = true;
        applog(LOG_INFO, "%"PRIpreprv": Setwork failed?", proc->proc_repr);
        free_work(core->workslot[2].work);
        core->workslot[2].work = NULL;
        core->workslot[2].slot = -1;
    }

    if (had_event)
        applog(LOG_INFO, "%"PRIpreprv": Exit report %d %d / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);

    return 0;
}
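
/*
 * Drain completed SPI buffers in ring order and decode every response
 * recorded in them.  A rejected SETWORK marks the core out of sync
 * (inuse = false), which makes the next submission a clean setwork.
 */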
static void knc_process_responses(struct thr_info *thr)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct knc_state *knc = cgpu->device_data;
    struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->read_buffer];

    while (buffer->state == KNC_SPI_DONE) {
        int i;
        for (i = 0; i < buffer->responses; i++) {
            struct knc_spi_response *response_info = &buffer->response_info[i];
            uint8_t *rxbuf = &buffer->rxbuf[response_info->offset];
            struct knc_core_state *core = response_info->core;
            struct cgpu_info * const proc = core->proc;
            int status = knc_decode_response(rxbuf, response_info->request_length, &rxbuf, response_info->response_length);
            /* Invert KNC_ACCEPTED to simplify the logic below */
            if (response_info->type == KNC_SETWORK && !KNC_IS_ERROR(status))
                status ^= KNC_ACCEPTED;
            if (core->die->version != KNC_VERSION_JUPITER && status != 0) {
                applog(LOG_ERR, "%"PRIpreprv": Communication error (%x / %d)", proc->proc_repr, status, i);
                if (status == KNC_ACCEPTED) {
                    /* Core refused our work vector. Likely out of sync. Reset it */
                    core->inuse = false;
                }
                knc_core_failure(core);
            }
            switch (response_info->type) {
            case KNC_REPORT:
            case KNC_SETWORK:
                /* Should we care about a failed SETWORK explicitly? Or simply handle it via the "next state not loaded" indication in reports? */
                knc_core_process_report(thr, core, rxbuf);
                break;
            default:
                break;
            }
        }

        buffer->state = KNC_SPI_IDLE;
        buffer->responses = 0;
        buffer->size = 0;
        knc->read_buffer += 1;
        knc->read_buffer_count += 1;
        if (knc->read_buffer >= KNC_SPI_BUFFERS)
            knc->read_buffer = 0;
        buffer = &knc->spi_buffer[knc->read_buffer];
    }
}
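
/*
 * Push new work into the core's pending slot.  Jupiter setworks are
 * fire-and-forget (preceded by a double halt when cleaning), while
 * Neptune setworks return a status reply that is checked like a report.
 */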
static int knc_core_send_work(struct thr_info *thr, struct knc_core_state *core, struct work *work, bool clean)
{
    struct knc_state *knc = core->die->knc;
    struct cgpu_info * const proc = core->proc;
    int request_length = 4 + 1 + 6*4 + 3*4 + 8*4;
    uint8_t request[request_length];
    int response_length = 1 + 1 + (1 + 4) * 5;
    int slot = knc_core_next_slot(core);

    if (slot < 0)
        goto error;
    applog(LOG_INFO, "%"PRIpreprv" setwork%s = %d, %d %d / %d %d %d", proc->proc_repr, clean ? " CLEAN" : "", slot, core->report.active_slot, core->report.next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
    if (!clean && !knc_core_need_work(core))
        goto error;

    switch (core->die->version) {
    case KNC_VERSION_JUPITER:
        if (clean) {
            /* Double halt to get rid of any previous queued work */
            request_length = knc_prepare_jupiter_halt(request, core->die->die, core->core);
            knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
            knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
        }
        request_length = knc_prepare_jupiter_setwork(request, core->die->die, core->core, slot, work);
        knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
        break;
    case KNC_VERSION_NEPTUNE:
        request_length = knc_prepare_neptune_setwork(request, core->die->die, core->core, slot, work, clean);
        knc_transfer(thr, core, request_length, request, response_length, KNC_SETWORK, slot);
        break;
    default:
        goto error;
    }

    core->workslot[2].work = work;
    core->workslot[2].slot = slot;
    core->works++;
    core->die->knc->works++;
    core->transfer_stamp = knc_transfer_stamp(knc);
    core->inuse = true;

    timeradd(&now, &core_submit_interval, &core->hold_work_until);
    timeradd(&now, &core_timeout_interval, &core->timeout);

    return 0;

error:
    applog(LOG_INFO, "%"PRIpreprv": Failed to setwork (%d)", proc->proc_repr, core->errors_now);
    knc_core_failure(core);
    free_work(work);
    return -1;
}
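
/*
 * Ask a core for its current status.  A Neptune report has room for five
 * nonce entries; Jupiter reports presumably carry just one, hence the
 * shorter response length.
 */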
static int knc_core_request_report(struct thr_info *thr, struct knc_core_state *core)
{
    struct cgpu_info * const proc = core->proc;
    int request_length = 4;
    uint8_t request[request_length];
    int response_length = 1 + 1 + (1 + 4) * 5;

    applog(LOG_DEBUG, "%"PRIpreprv": Request report", proc->proc_repr);

    request_length = knc_prepare_report(request, core->die->die, core->core);

    switch (core->die->version) {
    case KNC_VERSION_JUPITER:
        response_length = 1 + 1 + (1 + 4);
        knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
        return 0;
    case KNC_VERSION_NEPTUNE:
        knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
        return 0;
    }

    applog(LOG_INFO, "%"PRIpreprv": Failed to scan work report", proc->proc_repr);
    knc_core_failure(core);
    return -1;
}

/* Scan for completed work and queue new work.  Hashes are credited via
 * hashes_done2() as nonces are verified, so this always returns 0.
 */
static int64_t knc_scanwork(struct thr_info *thr)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct knc_state *knc = cgpu->device_data;

    applog(LOG_DEBUG, "KnC running scanwork");

    gettimeofday(&now, NULL);

    knc_trnsp_periodic_check(knc->ctx);

    int i;

    knc_process_responses(thr);

    if (timercmp(&knc->next_error_interval, &now, <)) {
        /* Reset hw error limiter every check interval */
        timeradd(&now, &core_check_interval, &knc->next_error_interval);
        for (i = 0; i < knc->cores; i++) {
            struct knc_core_state *core = &knc->core[i];
            core->errors_now = 0;
        }
    }

    for (i = 0; i < knc->cores; i++) {
        struct knc_core_state *core = &knc->core[i];
        struct cgpu_info * const proc = core->proc;
        bool clean = !core->inuse;
        if (knc_core_disabled(core))
            continue;
        if (core->generation != knc->generation) {
            applog(LOG_INFO, "%"PRIpreprv" flush gen=%d/%d", proc->proc_repr, core->generation, knc->generation);
            /* clean set state, forget everything */
            int slot;
            for (slot = 0; slot < WORKS_PER_CORE; slot++) {
                if (core->workslot[slot].work)
                    free_work(core->workslot[slot].work);
                core->workslot[slot].work = NULL;
                core->workslot[slot].slot = -1;
            }
            core->hold_work_until = now;
            core->generation = knc->generation;
        } else if (timercmp(&core->timeout, &now, <=) && (core->workslot[0].slot > 0 || core->workslot[1].slot > 0 || core->workslot[2].slot > 0)) {
            applog(LOG_ERR, "%"PRIpreprv" timeout gen=%d/%d", proc->proc_repr, core->generation, knc->generation);
            clean = true;
        }
        if (!knc_core_has_work(core))
            clean = true;
        if (core->workslot[0].slot < 0 && core->workslot[1].slot < 0 && core->workslot[2].slot < 0)
            clean = true;
        if (i % SCAN_ADJUST_RANGE == knc->scan_adjust)
            clean = true;
        if ((knc_core_need_work(core) || clean) && !knc->startup) {
            struct work *work = get_work(thr);
            knc_core_send_work(thr, core, work, clean);
        } else {
            knc_core_request_report(thr, core);
        }
    }

    /* knc->startup delays initial work submission until we have had a chance to query all cores on their current status, to avoid slot number collisions with an earlier run */
    if (knc->startup)
        knc->startup--;
    else if (knc->scan_adjust < SCAN_ADJUST_RANGE)
        knc->scan_adjust++;

    knc_flush(thr);

    return 0;
}
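
/*
 * Work restart.  Bumping the generation counter (skipping 0 on
 * wraparound) makes every core drop its queued work on the next scanwork
 * pass; resetting scan_adjust restarts the staggered clean-setwork sweep.
 */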
static void knc_flush_work(struct cgpu_info *cgpu)
{
    struct knc_state *knc = cgpu->device_data;

    applog(LOG_INFO, "KnC running flushwork");

    knc->generation++;
    knc->scan_adjust = 0;
    if (!knc->generation)
        knc->generation++;
}

static void knc_zero_stats(struct cgpu_info *cgpu)
{
    int core;
    struct knc_state *knc = cgpu->device_data;

    knc->shares = 0;
    knc->completed = 0;
    knc->works = 0;
    knc->errors = 0;
    for (core = 0; core < knc->cores; core++) {
        knc->core[core].works = 0;
        knc->core[core].errors = 0;
        knc->core[core].shares = 0;
        knc->core[core].completed = 0;
    }
}

static struct api_data *knc_api_stats(struct cgpu_info *cgpu)
{
    struct knc_state *knc = cgpu->device_data;
    struct knc_core_state * const proccore = &knc->core[cgpu->proc_id];
    struct knc_die * const die = proccore->die;
    struct api_data *root = NULL;
    int core;
    char label[256];

    root = api_add_int(root, "dies", &knc->dies, 1);
    root = api_add_int(root, "cores", &knc->cores, 1);
    root = api_add_uint64(root, "shares", &knc->shares, 1);
    root = api_add_uint64(root, "works", &knc->works, 1);
    root = api_add_uint64(root, "completed", &knc->completed, 1);
    root = api_add_uint64(root, "errors", &knc->errors, 1);

    /* Active cores */
    int active = knc->cores;
    for (core = 0; core < knc->cores; core++) {
        if (knc_core_disabled(&knc->core[core]))
            active -= 1;
    }
    root = api_add_int(root, "active", &active, 1);

    /* Per ASIC/die data */
    {
#define knc_api_die_string(name, value) do { \
        snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
        root = api_add_string(root, label, value, 1); \
    } while (0)
#define knc_api_die_int(name, value) do { \
        snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
        uint64_t v = value; \
        root = api_add_uint64(root, label, &v, 1); \
    } while (0)

        /* Model */
        {
            char *model = "?";
            switch (die->version) {
            case KNC_VERSION_JUPITER:
                model = "Jupiter";
                break;
            case KNC_VERSION_NEPTUNE:
                model = "Neptune";
                break;
            }
            knc_api_die_string("model", model);
            knc_api_die_int("cores", die->cores);
        }

        /* Core based stats */
        {
            uint64_t errors = 0;
            uint64_t shares = 0;
            uint64_t works = 0;
            uint64_t completed = 0;
            char coremap[die->cores + 1];

            for (core = 0; core < die->cores; core++) {
                coremap[core] = knc_core_disabled(&die->core[core]) ? '0' : '1';
                works += die->core[core].works;
                shares += die->core[core].shares;
                errors += die->core[core].errors;
                completed += die->core[core].completed;
            }
            coremap[die->cores] = '\0';
            knc_api_die_int("errors", errors);
            knc_api_die_int("shares", shares);
            knc_api_die_int("works", works);
            knc_api_die_int("completed", completed);
            knc_api_die_string("coremap", coremap);
        }
    }

    return root;
}

static
void hash_driver_work(struct thr_info * const thr)
{
    struct cgpu_info * const cgpu = thr->cgpu;
    struct device_drv * const drv = cgpu->drv;

    while (likely(!cgpu->shutdown))
    {
        drv->scanwork(thr);

        if (unlikely(thr->pause || cgpu->deven != DEV_ENABLED))
            mt_disable(thr);
    }
}
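
/*
 * Driver entry points.  The device runs a single minerloop thread
 * (cgpu->threads = 1) that services all cores from knc_scanwork(); only
 * the raw SPI transfers happen on the separate knc_spi() thread.
 */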
struct device_drv kncasic_drv = {
    .dname = "kncasic",
    .name = "KNC",
    .drv_detect = kncasic_detect,
    .minerloop = hash_driver_work,
    .flush_work = knc_flush_work,
    .scanwork = knc_scanwork,
    .zero_stats = knc_zero_stats,
    .get_api_stats = knc_api_stats,
};