driver-kncasic.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893
  1. /*
  2. * Copyright 2014 KnCminer
  3. * Copyright 2014 Luke Dashjr
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the Free
  7. * Software Foundation; either version 3 of the License, or (at your option)
  8. * any later version. See COPYING for more details.
  9. */
  10. #include <stdlib.h>
  11. #include <assert.h>
  12. #include <fcntl.h>
  13. #include <limits.h>
  14. #include <unistd.h>
  15. #include <sys/ioctl.h>
  16. #include <sys/time.h>
  17. #include <linux/types.h>
  18. #include <linux/spi/spidev.h>
  19. #include <zlib.h>
  20. #include "deviceapi.h"
  21. #include "logging.h"
  22. #include "miner.h"
  23. #include "knc-asic/knc-transport.h"
  24. #include "knc-asic/knc-asic.h"
/* Driver tunables */
#define WORKS_PER_CORE 3		/* work slots tracked per core: active, next, pending */
#define CORE_ERROR_LIMIT 30		/* errors_now threshold before a core is benched */
#define CORE_ERROR_INTERVAL 30		/* seconds between resets of the per-core error counters */
#define CORE_ERROR_DISABLE_TIME 5*60	/* seconds a failing core stays disabled */
#define CORE_SUBMIT_MIN_TIME 2		/* minimum seconds between work submissions to one core */
#define CORE_TIMEOUT 20			/* seconds without progress before a core is force-cleaned */
#define SCAN_ADJUST_RANGE 32		/* one core in 32 gets a forced clean setwork each pass */
BFG_REGISTER_DRIVER(kncasic_drv)
/* Timestamp shared by all helpers; refreshed once per scanwork pass */
static struct timeval now;
static const struct timeval core_check_interval = {
	CORE_ERROR_INTERVAL, 0
};
static const struct timeval core_disable_interval = {
	CORE_ERROR_DISABLE_TIME, 0
};
static const struct timeval core_submit_interval = {
	CORE_SUBMIT_MIN_TIME, 0
};
static const struct timeval core_timeout_interval = {
	CORE_TIMEOUT, 0
};
struct knc_die;
/* Per-core mining state: the queued work slots, the last decoded report,
 * error bookkeeping and throttling/disable timers. */
struct knc_core_state {
	int generation;			/* work generation this core was last flushed to */
	int core;			/* core index within its die */
	struct knc_die *die;		/* owning die */
	struct {
		int slot;		/* hardware slot id, or <= 0 when empty */
		struct work *work;	/* work assigned to that slot (owned) */
	} workslot[WORKS_PER_CORE]; /* active, next */
	int transfer_stamp;		/* send-buffer stamp of the last setwork (see knc_transfer_stamp) */
	struct knc_report report;	/* last decoded status report */
	struct {
		int slot;		/* slot of the most recent nonce seen */
		uint32_t nonce;		/* used to deduplicate report entries */
	} last_nonce;
	/* Per-core statistics */
	uint32_t works;
	uint32_t shares;
	uint32_t errors;
	uint32_t completed;
	int last_slot;			/* last slot id handed out by _knc_core_next_slot */
	uint32_t errors_now;		/* errors within the current check interval */
	struct timeval disabled_until;	/* core benched while now < disabled_until */
	struct timeval hold_work_until;	/* submit throttle: no new work before this */
	struct timeval timeout;		/* deadline for the core to show progress */
	bool inuse;			/* false forces a clean setwork next time */
	struct cgpu_info *proc;		/* managed processor this core is mapped to */
};
struct knc_state;
/* One die on one ASIC: identifies its SPI channel, protocol version and
 * the contiguous span of core states it owns inside knc_state.core[]. */
struct knc_die {
	int channel;			/* ASIC/channel index on the transport */
	int die;			/* die index within the ASIC */
	int version;			/* KNC_VERSION_JUPITER or KNC_VERSION_NEPTUNE */
	int cores;			/* number of usable cores on this die */
	struct knc_state *knc;		/* back-pointer to the device state */
	struct knc_core_state *core;	/* first core of this die (array of `cores`) */
};
/* SPI buffer sizing */
#define MAX_SPI_SIZE (4096)	/* bytes per queued SPI transfer buffer */
/* Worst-case message count per buffer, using the minimum per-message
 * overhead (header + CRC + ACK framing) as divisor */
#define MAX_SPI_RESPONSES (MAX_SPI_SIZE / (2 + 4 + 1 + 1 + 1 + 4))
#define MAX_SPI_MESSAGE (128)	/* NOTE(review): not referenced in this file — confirm before removing */
#define KNC_SPI_BUFFERS (3)	/* depth of the send/read buffer ring */
/* Whole-device driver state, shared by every cgpu registered for this
 * miner.  Allocated once in knc_detect_one(); core[] is a flexible array
 * member sized to the total number of detected cores. */
struct knc_state {
	void *ctx;	/* transport handle from knc_trnsp_new() */
	int generation; /* work/block generation, incremented on each flush invalidating older works */
	int dies;
	struct knc_die die[KNC_MAX_ASICS * KNC_MAX_DIES_PER_ASIC];
	int cores;
	int scan_adjust;	/* rolling index: forces a clean setwork on 1/SCAN_ADJUST_RANGE cores per pass */
	int startup;		/* scanwork passes left before the first work submission */
	/* Statistics */
	uint64_t shares; /* diff1 shares reported by hardware */
	uint64_t works; /* Work units submitted */
	uint64_t completed; /* Work units completed */
	uint64_t errors; /* Hardware & communication errors */
	struct timeval next_error_interval;	/* when the per-core error counters next reset */
	/* End of statistics */
	/* SPI communications thread */
	pthread_mutex_t spi_qlock; /* SPI queue status lock */
	struct thr_info spi_thr; /* SPI I/O thread */
	pthread_cond_t spi_qcond; /* SPI queue change wakeup */
	struct knc_spi_buffer {
		enum {
			KNC_SPI_IDLE=0,		/* free: may be filled with new requests */
			KNC_SPI_PENDING,	/* handed to the SPI thread */
			KNC_SPI_DONE		/* transfer finished; responses unprocessed */
		} state;
		int size;		/* bytes used in txbuf/rxbuf */
		uint8_t txbuf[MAX_SPI_SIZE];
		uint8_t rxbuf[MAX_SPI_SIZE];
		int responses;		/* entries used in response_info[] */
		struct knc_spi_response {
			int request_length;
			int response_length;
			enum {
				KNC_UNKNOWN = 0,
				KNC_NO_RESPONSE,
				KNC_SETWORK,
				KNC_REPORT,
				KNC_INFO
			} type;
			struct knc_core_state *core;	/* core the request was addressed to */
			uint32_t data;			/* request-specific payload (e.g. setwork slot) */
			int offset;			/* byte offset of this message within the buffer */
		} response_info[MAX_SPI_RESPONSES];
	} spi_buffer[KNC_SPI_BUFFERS];
	int send_buffer;	/* ring index currently being filled */
	int read_buffer;	/* ring index next to have its responses processed */
	int send_buffer_count;	/* total buffers queued (wrapping counter, see knc_transfer_stamp) */
	int read_buffer_count;	/* total buffers processed (wrapping counter) */
	/* end SPI thread */
	/* lock to protect resources between different threads */
	pthread_mutex_t state_lock;
	/* Do not add anything below here!! core[] must be last */
	struct knc_core_state core[];
};
/* -1 = autodetect; presumably set from a command-line option — confirm against the option table */
int opt_knc_device_bus = -1;
/* Optional log file path; not referenced within this file */
char *knc_log_file = NULL;
/* SPI I/O thread body.  Consumes buffers from the ring in strict order:
 * waits for the current slot to be marked PENDING, performs the blocking
 * full-duplex transfer with the queue lock released, then marks the slot
 * DONE and wakes any thread blocked in knc_flush(). */
static void *knc_spi(void *thr_data)
{
	struct cgpu_info *cgpu = thr_data;
	struct knc_state *knc = cgpu->device_data;
	int buffer = 0;
	pthread_mutex_lock(&knc->spi_qlock);
	while (!cgpu->shutdown) {
		int this_buffer = buffer;
		/* Sleep until this ring slot has queued data (or we are shutting down) */
		while (knc->spi_buffer[buffer].state != KNC_SPI_PENDING && !cgpu->shutdown)
			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
		pthread_mutex_unlock(&knc->spi_qlock);
		if (cgpu->shutdown)
			return NULL;
		/* Transfer is done without holding the queue lock so producers can keep filling */
		knc_trnsp_transfer(knc->ctx, knc->spi_buffer[buffer].txbuf, knc->spi_buffer[buffer].rxbuf, knc->spi_buffer[buffer].size);
		buffer += 1;
		if (buffer >= KNC_SPI_BUFFERS)
			buffer = 0;
		pthread_mutex_lock(&knc->spi_qlock);
		knc->spi_buffer[this_buffer].state = KNC_SPI_DONE;
		pthread_cond_signal(&knc->spi_qcond);
	}
	pthread_mutex_unlock(&knc->spi_qlock);
	return NULL;
}
static void knc_process_responses(struct thr_info *thr);
/* Hand the current send buffer to the SPI thread (if it holds queued
 * requests) and advance to the next ring slot, blocking while all slots
 * are still in flight.  Always finishes by draining completed buffers. */
static void knc_flush(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
	/* Only this thread moves a buffer IDLE->PENDING, so the unlocked check is safe */
	if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) {
		pthread_mutex_lock(&knc->spi_qlock);
		buffer->state = KNC_SPI_PENDING;
		pthread_cond_signal(&knc->spi_qcond);
		knc->send_buffer += 1;
		knc->send_buffer_count += 1;
		if (knc->send_buffer >= KNC_SPI_BUFFERS)
			knc->send_buffer = 0;
		buffer = &knc->spi_buffer[knc->send_buffer];
		/* Block for SPI to finish a transfer if all buffers are busy */
		while (buffer->state == KNC_SPI_PENDING) {
			applog(LOG_DEBUG, "KnC: SPI buffer full (%d), waiting for SPI thread", buffer->responses);
			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
		}
		pthread_mutex_unlock(&knc->spi_qlock);
	}
	knc_process_responses(thr);
}
/* Append one request (plus room for its response) to the current send
 * buffer, recording the metadata needed to route the reply back to the
 * core once the buffer completes.  Flushes first when the message would
 * overflow the buffer or the response table. */
static void knc_transfer(struct thr_info *thr, struct knc_core_state *core, int request_length, uint8_t *request, int response_length, int response_type, uint32_t data)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
	/* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */
	int msglen = 2 + max(request_length, 4 + response_length) + 4 + 1 + 3;
	if (buffer->size + msglen > MAX_SPI_SIZE || buffer->responses >= MAX_SPI_RESPONSES) {
		applog(LOG_INFO, "KnC: SPI buffer sent, %d messages %d bytes", buffer->responses, buffer->size);
		knc_flush(thr);
		/* knc_flush advanced send_buffer; re-fetch the active buffer */
		buffer = &knc->spi_buffer[knc->send_buffer];
	}
	struct knc_spi_response *response_info = &buffer->response_info[buffer->responses];
	buffer->responses++;
	response_info->offset = buffer->size;
	response_info->type = response_type;
	response_info->request_length = request_length;
	response_info->response_length = response_length;
	response_info->core = core;
	response_info->data = data;
	buffer->size = knc_prepare_transfer(buffer->txbuf, buffer->size, MAX_SPI_SIZE, core->die->channel, request_length, request, response_length);
}
  212. static int knc_transfer_stamp(struct knc_state *knc)
  213. {
  214. return knc->send_buffer_count;
  215. }
  216. static int knc_transfer_completed(struct knc_state *knc, int stamp)
  217. {
  218. /* signed delta math, counter wrap OK */
  219. return (int)(knc->read_buffer_count - stamp) >= 1;
  220. }
  221. static bool knc_detect_one(void *ctx)
  222. {
  223. /* Scan device for ASICs */
  224. int channel, die, cores = 0, core;
  225. struct knc_state *knc;
  226. struct knc_die_info die_info[KNC_MAX_ASICS][KNC_MAX_DIES_PER_ASIC];
  227. memset(die_info, 0, sizeof(die_info));
  228. /* Send GETINFO to each die to detect if it is usable */
  229. for (channel = 0; channel < KNC_MAX_ASICS; channel++) {
  230. if (!knc_trnsp_asic_detect(ctx, channel))
  231. continue;
  232. for (die = 0; die < KNC_MAX_DIES_PER_ASIC; die++) {
  233. if (knc_detect_die(ctx, channel, die, &die_info[channel][die]) == 0)
  234. cores += die_info[channel][die].cores;
  235. }
  236. }
  237. if (!cores) {
  238. applog(LOG_NOTICE, "no KnCminer cores found");
  239. return false;
  240. }
  241. applog(LOG_ERR, "Found a KnC miner with %d cores", cores);
  242. knc = calloc(1, sizeof(*knc) + cores * sizeof(struct knc_core_state));
  243. if (!knc)
  244. {
  245. applog(LOG_ERR, "KnC miner detected, but failed to allocate memory");
  246. return false;
  247. }
  248. knc->ctx = ctx;
  249. knc->generation = 1;
  250. /* Index all cores */
  251. struct cgpu_info *prev_cgpu = NULL, *first_cgpu = NULL;
  252. int dies = 0;
  253. cores = 0;
  254. struct knc_core_state *pcore = knc->core;
  255. int channel_cores_base = 0;
  256. for (channel = 0; channel < KNC_MAX_ASICS; channel++) {
  257. int channel_cores = 0;
  258. for (die = 0; die < KNC_MAX_DIES_PER_ASIC; die++) {
  259. if (die_info[channel][die].cores) {
  260. knc->die[dies].channel = channel;
  261. knc->die[dies].die = die;
  262. knc->die[dies].version = die_info[channel][die].version;
  263. knc->die[dies].cores = die_info[channel][die].cores;
  264. knc->die[dies].core = pcore;
  265. knc->die[dies].knc = knc;
  266. for (core = 0; core < knc->die[dies].cores; core++) {
  267. knc->die[dies].core[core].die = &knc->die[dies];
  268. knc->die[dies].core[core].core = core;
  269. }
  270. cores += knc->die[dies].cores;
  271. channel_cores += knc->die[dies].cores;
  272. pcore += knc->die[dies].cores;
  273. dies++;
  274. }
  275. }
  276. if (channel_cores)
  277. {
  278. struct cgpu_info * const cgpu = malloc(sizeof(*cgpu));
  279. *cgpu = (struct cgpu_info){
  280. .drv = &kncasic_drv,
  281. .name = "KnCminer",
  282. .procs = channel_cores,
  283. .threads = prev_cgpu ? 0 : 1,
  284. .device_data = knc,
  285. };
  286. add_cgpu_slave(cgpu, prev_cgpu);
  287. if (!prev_cgpu)
  288. first_cgpu = cgpu;
  289. prev_cgpu = cgpu;
  290. for_each_managed_proc(proc, cgpu)
  291. {
  292. knc->core[channel_cores_base++].proc = proc;
  293. }
  294. }
  295. }
  296. knc->dies = dies;
  297. knc->cores = cores;
  298. knc->startup = 2;
  299. pthread_mutex_init(&knc->spi_qlock, NULL);
  300. pthread_cond_init(&knc->spi_qcond, NULL);
  301. pthread_mutex_init(&knc->state_lock, NULL);
  302. if (thr_info_create(&knc->spi_thr, NULL, knc_spi, first_cgpu))
  303. {
  304. applog(LOG_ERR, "%s: SPI thread create failed", first_cgpu->dev_repr);
  305. free(knc);
  306. return false;
  307. }
  308. return true;
  309. }
  310. /* Probe devices and register with add_cgpu */
  311. static
  312. bool kncasic_detect_one(const char * const devpath)
  313. {
  314. void *ctx = knc_trnsp_new(devpath);
  315. if (ctx != NULL) {
  316. if (!knc_detect_one(ctx))
  317. knc_trnsp_free(ctx);
  318. else
  319. return true;
  320. }
  321. return false;
  322. }
  323. static
  324. int kncasic_detect_auto(void)
  325. {
  326. return kncasic_detect_one(NULL) ? 1 : 0;
  327. }
/* Driver detect hook: delegate to the generic detection helper with both
 * the by-path and the automatic probe functions. */
static
void kncasic_detect(void)
{
	generic_detect(&kncasic_drv, kncasic_detect_one, kncasic_detect_auto, GDF_REQUIRE_DNAME | GDF_DEFAULT_NOAUTO);
}
  333. /* Core helper functions */
  334. static int knc_core_hold_work(struct knc_core_state *core)
  335. {
  336. return timercmp(&core->hold_work_until, &now, >);
  337. }
  338. static int knc_core_has_work(struct knc_core_state *core)
  339. {
  340. int i;
  341. for (i = 0; i < WORKS_PER_CORE; i++) {
  342. if (core->workslot[i].slot > 0)
  343. return true;
  344. }
  345. return false;
  346. }
  347. static int knc_core_need_work(struct knc_core_state *core)
  348. {
  349. return !knc_core_hold_work(core) && !core->workslot[1].work && !core->workslot[2].work;
  350. }
  351. static int knc_core_disabled(struct knc_core_state *core)
  352. {
  353. return timercmp(&core->disabled_until, &now, >);
  354. }
  355. static int _knc_core_next_slot(struct knc_core_state *core)
  356. {
  357. /* Avoid slot #0 and #15. #0 is "no work assigned" and #15 is seen on bad cores */
  358. int slot = core->last_slot + 1;
  359. if (slot >= 15)
  360. slot = 1;
  361. core->last_slot = slot;
  362. return slot;
  363. }
  364. static bool knc_core_slot_busy(struct knc_core_state *core, int slot)
  365. {
  366. if (slot == core->report.active_slot)
  367. return true;
  368. if (slot == core->report.next_slot)
  369. return true;
  370. int i;
  371. for (i = 0; i < WORKS_PER_CORE; i++) {
  372. if (slot == core->workslot[i].slot)
  373. return true;
  374. }
  375. return false;
  376. }
  377. static int knc_core_next_slot(struct knc_core_state *core)
  378. {
  379. int slot;
  380. do slot = _knc_core_next_slot(core);
  381. while (knc_core_slot_busy(core, slot));
  382. return slot;
  383. }
  384. static void knc_core_failure(struct knc_core_state *core)
  385. {
  386. core->errors++;
  387. core->errors_now++;
  388. core->die->knc->errors++;
  389. if (knc_core_disabled(core))
  390. return;
  391. if (core->errors_now > CORE_ERROR_LIMIT) {
  392. struct cgpu_info * const proc = core->proc;
  393. applog(LOG_ERR, "%"PRIpreprv" disabled for %ld seconds due to repeated hardware errors",
  394. proc->proc_repr, (long)core_disable_interval.tv_sec);
  395. timeradd(&now, &core_disable_interval, &core->disabled_until);
  396. }
  397. }
/* Credit one nonce reported by the hardware to the matching queued work.
 * slot 0 means "no work assigned".  During startup nonces are recorded
 * but not submitted: the slots may refer to work from a previous run. */
static
void knc_core_handle_nonce(struct thr_info *thr, struct knc_core_state *core, int slot, uint32_t nonce)
{
	int i;
	if (!slot)
		return;
	/* Remember the newest nonce so knc_core_process_report can skip
	 * report entries it has already handled. */
	core->last_nonce.slot = slot;
	core->last_nonce.nonce = nonce;
	if (core->die->knc->startup)
		return;
	for (i = 0; i < WORKS_PER_CORE; i++) {
		if (slot == core->workslot[i].slot && core->workslot[i].work) {
			struct cgpu_info * const proc = core->proc;
			struct thr_info * const corethr = proc->thr[0];
			applog(LOG_INFO, "%"PRIpreprv" found nonce %08x", proc->proc_repr, nonce);
			if (submit_nonce(corethr, core->workslot[i].work, nonce)) {
				/* Good share */
				core->shares++;
				core->die->knc->shares++;
				/* one diff1 share == 2^32 hashes checked */
				hashes_done2(corethr, 0x100000000, NULL);
				/* This core is useful. Ignore any errors */
				core->errors_now = 0;
			} else {
				applog(LOG_INFO, "%"PRIpreprv" hwerror nonce %08x", proc->proc_repr, nonce);
				/* Bad share */
				knc_core_failure(core);
			}
		}
	}
}
  428. static int knc_core_process_report(struct thr_info *thr, struct knc_core_state *core, uint8_t *response)
  429. {
  430. struct cgpu_info * const proc = core->proc;
  431. struct knc_report *report = &core->report;
  432. knc_decode_report(response, report, core->die->version);
  433. bool had_event = false;
  434. applog(LOG_DEBUG, "%"PRIpreprv": Process report %d %d(%d) / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, report->next_state, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
  435. int n;
  436. for (n = 0; n < KNC_NONCES_PER_REPORT; n++) {
  437. if (report->nonce[n].slot < 0)
  438. break;
  439. if (core->last_nonce.slot == report->nonce[n].slot && core->last_nonce.nonce == report->nonce[n].nonce)
  440. break;
  441. }
  442. while(n-- > 0) {
  443. knc_core_handle_nonce(thr, core, report->nonce[n].slot, report->nonce[n].nonce);
  444. }
  445. if (report->active_slot && core->workslot[0].slot != report->active_slot) {
  446. had_event = true;
  447. applog(LOG_INFO, "%"PRIpreprv": New work %d %d / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
  448. /* Core switched to next work */
  449. if (core->workslot[0].work) {
  450. core->die->knc->completed++;
  451. core->completed++;
  452. applog(LOG_INFO, "%"PRIpreprv": Work completed!", proc->proc_repr);
  453. free_work(core->workslot[0].work);
  454. }
  455. core->workslot[0] = core->workslot[1];
  456. core->workslot[1].work = NULL;
  457. core->workslot[1].slot = -1;
  458. /* or did it switch directly to pending work? */
  459. if (report->active_slot == core->workslot[2].slot) {
  460. applog(LOG_INFO, "%"PRIpreprv": New work %d %d %d %d (pending)", proc->proc_repr, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
  461. if (core->workslot[0].work)
  462. free_work(core->workslot[0].work);
  463. core->workslot[0] = core->workslot[2];
  464. core->workslot[2].work = NULL;
  465. core->workslot[2].slot = -1;
  466. }
  467. }
  468. if (report->next_state && core->workslot[2].slot > 0 && (core->workslot[2].slot == report->next_slot || report->next_slot == -1)) {
  469. had_event = true;
  470. applog(LOG_INFO, "%"PRIpreprv": Accepted work %d %d %d %d (pending)", proc->proc_repr, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
  471. /* core accepted next work */
  472. if (core->workslot[1].work)
  473. free_work(core->workslot[1].work);
  474. core->workslot[1] = core->workslot[2];
  475. core->workslot[2].work = NULL;
  476. core->workslot[2].slot = -1;
  477. }
  478. if (core->workslot[2].work && knc_transfer_completed(core->die->knc, core->transfer_stamp)) {
  479. had_event = true;
  480. applog(LOG_INFO, "%"PRIpreprv": Setwork failed?", proc->proc_repr);
  481. free_work(core->workslot[2].work);
  482. core->workslot[2].slot = -1;
  483. }
  484. if (had_event)
  485. applog(LOG_INFO, "%"PRIpreprv": Exit report %d %d / %d %d %d", proc->proc_repr, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
  486. return 0;
  487. }
/* Drain all completed SPI buffers in ring order: decode each response,
 * account communication errors, and feed report/setwork replies into
 * knc_core_process_report().  Drained buffers are recycled to IDLE. */
static void knc_process_responses(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct knc_state *knc = cgpu->device_data;
	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->read_buffer];
	while (buffer->state == KNC_SPI_DONE) {
		int i;
		for (i = 0; i < buffer->responses; i++) {
			struct knc_spi_response *response_info = &buffer->response_info[i];
			uint8_t *rxbuf = &buffer->rxbuf[response_info->offset];
			struct knc_core_state *core = response_info->core;
			struct cgpu_info * const proc = core->proc;
			int status = knc_decode_response(rxbuf, response_info->request_length, &rxbuf, response_info->response_length);
			/* Invert KNC_ACCEPTED to simplify logics below */
			if (response_info->type == KNC_SETWORK && !KNC_IS_ERROR(status))
				status ^= KNC_ACCEPTED;
			/* Status checking is skipped for Jupiter dies */
			if (core->die->version != KNC_VERSION_JUPITER && status != 0) {
				applog(LOG_ERR, "%s: Communication error (%x / %d)", proc->proc_repr, status, i);
				if (status == KNC_ACCEPTED) {
					/* Core refused our work vector. Likely out of sync. Reset it */
					core->inuse = false;
				}
				knc_core_failure(core);
			}
			switch(response_info->type) {
			case KNC_REPORT:
			case KNC_SETWORK:
				/* Should we care about failed SETWORK explicit? Or simply handle it by next state not loaded indication in reports? */
				knc_core_process_report(thr, core, rxbuf);
				break;
			default:
				break;
			}
		}
		/* Recycle this buffer and advance the read ring */
		buffer->state = KNC_SPI_IDLE;
		buffer->responses = 0;
		buffer->size = 0;
		knc->read_buffer += 1;
		knc->read_buffer_count += 1;
		if (knc->read_buffer >= KNC_SPI_BUFFERS)
			knc->read_buffer = 0;
		buffer = &knc->spi_buffer[knc->read_buffer];
	}
}
/* Queue a setwork request for the core.  On success the work is parked in
 * workslot[2] (pending) until a later report confirms the core accepted
 * it.  Takes ownership of `work`: it is freed on the error path.
 * Returns 0 on success, -1 on failure. */
static int knc_core_send_work(struct thr_info *thr, struct knc_core_state *core, struct work *work, bool clean)
{
	struct knc_state *knc = core->die->knc;
	struct cgpu_info * const proc = core->proc;
	/* request/response sizes for the Neptune setwork message */
	int request_length = 4 + 1 + 6*4 + 3*4 + 8*4;
	uint8_t request[request_length];
	int response_length = 1 + 1 + (1 + 4) * 5;
	int slot = knc_core_next_slot(core);
	if (slot < 0)
		goto error;
	applog(LOG_INFO, "%"PRIpreprv" setwork%s = %d, %d %d / %d %d %d", proc->proc_repr, clean ? " CLEAN" : "", slot, core->report.active_slot, core->report.next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot);
	/* Non-clean submissions only when the queue actually has room */
	if (!clean && !knc_core_need_work(core))
		goto error;
	switch(core->die->version) {
	case KNC_VERSION_JUPITER:
		if (clean) {
			/* Double halt to get rid of any previous queued work */
			request_length = knc_prepare_jupiter_halt(request, core->die->die, core->core);
			knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
			knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
		}
		request_length = knc_prepare_jupiter_setwork(request, core->die->die, core->core, slot, work);
		knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0);
		break;
	case KNC_VERSION_NEPTUNE:
		request_length = knc_prepare_neptune_setwork(request, core->die->die, core->core, slot, work, clean);
		knc_transfer(thr, core, request_length, request, response_length, KNC_SETWORK, slot);
		break;
	default:
		goto error;
	}
	/* Park the work as pending and arm the throttling/timeout timers */
	core->workslot[2].work = work;
	core->workslot[2].slot = slot;
	core->works++;
	core->die->knc->works++;
	core->transfer_stamp = knc_transfer_stamp(knc);
	core->inuse = true;
	timeradd(&now, &core_submit_interval, &core->hold_work_until);
	timeradd(&now, &core_timeout_interval, &core->timeout);
	return 0;
error:
	applog(LOG_INFO, "%"PRIpreprv": Failed to setwork (%d)", proc->proc_repr, core->errors_now);
	knc_core_failure(core);
	free_work(work);
	return -1;
}
  578. static int knc_core_request_report(struct thr_info *thr, struct knc_core_state *core)
  579. {
  580. struct cgpu_info * const proc = core->proc;
  581. int request_length = 4;
  582. uint8_t request[request_length];
  583. int response_length = 1 + 1 + (1 + 4) * 5;
  584. applog(LOG_DEBUG, "%"PRIpreprv": Request report", proc->proc_repr);
  585. request_length = knc_prepare_report(request, core->die->die, core->core);
  586. switch(core->die->version) {
  587. case KNC_VERSION_JUPITER:
  588. response_length = 1 + 1 + (1 + 4);
  589. knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
  590. return 0;
  591. case KNC_VERSION_NEPTUNE:
  592. knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0);
  593. return 0;
  594. }
  595. applog(LOG_INFO, "%"PRIpreprv": Failed to scan work report", proc->proc_repr);
  596. knc_core_failure(core);
  597. return -1;
  598. }
  599. /* return value is number of nonces that have been checked since
  600. * previous call
  601. */
  602. static int64_t knc_scanwork(struct thr_info *thr)
  603. {
  604. struct cgpu_info *cgpu = thr->cgpu;
  605. struct knc_state *knc = cgpu->device_data;
  606. applog(LOG_DEBUG, "KnC running scanwork");
  607. mutex_lock(&knc->state_lock);
  608. gettimeofday(&now, NULL);
  609. knc_trnsp_periodic_check(knc->ctx);
  610. int i;
  611. knc_process_responses(thr);
  612. if (timercmp(&knc->next_error_interval, &now, >)) {
  613. /* Reset hw error limiter every check interval */
  614. timeradd(&now, &core_check_interval, &knc->next_error_interval);
  615. for (i = 0; i < knc->cores; i++) {
  616. struct knc_core_state *core = &knc->core[i];
  617. core->errors_now = 0;
  618. }
  619. }
  620. for (i = 0; i < knc->cores; i++) {
  621. struct knc_core_state *core = &knc->core[i];
  622. struct cgpu_info * const proc = core->proc;
  623. bool clean = !core->inuse;
  624. if (knc_core_disabled(core))
  625. continue;
  626. if (core->generation != knc->generation) {
  627. applog(LOG_INFO, "%"PRIpreprv" flush gen=%d/%d", proc->proc_repr, core->generation, knc->generation);
  628. /* clean set state, forget everything */
  629. int slot;
  630. for (slot = 0; slot < WORKS_PER_CORE; slot ++) {
  631. if (core->workslot[slot].work)
  632. free_work(core->workslot[slot].work);
  633. core->workslot[slot].slot = -1;
  634. }
  635. core->hold_work_until = now;
  636. core->generation = knc->generation;
  637. } else if (timercmp(&core->timeout, &now, <=) && (core->workslot[0].slot > 0 || core->workslot[1].slot > 0 || core->workslot[2].slot > 0)) {
  638. applog(LOG_ERR, "%"PRIpreprv" timeout gen=%d/%d", proc->proc_repr, core->generation, knc->generation);
  639. clean = true;
  640. }
  641. if (!knc_core_has_work(core))
  642. clean = true;
  643. if (core->workslot[0].slot < 0 && core->workslot[1].slot < 0 && core->workslot[2].slot < 0)
  644. clean = true;
  645. if (i % SCAN_ADJUST_RANGE == knc->scan_adjust)
  646. clean = true;
  647. if ((knc_core_need_work(core) || clean) && !knc->startup) {
  648. struct work *work = get_work(thr);
  649. knc_core_send_work(thr, core, work, clean);
  650. } else {
  651. knc_core_request_report(thr, core);
  652. }
  653. }
  654. /* knc->startup delays initial work submission until we have had chance to query all cores on their current status, to avoid slot number collisions with earlier run */
  655. if (knc->startup)
  656. knc->startup--;
  657. else if (knc->scan_adjust < SCAN_ADJUST_RANGE)
  658. knc->scan_adjust++;
  659. knc_flush(thr);
  660. mutex_unlock(&knc->state_lock);
  661. return 0;
  662. }
  663. static void knc_flush_work(struct cgpu_info *cgpu)
  664. {
  665. struct knc_state *knc = cgpu->device_data;
  666. applog(LOG_INFO, "KnC running flushwork");
  667. mutex_lock(&knc->state_lock);
  668. knc->generation++;
  669. knc->scan_adjust=0;
  670. if (!knc->generation)
  671. knc->generation++;
  672. mutex_unlock(&knc->state_lock);
  673. }
  674. static void knc_zero_stats(struct cgpu_info *cgpu)
  675. {
  676. int core;
  677. struct knc_state *knc = cgpu->device_data;
  678. mutex_lock(&knc->state_lock);
  679. for (core = 0; core < knc->cores; core++) {
  680. knc->shares = 0;
  681. knc->completed = 0;
  682. knc->works = 0;
  683. knc->errors = 0;
  684. knc->core[core].works = 0;
  685. knc->core[core].errors = 0;
  686. knc->core[core].shares = 0;
  687. knc->core[core].completed = 0;
  688. }
  689. mutex_unlock(&knc->state_lock);
  690. }
/* API stats callback: report device-wide totals plus per-die data for the
 * die owning this processor's core. */
static struct api_data *knc_api_stats(struct cgpu_info *cgpu)
{
	struct knc_state *knc = cgpu->device_data;
	struct knc_core_state * const proccore = &knc->core[cgpu->proc_id];
	struct knc_die * const die = proccore->die;
	struct api_data *root = NULL;
	int core;
	char label[256];
	mutex_lock(&knc->state_lock);
	root = api_add_int(root, "dies", &knc->dies, 1);
	root = api_add_int(root, "cores", &knc->cores, 1);
	root = api_add_uint64(root, "shares", &knc->shares, 1);
	root = api_add_uint64(root, "works", &knc->works, 1);
	root = api_add_uint64(root, "completed", &knc->completed, 1);
	root = api_add_uint64(root, "errors", &knc->errors, 1);
	/* Active cores */
	int active = knc->cores;
	for (core = 0; core < knc->cores; core++) {
		if (knc_core_disabled(&knc->core[core]))
			active -= 1;
	}
	root = api_add_int(root, "active", &active, 1);
	/* Per ASIC/die data */
	{
		/* Helper macros prefix each key with "<channel>.<die>." */
#define knc_api_die_string(name, value) do { \
	snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
	root = api_add_string(root, label, value, 1); \
} while(0)
#define knc_api_die_int(name, value) do { \
	snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \
	uint64_t v = value; \
	root = api_add_uint64(root, label, &v, 1); \
} while(0)
		/* Model */
		{
			char *model = "?";
			switch(die->version) {
			case KNC_VERSION_JUPITER:
				model = "Jupiter";
				break;
			case KNC_VERSION_NEPTUNE:
				model = "Neptune";
				break;
			}
			knc_api_die_string("model", model);
			knc_api_die_int("cores", die->cores);
		}
		/* Core based stats */
		{
			uint64_t errors = 0;
			uint64_t shares = 0;
			uint64_t works = 0;
			uint64_t completed = 0;
			/* coremap: '1' = core enabled, '0' = currently disabled (VLA, cores+1 incl. NUL) */
			char coremap[die->cores+1];
			for (core = 0; core < die->cores; core++) {
				coremap[core] = knc_core_disabled(&die->core[core]) ? '0' : '1';
				works += die->core[core].works;
				shares += die->core[core].shares;
				errors += die->core[core].errors;
				completed += die->core[core].completed;
			}
			coremap[die->cores] = '\0';
			knc_api_die_int("errors", errors);
			knc_api_die_int("shares", shares);
			knc_api_die_int("works", works);
			knc_api_die_int("completed", completed);
			knc_api_die_string("coremap", coremap);
		}
	}
	mutex_unlock(&knc->state_lock);
	return root;
}
  763. static
  764. void hash_driver_work(struct thr_info * const thr)
  765. {
  766. struct cgpu_info * const cgpu = thr->cgpu;
  767. struct device_drv * const drv = cgpu->drv;
  768. while (likely(!cgpu->shutdown))
  769. {
  770. drv->scanwork(thr);
  771. if (unlikely(thr->pause || cgpu->deven != DEV_ENABLED))
  772. mt_disable(thr);
  773. }
  774. }
/* Driver interface registered via BFG_REGISTER_DRIVER above */
struct device_drv kncasic_drv = {
	.dname = "kncasic",		/* driver name used for detection/config */
	.name = "KNC",			/* short display name */
	.drv_detect = kncasic_detect,
	.minerloop = hash_driver_work,	/* scanwork-driven loop, one thread per device */
	.flush_work = knc_flush_work,
	.scanwork = knc_scanwork,
	.zero_stats = knc_zero_stats,
	.get_api_stats = knc_api_stats,
};