deviceapi.c

/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "fpgautils.h"
#include "logging.h"
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

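/* Drivers register themselves here before bfg_devapi_init runs. Until the
 * final call with drv == NULL, registrations accumulate on a local list; the
 * NULL call runs each driver's drv_init and moves the entries into the two
 * lookup hash tables (_bfg_drvreg1 keyed by dname, _bfg_drvreg2 by name). */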
void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

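/* Finalize driver registration: flush the pending list into the hash tables,
 * then sort them so probing and by-name lookups iterate in a stable order. */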
void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}

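/* Record hashing progress for one device thread. A hash count of -1 signals
 * a scanhash failure: the device is flagged for reinitialization (if it had
 * been working and restarts are enabled) or disabled outright. Otherwise the
 * counters are updated and, when a max_nonce pointer is supplied, the nonce
 * range is rescaled so one scan takes roughly opt_log_interval/5 seconds. */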
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll. It waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
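/* Typical use in a polling driver (hypothetical sketch, not from this file):
 *
 *	if (restart_wait(thr, 100) == 0)
 *		break;  // work restart requested; abandon the current job
 *	// otherwise ETIMEDOUT: poll the device and wait again
 */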
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}

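/* Fetch work from the global queue and run the driver's optional
 * prepare_work hook; a prepare failure disables the processor. */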
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

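/* The asynchronous minerloop drives each job through a sequence of stages:
 * do_job_prepare -> do_get_results (for any job already running) ->
 * do_job_start -> do_process_results. The job_*_complete / *_fetched
 * functions below are the continuation points that drivers invoke (directly,
 * or via the default paths here) as their hardware finishes each stage. */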
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

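/* Begin collecting results for the current job. If the driver has no
 * job_get_results hook, the fetch completes immediately. */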
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

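/* Called by drivers at the moment the device switches jobs: when a new work
 * item is starting, rotate the work pointers (prev_work <- work <- next_work);
 * in all cases, stamp the job start time and clear the transition flag. */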
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

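/* Sleep until the given timeout expires or one of the thread's notifier
 * sockets becomes readable: a wakeup ping, a work restart, or a
 * mutex_request handshake from another thread wanting the device mutex. */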
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}

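/* Event-driven miner loop for drivers using the job_prepare/job_start hooks.
 * One thread services every processor on the device: each pass walks the
 * processor list, advances any job whose timer has fired, runs the driver
 * poll and the watchdog, then sleeps in do_notifier_select until the
 * earliest pending timer. */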
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely((is_running || !thr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !thr->_mt_disable_called
						mt_disable_start(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

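/* Miner loop for drivers exposing a work queue (queue_append/queue_flush):
 * keep each processor's queue topped up, flush it on work restart, and
 * otherwise poll the driver and sleep in the notifier select between passes. */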
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

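/* Thread entry point for a device: run the driver's thread_init, wait for
 * the start notification, then hand control to the driver's minerloop
 * (minerloop_scanhash by default). On exit, mark the device and any
 * threadless slave processors dead. */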
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu))
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

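/* Register a newly detected device. Devices with cgpu->procs > 1 are split
 * into that many logical processors: each gets a copy of the cgpu struct and
 * a letter suffix on its name ("a".."z", or two letters when there are more
 * than 26), with the device's threads divided among them. */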
bool add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	renumber_cgpu(cgpu);
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	cgpu->dev_repr_ns = malloc(6);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
	
#ifdef HAVE_FPGAUTILS
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int ns;
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		const bool manylp = (lpcount > 26);
		const char *as = (manylp ? "aa" : "a");
		
		// Note, strcpy instead of assigning a byte to get the \0 too
		strcpy(&cgpu->proc_repr[5], as);
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], as);
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			if (manylp)
			{
				slave->proc_repr[5] += i / 26;
				slave->proc_repr[6] += i % 26;
				slave->proc_repr_ns[ns    ] += i / 26;
				slave->proc_repr_ns[ns + 1] += i % 26;
			}
			else
			{
				slave->proc_repr[5] += i;
				slave->proc_repr_ns[ns] += i;
			}
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	cgpu->last_device_valid_work = time(NULL);
	
	return true;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	int old_total_devices = total_devices_new;
	
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	if (!add_cgpu(cgpu))
		return false;
	
	prev_cgpu->next_proc = devices_new[old_total_devices];
	return true;
}

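/* Scan the serial device list for devices belonging to this driver. The
 * flags argument is a bitmask: 1 forces autoscan, 2 only honours entries
 * explicitly prefixed with this driver's name or dname, and 4 inhibits
 * autoscan. "auto" and "noauto" entries override autoscan behaviour. */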
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef HAVE_FPGAUTILS
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else
		if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
#ifdef HAVE_FPGAUTILS
		else
		if (serial_claim(dev, NULL))
		{
			applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
			string_elist_del(&scan_devices, iter);
		}
#endif
		else
		if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			inhibitauto = true;
			++found;
		}
	}
	
	if ((forceauto || !inhibitauto) && autoscan)
		found += autoscan();
	
	return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

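/* The macros below expand open_bitstream into a cascade of _open_bitstream
 * calls, trying each base path (opt_kernel_path, cgminer_path, ".") combined
 * with each install layout (alongside the binary, ../share/PACKAGE,
 * ../PACKAGE) and each subdirectory (the driver's dname, "bitstreams", or
 * none), returning the first file that opens. */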
#define _open_bitstream(path, subdir, sub2)  do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

#define _open_bitstream2(path, path3)  do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

#define _open_bitstream3(path)  do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}