/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
#include "config.h"

#include <ctype.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif
#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <utlist.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"
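
/* Drivers live on two intrusive lists: _bfg_drvreg1 linked by dname and
 * _bfg_drvreg2 linked by probe_priority. Both orderings are only established
 * by bfg_devapi_init below; until then, entries sit in prepend order. */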
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
		LL_FOREACH2(_bfg_drvreg1, ndr, next_dname)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
		}
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND2(_bfg_drvreg1, ndr, next_dname);
	LL_PREPEND2(_bfg_drvreg2, ndr, next_prio);
}
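
/* Illustrative only: a driver would typically register itself once at
 * startup, before bfg_devapi_init runs, e.g. (hypothetical driver):
 *
 *   extern struct device_drv mydev_drv;
 *   _bfg_register_driver(&mydev_drv);
 *
 * Calling _bfg_register_driver(NULL) instead walks the list and invokes each
 * registered driver's drv_init hook, as bfg_devapi_init does below. */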
static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}
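
/* Runs every driver's drv_init hook, then sorts both registration lists.
 * The LL_SORT fallback temporarily #defines `next` to the field each list is
 * linked through, since plain LL_SORT assumes a member named `next`. */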
void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
#ifdef LL_SORT2
	LL_SORT2(_bfg_drvreg1, sort_drv_by_dname, next_dname);
	LL_SORT2(_bfg_drvreg2, sort_drv_by_priority, next_prio);
#else
#define next next_dname
	LL_SORT(_bfg_drvreg1, sort_drv_by_dname);
#undef next
#define next next_prio
	LL_SORT(_bfg_drvreg2, sort_drv_by_priority);
#undef next
#endif
}
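
/* Minimum share difficulty a processor will honour for a given algorithm; a
 * negative return appears to signal that the algorithm is unsupported.
 * Scrypt devices can filter down to diff 1/65536, SHA256d devices to diff 1. */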
float common_sha256d_and_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	switch (malgo->algo)
	{
#ifdef USE_SCRYPT
		case POW_SCRYPT:
			return 1./0x10000;
#endif
#ifdef USE_SHA256D
		case POW_SHA256D:
			return 1.;
#endif
		default:
			return -1.;
	}
}
#ifdef USE_SCRYPT
float common_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	return (malgo->algo == POW_SCRYPT) ? (1./0x10000) : -1.;
}
#endif
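
/* Record hashing progress for a thread. hashes == -1 is the driver's error
 * signal: the first such failure attempts a reinitialise (when opt_restart),
 * a repeat disables the processor. When a max_nonce pointer is supplied, the
 * nonce range is rescaled so a scanhash pass lasts about `cycle` seconds at
 * the observed rate, clamped to the full 32-bit range:
 *
 *   *max_nonce = *max_nonce * cycle / hashing_time_in_seconds
 *
 * e.g. a range of 0x1000000 nonces scanned in 0.5 s with cycle = 2 s grows
 * to 0x4000000. */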
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (max_nonce)
	{
		uint64_t new_max_nonce = *max_nonce;
		new_max_nonce *= cycle;
		new_max_nonce *= 1000000;
		new_max_nonce /= ((uint64_t)thr->tv_hashes_done.tv_sec * 1000000) + thr->tv_hashes_done.tv_usec;
		
		if (new_max_nonce > 0xffffffff)
			new_max_nonce = 0xffffffff;
		
		*max_nonce = new_max_nonce;
	}
	
	hashmeter2(thr);
	
	return true;
}
bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
/* A generic wait function for threads that poll. It waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
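
/* Illustrative only: a polling driver might use restart_wait to sleep
 * between hardware polls while still reacting promptly to new work, e.g.:
 *
 *   if (!restart_wait(thr, 100))
 *       ;  // work restart requested; abandon the current job
 */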
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}
// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			
			timer_set_now(&tv_start);
			/* api->scanhash should scan the work for valid nonces
			 * until max_nonce is reached or thr_info->work_restart */
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
			/* The inner do-while loop will exit unless the device is capable of
			 * scanning a specific nonce range (currently CPU and GPU drivers)
			 * See abandon_work comments for more details */
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
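
/* The functions below implement the asynchronous minerloop as a callback
 * chain; roughly:
 *
 *   do_job_prepare -> job_prepare_complete -> do_get_results
 *     -> job_results_fetched -> do_job_start -> mt_job_transition
 *     -> job_start_complete -> do_process_results
 *
 * Drivers call the *_complete/*_fetched entry points when their hardware
 * finishes the corresponding step, letting minerloop_async multiplex many
 * processors on a single thread. */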
void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}
void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}
void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}
void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}
void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}
void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}
bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
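
/* Control-request handshake: another thread asks the device thread for
 * exclusive access by waking mutex_request; the device thread (inside
 * do_notifier_select above) then signals device_cond and blocks on it until
 * the requester is done. Both sides serialise on device_mutex. */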
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, bfg_condattr);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}
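
/* Illustrative only: typical use from a non-device thread to touch device
 * state safely:
 *
 *   cgpu_request_control(cgpu);
 *   // ... mutate device state here ...
 *   cgpu_release_control(cgpu);
 *
 * Both calls are no-ops when already running on the device thread itself. */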
static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == INVSOCK)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(mythr->_job_transition_in_progress && timer_isset(&mythr->tv_morework)))
				{
					// Really only happens at startup
					applog(LOG_DEBUG, "%"PRIpreprv": Job transition in progress, with morework timer enabled: unsetting in-progress flag", proc->proc_repr);
					mythr->_job_transition_in_progress = false;
				}
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
				
				timer_unset(&mythr->tv_morework);
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}
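
/* Miner loop for drivers that maintain their own job queue: work is pushed
 * via api->queue_append until the driver reports queue_full, and
 * api->queue_flush discards stale work on restarts. Like minerloop_async,
 * a single thread services every processor of the device. */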
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		// HACK: Some designs set the main thr tv_poll from secondary thrs
		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
		
		do_notifier_select(thr, &tv_timeout);
	}
}
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}
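
/* Register a newly-detected device. A cgpu_info with procs > 1 is expanded
 * into a chain of per-processor cgpu_info clones linked via next_proc, with
 * the device's threads divided between them; any remainder stays with the
 * first (master) processor. */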
static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}
bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}
bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
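
/* The set_device framework: a driver exposes its tunable options as a
 * NULL-terminated array of bfg_set_device_definition entries, each naming an
 * option, a handler, and a help description; handlers report success via the
 * SDR_* reply codes. Illustrative only (hypothetical driver; field order as
 * used by the lookups below):
 *
 *   static const struct bfg_set_device_definition mydev_set_device_funcs[] = {
 *       {"clock", mydev_set_clock, "chip clock, in MHz"},
 *       {NULL},
 *   };
 */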
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}
const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}
static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}
static
const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}
static
const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}
const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char *newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (!newvalue)
		newvalue = "";
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}
#ifdef HAVE_CURSES
const char *proc_set_device_tui_wrapper(struct cgpu_info * const proc, char * const optname, const bfg_set_device_func_t func, const char * const prompt, const char * const success_msg)
{
	static char replybuf[0x2001];
	char * const cvar = curses_input(prompt);
	if (!cvar)
		return "Cancelled\n";
	
	enum bfg_set_device_replytype success;
	const char * const reply = func(proc, optname, cvar, replybuf, &success);
	free(cvar);
	
	if (reply)
	{
		if (reply != replybuf)
			snprintf(replybuf, sizeof(replybuf), "%s\n", reply);
		else
			tailsprintf(replybuf, sizeof(replybuf), "\n");
		return replybuf;
	}
	
	return success_msg ?: "Successful\n";
}
#endif
#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif
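
/* Flag bits for _serial_detect, as used below: 1 forces an autoscan even if
 * devices were already found (or noauto was given), 2 restricts probing to
 * --scan entries explicitly prefixed with this driver's name or dname, and
 * 4 inhibits autoscan. */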
// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
			{}  // do nothing
		else
		if (!strcmp(dev, "all"))
			{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}
static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do {  \
	f = _open_bitstream(path, subdir, sub2, filename);  \
	if (f)  \
		return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
	_open_bitstream(path, NULL, path3);  \
	_open_bitstream(path, "../share/" PACKAGE, path3);  \
	_open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
	_open_bitstream2(path, dname);  \
	_open_bitstream2(path, "bitstreams");  \
	_open_bitstream2(path, NULL);  \
} while(0)
FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}
void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}
struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = (void*)dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}