deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
    static struct driver_registration *initlist;
    struct driver_registration *ndr;

    if (!drv)
    {
        // Move initlist to hashtables
        LL_FOREACH(initlist, ndr)
        {
            drv = ndr->drv;
            if (drv->drv_init)
                drv->drv_init();
            HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
            HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
        }
        initlist = NULL;
        return;
    }

    ndr = malloc(sizeof(*ndr));
    *ndr = (struct driver_registration){
        .drv = drv,
    };
    LL_PREPEND(initlist, ndr);
}
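
/* Illustrative sketch (not part of this file): drivers register themselves
 * before bfg_devapi_init() runs, which then passes NULL here to flush the
 * init list into the hash tables. Assuming a constructor-based hookup (in
 * BFGMiner this is normally wrapped by a registration macro in deviceapi.h),
 * a hypothetical driver would look like:
 *
 *   extern struct device_drv mydrv_drv;
 *
 *   static __attribute__((constructor))
 *   void mydrv_register(void)
 *   {
 *       _bfg_register_driver(&mydrv_drv);
 *   }
 */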

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
    return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
    return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init(void)
{
    _bfg_register_driver(NULL);
    HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname   );
    HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}

float common_sha256d_and_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
    switch (malgo->algo)
    {
#ifdef USE_SCRYPT
        case POW_SCRYPT:
            return 1./0x10000;
#endif
        case POW_SHA256D:
            return 1.;
        default:
            return -1.;
    }
}

#ifdef USE_SCRYPT
float common_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
    return (malgo->algo == POW_SCRYPT) ? (1./0x10000) : -1.;
}
#endif

bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
    struct cgpu_info *cgpu = thr->cgpu;
    const long cycle = opt_log_interval / 5 ? : 1;

    if (unlikely(hashes == -1)) {
        if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
            dev_error(cgpu, REASON_THREAD_ZERO_HASH);

        if (thr->scanhash_working && opt_restart) {
            applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
            thr->scanhash_working = false;
            cgpu->reinit_backoff = 5.2734375;
            hashes = 0;
        } else {
            applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
            cgpu->deven = DEV_RECOVER_ERR;
            run_cmd(cmd_idle);
            return false;
        }
    }
    else
        thr->scanhash_working = true;

    thr->hashes_done += hashes;
    if (hashes > cgpu->max_hashes)
        cgpu->max_hashes = hashes;
    timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);

    // max_nonce management (optional)
    if (max_nonce)
    {
        uint64_t new_max_nonce = *max_nonce;
        new_max_nonce *= cycle;
        new_max_nonce *= 1000000;
        new_max_nonce /= ((uint64_t)thr->tv_hashes_done.tv_sec * 1000000) + thr->tv_hashes_done.tv_usec;

        if (new_max_nonce > 0xffffffff)
            new_max_nonce = 0xffffffff;

        *max_nonce = new_max_nonce;
    }

    hashmeter2(thr);

    return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
    struct timeval tv_now, tv_delta;
    timer_set_now(&tv_now);
    timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
    thr->_tv_last_hashes_done_call = tv_now;
    return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
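
/* Conventions visible above: a driver returns -1 hashes to report hardware
 * failure (hashes_done then arranges a reinitialize, or disables the
 * processor), and any other count is credited to the hashmeter. hashes_done2
 * is a convenience for drivers that do not time their own scan intervals:
 * it charges everything since the previous hashes_done2 call on this thread.
 */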

/* A generic wait function for threads that poll, waiting up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
    struct timeval tv_timer, tv_now, tv_timeout;
    fd_set rfds;
    SOCKETTYPE wrn = thr->work_restart_notifier[0];
    int rv;

    if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
    {
        // This is a bug!
        applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
        cgsleep_ms(mstime);
        return (thr->work_restart ? 0 : ETIMEDOUT);
    }

    timer_set_now(&tv_now);
    timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
    while (true)
    {
        FD_ZERO(&rfds);
        FD_SET(wrn, &rfds);
        tv_timeout = tv_timer;
        rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
        if (rv == 0)
            return ETIMEDOUT;
        if (rv > 0)
        {
            if (thr->work_restart)
                return 0;
            notifier_read(thr->work_restart_notifier);
        }
        timer_set_now(&tv_now);
    }
}
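
/* Usage sketch (hypothetical driver code): a polling driver can sleep in
 * restart_wait() instead of cgsleep_ms() so it reacts to new work promptly:
 *
 *   if (restart_wait(thr, 100) == 0)
 *       return 0;  // work restart requested; abandon the current job
 *   // otherwise 100 ms elapsed with no restart; keep polling
 */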

static
struct work *get_and_prepare_work(struct thr_info *thr)
{
    struct cgpu_info *proc = thr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work;

    work = get_work(thr);
    if (!work)
        return NULL;
    if (api->prepare_work && !api->prepare_work(thr, work)) {
        free_work(work);
        applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
        return NULL;
    }
    return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_start, tv_end;
    struct timeval tv_hashes, tv_worktime;
    uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
    int64_t hashes;
    struct work *work;
    const bool primary = (!mythr->device_thread) || mythr->primary_thread;

#ifdef HAVE_PTHREAD_CANCEL
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif

    if (cgpu->deven != DEV_ENABLED)
        mt_disable(mythr);

    while (likely(!cgpu->shutdown)) {
        mythr->work_restart = false;
        request_work(mythr);
        work = get_and_prepare_work(mythr);
        if (!work)
            break;
        timer_set_now(&work->tv_work_start);

        do {
            thread_reportin(mythr);
            /* Only allow the mining thread to be cancelled when
             * it is not in the driver code. */
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
            timer_set_now(&tv_start);
            /* api->scanhash should scan the work for valid nonces
             * until max_nonce is reached or thr_info->work_restart */
            hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
            timer_set_now(&tv_end);
            pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            pthread_testcancel();
            thread_reportin(mythr);

            timersub(&tv_end, &tv_start, &tv_hashes);
            if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
                goto disabled;

            if (unlikely(mythr->work_restart)) {
                /* Apart from device_thread 0, we stagger the
                 * starting of every next thread to try and get
                 * all devices busy before worrying about
                 * getting work for their extra threads */
                if (!primary) {
                    struct timespec rgtp;

                    rgtp.tv_sec = 0;
                    rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
                    nanosleep(&rgtp, NULL);
                }
                break;
            }

            if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
                mt_disable(mythr);

            timersub(&tv_end, &work->tv_work_start, &tv_worktime);
        /* The inner do-while loop will exit unless the device is capable of
         * scanning a specific nonce range (currently CPU and GPU drivers)
         * See abandon_work comments for more details */
        } while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
        free_work(work);
    }
}

void mt_disable_start__async(struct thr_info * const mythr)
{
    mt_disable_start(mythr);
    if (mythr->prev_work)
        free_work(mythr->prev_work);
    mythr->prev_work = mythr->work;
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}
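
/* Overview of the asynchronous job state machine driven by minerloop_async
 * below. A driver implementing job_prepare/job_start (and optionally
 * job_get_results/job_process_results) is stepped through roughly:
 *
 *   do_job_prepare -> job_prepare_complete -> do_get_results (if a job is
 *   already running) -> job_results_fetched -> do_job_start -> (driver
 *   calls) mt_job_transition and job_start_complete -> do_process_results
 *
 * The do_* functions call into the driver; the *_complete / *_fetched
 * functions are invoked (directly here, or by the driver when an operation
 * finishes) to advance the cycle. This summary is inferred from the code
 * below rather than stated by it.
 */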

bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_worktime;

    mythr->tv_morework.tv_sec = -1;
    mythr->_job_transition_in_progress = true;
    if (mythr->work)
        timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
    if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
    {
        mythr->work_restart = false;
        request_work(mythr);
        // FIXME: Allow get_work to return NULL to retry on notification
        if (mythr->next_work)
            free_work(mythr->next_work);
        mythr->next_work = get_and_prepare_work(mythr);
        if (!mythr->next_work)
            return false;
        mythr->starting_next_work = true;
        api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
    }
    else
    {
        mythr->starting_next_work = false;
        api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
    }
    job_prepare_complete(mythr);
    return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
    if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
        return;

    if (mythr->work)
    {
        if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
            do_get_results(mythr, true);
        else
        {}  // TODO: Set a timer to call do_get_results when job is near complete
    }
    else  // no job currently running
        do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work = mythr->work;

    mythr->_job_transition_in_progress = true;
    mythr->tv_results_jobstart = mythr->tv_jobstart;
    mythr->_proceed_with_new_job = proceed_with_new_job;
    if (api->job_get_results)
        api->job_get_results(mythr, work);
    else
        job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
    if (mythr->_proceed_with_new_job)
        do_job_start(mythr);
    else
    {
        if (likely(mythr->prev_work))
        {
            struct timeval tv_now;

            timer_set_now(&tv_now);
            do_process_results(mythr, &tv_now, mythr->prev_work, true);
        }
        mt_disable_start__async(mythr);
    }
}

void do_job_start(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;

    thread_reportin(mythr);
    api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
    struct timeval tv_now;

    timer_set_now(&tv_now);

    if (mythr->starting_next_work)
    {
        mythr->next_work->tv_work_start = tv_now;
        if (mythr->prev_work)
            free_work(mythr->prev_work);
        mythr->prev_work = mythr->work;
        mythr->work = mythr->next_work;
        mythr->next_work = NULL;
    }
    mythr->tv_jobstart = tv_now;
    mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
    struct timeval tv_now;

    if (unlikely(!mythr->prev_work))
        return;
    timer_set_now(&tv_now);

    do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
    struct cgpu_info *proc = mythr->cgpu;

    if (failure)
    {
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
    }
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_hashes;
    int64_t hashes = 0;

    if (api->job_process_results)
        hashes = api->job_process_results(mythr, work, stopping);
    thread_reportin(mythr);

    if (hashes)
    {
        timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
        if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
            return false;
    }

    return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct timeval tv_now;
    int maxfd;
    fd_set rfds;

    timer_set_now(&tv_now);
    FD_ZERO(&rfds);
    FD_SET(thr->notifier[0], &rfds);
    maxfd = thr->notifier[0];
    FD_SET(thr->work_restart_notifier[0], &rfds);
    set_maxfd(&maxfd, thr->work_restart_notifier[0]);
    if (thr->mutex_request[1] != INVSOCK)
    {
        FD_SET(thr->mutex_request[0], &rfds);
        set_maxfd(&maxfd, thr->mutex_request[0]);
    }
    if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
        return;
    if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
    {
        // FIXME: This can only handle one request at a time!
        pthread_mutex_t *mutexp = &cgpu->device_mutex;
        notifier_read(thr->mutex_request);
        mutex_lock(mutexp);
        pthread_cond_signal(&cgpu->device_cond);
        pthread_cond_wait(&cgpu->device_cond, mutexp);
        mutex_unlock(mutexp);
    }
    if (FD_ISSET(thr->notifier[0], &rfds))
        notifier_read(thr->notifier);
    if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
        notifier_read(thr->work_restart_notifier);
}

void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
    mutex_init(&cgpu->device_mutex);
    notifier_init(cgpu->thr[0]->mutex_request);
    pthread_cond_init(&cgpu->device_cond, bfg_condattr);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    mutex_lock(&cgpu->device_mutex);
    notifier_wake(thr->mutex_request);
    pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    pthread_cond_signal(&cgpu->device_cond);
    mutex_unlock(&cgpu->device_mutex);
}
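
/* Usage sketch: a thread other than the device's minerloop thread can
 * serialize access to device state like this (hypothetical caller):
 *
 *   cgpu_request_control(cgpu);  // wakes the minerloop via mutex_request
 *                                // and blocks until it parks in
 *                                // do_notifier_select's handshake
 *   ...touch device state...
 *   cgpu_release_control(cgpu);  // signals the minerloop to resume
 *
 * Both calls are no-ops on the device thread itself (the pthread_equal
 * checks), and do_notifier_select notes it can only service one request at
 * a time.
 */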

static
void _minerloop_setup(struct thr_info *mythr)
{
    struct cgpu_info * const cgpu = mythr->cgpu, *proc;

    if (mythr->work_restart_notifier[1] == -1)
        notifier_init(mythr->work_restart_notifier);

    for (proc = cgpu; proc; proc = proc->next_proc)
    {
        mythr = proc->thr[0];
        timer_set_now(&mythr->tv_watchdog);
        proc->disable_watchdog = true;
    }
}

void minerloop_async(struct thr_info *mythr)
{
    struct thr_info *thr = mythr;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool is_running, should_be_running;

    _minerloop_setup(mythr);

    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];

            // Nothing should happen while we're starting a job
            if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
                goto defer_events;

            is_running = mythr->work;
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);

            if (should_be_running)
            {
                if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
                {
                    mt_disable_finish(mythr);
                    goto djp;
                }
                if (unlikely(mythr->work_restart))
                    goto djp;
            }
            else  // ! should_be_running
            {
                if (unlikely(mythr->_job_transition_in_progress && timer_isset(&mythr->tv_morework)))
                {
                    // Really only happens at startup
                    applog(LOG_DEBUG, "%"PRIpreprv": Job transition in progress, with morework timer enabled: unsetting in-progress flag", proc->proc_repr);
                    mythr->_job_transition_in_progress = false;
                }
                if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
                {
disabled: ;
                    if (is_running)
                    {
                        if (mythr->busy_state != TBS_GETTING_RESULTS)
                            do_get_results(mythr, false);
                        else
                            // Avoid starting job when pending result fetch completes
                            mythr->_proceed_with_new_job = false;
                    }
                    else  // !mythr->_mt_disable_called
                        mt_disable_start__async(mythr);
                }
                timer_unset(&mythr->tv_morework);
            }

            if (timer_passed(&mythr->tv_morework, &tv_now))
            {
djp: ;
                if (!do_job_prepare(mythr, &tv_now))
                    goto disabled;
            }

defer_events:
            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }

            reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }

        do_notifier_select(thr, &tv_timeout);
    }
}
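
/* Note on the loop above: tv_timeout starts "unset" (tv_sec == -1), and each
 * reduce_timeout_to() pulls it earlier, so it ends up at the soonest of
 * every processor's morework/poll/watchdog timers. do_notifier_select() then
 * sleeps in select() until that deadline or until a notifier fires, so the
 * loop wakes exactly when there is something to do.
 */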

static
void do_queue_flush(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;

    api->queue_flush(mythr);
    if (mythr->next_work)
    {
        free_work(mythr->next_work);
        mythr->next_work = NULL;
    }
}

void minerloop_queue(struct thr_info *thr)
{
    struct thr_info *mythr;
    struct cgpu_info *cgpu = thr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool should_be_running;
    struct work *work;

    _minerloop_setup(thr);

    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
            if (should_be_running)
            {
                if (unlikely(mythr->_mt_disable_called))
                    mt_disable_finish(mythr);

                if (unlikely(mythr->work_restart))
                {
                    mythr->work_restart = false;
                    do_queue_flush(mythr);
                }

                while (!mythr->queue_full)
                {
                    if (mythr->next_work)
                    {
                        work = mythr->next_work;
                        mythr->next_work = NULL;
                    }
                    else
                    {
                        request_work(mythr);
                        // FIXME: Allow get_work to return NULL to retry on notification
                        work = get_and_prepare_work(mythr);
                    }
                    if (!work)
                        break;
                    if (!api->queue_append(mythr, work))
                        mythr->next_work = work;
                }
            }
            else
            if (unlikely(!mythr->_mt_disable_called))
            {
                do_queue_flush(mythr);
                mt_disable_start(mythr);
            }

            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }

            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
            if (should_be_running && !mythr->queue_full)
                goto redo;

            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }

        // HACK: Some designs set the main thr tv_poll from secondary thrs
        reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);

        do_notifier_select(thr, &tv_timeout);
    }
}
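
/* Queue-driver contract implied above: queue_append() returns false when
 * the device cannot take more work, and the rejected work is parked in
 * next_work to be retried first on the next pass. Drivers are expected to
 * maintain mythr->queue_full themselves (presumably from queue_append and
 * poll); this loop only reads the flag.
 */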

void *miner_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *drv = cgpu->drv;

    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

    char threadname[20];
    snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
    RenameThread(threadname);

    if (drv->thread_init && !drv->thread_init(mythr)) {
        dev_error(cgpu, REASON_THREAD_FAIL_INIT);
        for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
            dev_error(slave, REASON_THREAD_FAIL_INIT);
        __thr_being_msg(LOG_ERR, mythr, "failure, exiting");
        goto out;
    }

    if (drv_ready(cgpu) && !cgpu->already_set_defaults)
        cgpu_set_defaults(cgpu);

    thread_reportout(mythr);
    applog(LOG_DEBUG, "Popping ping in miner thread");
    notifier_read(mythr->notifier);  // Wait for a notification to start

    cgtime(&cgpu->cgminer_stats.start_tv);
    if (drv->minerloop)
        drv->minerloop(mythr);
    else
        minerloop_scanhash(mythr);
    __thr_being_msg(LOG_NOTICE, mythr, "shutting down");

out: ;
    struct cgpu_info *proc = cgpu;
    do
    {
        proc->deven = DEV_DISABLED;
        proc->status = LIFE_DEAD2;
    }
    while ((proc = proc->next_proc) && !proc->threads);

    mythr->getwork = 0;
    mythr->has_pth = false;
    cgsleep_ms(1);

    if (drv->thread_shutdown)
        drv->thread_shutdown(mythr);

    notifier_destroy(mythr->notifier);

    return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
    int lpcount;

    if (!cgpu->procs)
        cgpu->procs = 1;
    lpcount = cgpu->procs;
    cgpu->device = cgpu;

    cgpu->dev_repr = malloc(6);
    cgpu->dev_repr_ns = malloc(6);

#ifdef NEED_BFG_LOWL_VCOM
    maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
    maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
    maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif

    devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
    devices_new[total_devices_new++] = cgpu;

    if (lpcount > 1)
    {
        int tpp = cgpu->threads / lpcount;
        struct cgpu_info **nlp_p, *slave;

        nlp_p = &cgpu->next_proc;
        for (int i = 1; i < lpcount; ++i)
        {
            slave = malloc(sizeof(*slave));
            *slave = *cgpu;
            slave->proc_id = i;
            slave->threads = tpp;
            devices_new[total_devices_new++] = slave;
            *nlp_p = slave;
            nlp_p = &slave->next_proc;
        }
        *nlp_p = NULL;
        cgpu->proc_id = 0;
        cgpu->threads -= (tpp * (lpcount - 1));
    }

    renumber_cgpu(cgpu);
    cgpu->last_device_valid_work = time(NULL);
    return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
    mutex_lock(&_add_cgpu_mutex);
    const bool rv = _add_cgpu(cgpu);
    mutex_unlock(&_add_cgpu_mutex);
    return rv;
}

void add_cgpu_live(void *p)
{
    add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
    if (!prev_cgpu)
        return add_cgpu(cgpu);

    while (prev_cgpu->next_proc)
        prev_cgpu = prev_cgpu->next_proc;

    mutex_lock(&_add_cgpu_mutex);
    int old_total_devices = total_devices_new;
    if (!_add_cgpu(cgpu))
    {
        mutex_unlock(&_add_cgpu_mutex);
        return false;
    }
    prev_cgpu->next_proc = devices_new[old_total_devices];
    mutex_unlock(&_add_cgpu_mutex);
    return true;
}
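
/* Illustrative sketch (hypothetical driver detection code): a driver that
 * finds hardware with several identical chips allocates one cgpu_info and
 * lets _add_cgpu fan it out into per-chip processors:
 *
 *   struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
 *   cgpu->drv = &mydrv_drv;  // hypothetical driver object
 *   cgpu->procs = 4;         // one master + three slave processors
 *   cgpu->threads = 1;
 *   add_cgpu(cgpu);
 *
 * add_cgpu_slave() instead appends the new device's processors onto an
 * existing device's next_proc chain.
 */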

const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;
    char *p = replybuf;
    bool first = true;

    *out_success = SDR_HELP;

    sdf = proc->set_device_funcs;
    if (!sdf)
nohelp:
        return "No help available";

    // Match up to the first whitespace (or end) of the requested option name
    size_t matchlen = 0;
    if (newvalue)
        while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
            ++matchlen;

    for ( ; sdf->optname; ++sdf)
    {
        if (!sdf->description)
            continue;
        if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
            continue;
        if (first)
            first = false;
        else
            p++[0] = '\n';
        p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
    }
    if (replybuf == p)
        goto nohelp;
    return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    int target_diff = proc->cutofftemp - proc->targettemp;
    proc->cutofftemp = atoi(newvalue);
    if (!proc->targettemp_user)
        proc->targettemp = proc->cutofftemp - target_diff;
    return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    proc->targettemp = atoi(newvalue);
    proc->targettemp_user = true;
    return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
    if (!rv)
        *out_success = SDR_OK;
    else if (!strcasecmp(optname, "help"))
        *out_success = SDR_HELP;
    else
        *out_success = SDR_ERR;
}

const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;

    sdf = proc->set_device_funcs;
    if (!sdf)
    {
        *out_success = SDR_NOSUPP;
        return "Device does not support setting parameters.";
    }
    for ( ; sdf->optname; ++sdf)
        if (!strcasecmp(optname, sdf->optname))
        {
            *out_success = SDR_AUTO;
            const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
            if (SDR_AUTO == *out_success)
                _set_auto_sdr(out_success, rv, optname);
            return rv;
        }
    if (!strcasecmp(optname, "help"))
        return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
    *out_success = SDR_UNKNOWN;
    sprintf(replybuf, "Unknown option: %s", optname);
    return replybuf;
}

const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    if (proc->drv->set_device)
    {
        const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
        _set_auto_sdr(out_success, rv, optname);
        return rv;
    }
    return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
    switch (*out_success)
    {
        case SDR_NOSUPP:
        case SDR_UNKNOWN:
            if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
                return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
            else if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
                return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
        default:
            break;
    }
    return rv;
}
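
/* Usage sketch (hypothetical caller): per-device options are funneled
 * through proc_set_device, e.g.:
 *
 *   enum bfg_set_device_replytype success;
 *   char reply[256];
 *   char optname[] = "temp-cutoff", newvalue[] = "85";
 *   proc_set_device(proc, optname, newvalue, reply, &success);
 *
 * The wrapper above also gives every device temp-cutoff/temp-target
 * handling as a fallback when the driver itself does not recognise the
 * option.
 */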

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
    detectone_func_t detectone = userp;
    if (serial_claim(info->path, NULL))
        applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
    return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
    struct string_elist *iter, *tmp;
    const char *dev, *colon;
    bool inhibitauto = flags & 4;
    char found = 0;
    bool forceauto = flags & 1;
    bool hasname;
    size_t namel = strlen(api->name);
    size_t dnamel = strlen(api->dname);

#ifdef NEED_BFG_LOWL_VCOM
    clear_detectone_meta_info();
#endif
    DL_FOREACH_SAFE(scan_devices, iter, tmp) {
        dev = iter->string;
        if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
            size_t idlen = colon - dev;

            // allow either name:device or dname:device
            if ((idlen != namel || strncasecmp(dev, api->name, idlen))
             && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
                continue;
            dev = colon + 1;
            hasname = true;
        }
        else
            hasname = false;
        if (!strcmp(dev, "auto"))
            forceauto = true;
        else if (!strcmp(dev, "noauto"))
            inhibitauto = true;
        else
        if ((flags & 2) && !hasname)
            continue;
        else
        if (!detectone)
        {}  // do nothing
        else
        if (!strcmp(dev, "all"))
        {}  // n/a
        else if (detectone(dev)) {
            string_elist_del(&scan_devices, iter);
            ++found;
        }
    }
    if ((forceauto || !(inhibitauto || found)) && autoscan)
        found += autoscan();

    return found;
}
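
/* Flag bits decoded above: 1 forces autoscan even when devices were found,
 * 2 skips --scan-serial entries that do not carry this driver's name
 * ("name:device" or "dname:device"), and 4 inhibits autoscan. "auto" and
 * "noauto" entries override these per run, and autoscan only fires when
 * forced, or when nothing was found and it was not inhibited.
 */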

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
    char fullpath[PATH_MAX];
    strcpy(fullpath, path);
    strcat(fullpath, "/");
    if (subdir) {
        strcat(fullpath, subdir);
        strcat(fullpath, "/");
    }
    if (sub2) {
        strcat(fullpath, sub2);
        strcat(fullpath, "/");
    }
    strcat(fullpath, filename);
    return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do { \
    f = _open_bitstream(path, subdir, sub2, filename); \
    if (f) \
        return f; \
} while (0)

#define _open_bitstream2(path, path3)  do { \
    _open_bitstream(path, NULL, path3); \
    _open_bitstream(path, "../share/" PACKAGE, path3); \
    _open_bitstream(path, "../" PACKAGE, path3); \
} while (0)

#define _open_bitstream3(path)  do { \
    _open_bitstream2(path, dname); \
    _open_bitstream2(path, "bitstreams"); \
    _open_bitstream2(path, NULL); \
} while (0)

FILE *open_bitstream(const char *dname, const char *filename)
{
    FILE *f;

    _open_bitstream3(opt_kernel_path);
    _open_bitstream3(cgminer_path);
    _open_bitstream3(".");
    return NULL;
}
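
/* Search order note: open_bitstream() tries opt_kernel_path, cgminer_path,
 * then "."; within each, it looks under the driver's dname subdirectory,
 * then under "bitstreams", then with no subdirectory, and for each of those
 * checks the directory itself, ../share/PACKAGE, and ../PACKAGE relative to
 * it. The first fopen() that succeeds wins; e.g. a hypothetical
 * open_bitstream("ztex", "fpga.bit") might resolve to
 * "<opt_kernel_path>/ztex/fpga.bit".
 */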

void close_device_fd(struct thr_info * const thr)
{
    struct cgpu_info * const proc = thr->cgpu;
    const int fd = proc->device_fd;

    if (fd == -1)
        return;

    if (close(fd))
        applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
    else
    {
        proc->device_fd = -1;
        applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
    }
}

struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
    struct cgpu_info *proc = (void *)dev;
    for (int i = 0; i < procid; ++i)
    {
        proc = proc->next_proc;
        if (unlikely((!proc) || proc->device != dev))
            return NULL;
    }
    return proc;
}
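
/* Usage sketch: fetch a specific processor of a device by index; returns
 * NULL if procid walks past this device's own processors:
 *
 *   struct cgpu_info *third = device_proc_by_id(dev, 2);
 *   if (!third)
 *       applog(LOG_ERR, "no processor #2 on this device");  // hypothetical handling
 */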