/* deviceapi.c */
/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

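/* Drivers register in two phases: _bfg_register_driver(drv) (typically
 * called from constructor functions, before main) queues the registration on
 * a local list, and bfg_devapi_init() later passes NULL to flush that list
 * into the two hash tables above, _bfg_drvreg1 keyed by dname and
 * _bfg_drvreg2 keyed by name, running each driver's drv_init hook along the
 * way. */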
void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname   );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}

bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
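	// Roughly: scale *max_nonce so the next scanhash call lasts about
	// `cycle` seconds; mult/0x400 approximates cycle divided by the elapsed
	// time, growing the range after a fast call, while the branches further
	// below shrink it after a call that overran the target interval.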
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll: waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}

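/* Hypothetical usage sketch for a polling driver: sleep up to 100ms between
 * polls, but return early if a work restart arrives (nonces_scanned is an
 * illustrative variable, not part of this API):
 *
 *	if (restart_wait(thr, 100) == 0)
 *		return nonces_scanned;  // work restart requested; abandon this range
 */
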
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

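/* The functions below form the job state machine behind minerloop_async.
 * Roughly: do_job_prepare -> job_prepare_complete -> do_get_results ->
 * job_results_fetched -> do_job_start, with mt_job_transition and
 * job_start_complete invoked by the driver once the new job is running on
 * the hardware, and do_process_results crediting the finished work's hashes.
 * Asynchronous drivers call these completion hooks rather than blocking. */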
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when the job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

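/* Control requests let a foreign thread touch the device while the miner
 * thread is parked: the requester locks device_mutex, wakes the miner thread
 * via the mutex_request notifier, and waits on device_cond;
 * do_notifier_select() above answers by signalling the condition and then
 * blocking on it until cgpu_release_control() signals back. */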
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, NULL);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

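/* Hypothetical usage sketch from outside the miner thread (calls made from
 * the miner thread itself return immediately, as guarded above):
 *
 *	cgpu_request_control(cgpu);   // blocks until the miner thread yields
 *	// ... access the device safely here ...
 *	cgpu_release_control(cgpu);
 */
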
static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}

void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting a new job when the pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

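/* minerloop_queue drives devices exposing a queue-oriented API: the loop
 * feeds work via queue_append() until the driver sets queue_full, flushes
 * stale work with queue_flush() on restart, and calls poll() on the driver's
 * schedule, which is presumably where results are reported and queue_full is
 * cleared as entries complete. */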
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		do_notifier_select(thr, &tv_timeout);
	}
}

void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu))
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	} while ((proc = proc->next_proc) && !proc->threads);
	
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

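/* _add_cgpu registers a new device and, when cgpu->procs > 1, clones it into
 * per-processor entries chained through next_proc. Processors are named by
 * appending letters to the device name ('a'..'z', or two letters for devices
 * with more than 26 processors), and the device's threads are split evenly
 * across them. */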
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	renumber_cgpu(cgpu);
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	cgpu->dev_repr_ns = malloc(6);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int ns;
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		const bool manylp = (lpcount > 26);
		const char *as = (manylp ? "aa" : "a");
		
		// Note: strcpy instead of assigning a byte, to copy the '\0' too
		strcpy(&cgpu->proc_repr[5], as);
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], as);
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			if (manylp)
			{
				slave->proc_repr[5] += i / 26;
				slave->proc_repr[6] += i % 26;
				slave->proc_repr_ns[ns    ] += i / 26;
				slave->proc_repr_ns[ns + 1] += i % 26;
			}
			else
			{
				slave->proc_repr[5] += i;
				slave->proc_repr_ns[ns] += i;
			}
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	cgpu->last_device_valid_work = time(NULL);
	
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}

const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

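/* A driver opts into option handling by pointing set_device_funcs at a
 * NULL-terminated table of bfg_set_device_definition entries. A hypothetical
 * sketch (mydrv_set_clock is an illustrative handler, not a real one):
 *
 *	static const struct bfg_set_device_definition mydrv_set_device_funcs[] = {
 *		{.optname = "clock", .func = mydrv_set_clock, .description = "chip clock, in MHz"},
 *		{NULL},
 *	};
 */
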
#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	
	return detectone(info->path);
}
#endif

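/* _serial_detect walks the user-supplied scan_devices list for one driver.
 * As used below, flags bit 0 (1) forces autoscan, bit 1 (2) skips entries
 * that do not name this driver explicitly, and bit 2 (4) inhibits autoscan;
 * the device names "auto", "noauto" and "all" are handled specially. */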
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	bool doall = false;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
			doall = true;
#ifdef NEED_BFG_LOWL_VCOM
		else
		if (serial_claim(dev, NULL))
		{
			applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
			string_elist_del(&scan_devices, iter);
		}
#endif
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
#ifdef NEED_BFG_LOWL_VCOM
	if (doall && detectone)
		found += lowlevel_detect_id(_serial_detect_all, detectone, &lowl_vcom, 0, 0);
#endif
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

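/* The macros below deliberately shadow _open_bitstream() so that
 * open_bitstream() expands into a cascade of candidate locations: for each
 * base path (opt_kernel_path, cgminer_path, then "."), it tries the driver's
 * dname subdirectory, a "bitstreams" subdirectory, and the base itself, each
 * also under ../share/PACKAGE and ../PACKAGE relative to the base, returning
 * the first file that opens. */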
#define _open_bitstream(path, subdir, sub2) do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

#define _open_bitstream2(path, path3) do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

#define _open_bitstream3(path) do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}