deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}
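
/* Registration sketch (hypothetical, for illustration only): a driver
 * arranges for _bfg_register_driver(&its_drv) to run at program startup,
 * before bfg_devapi_init() moves the pending registrations into the two
 * hashtables above. Assuming a GCC constructor-style hook and a hypothetical
 * mydriver_drv:
 *
 *	static void __attribute__((constructor)) register_mydriver(void)
 *	{
 *		_bfg_register_driver(&mydriver_drv);
 *	}
 */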
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > ((uint64_t)0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = ((uint64_t)*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = (uint64_t)*max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = (uint64_t)*max_nonce * 0x400 / ((((uint64_t)cycle * 1000000) + thr->tv_hashes_done.tv_usec) / ((uint64_t)cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
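
/* Usage sketch (hypothetical, not part of this file): a simple polling
 * driver that does not limit its nonce range might report progress after
 * each pass with:
 *
 *	if (!hashes_done2(thr, hashes_this_pass, NULL))
 *		return;  // device was put into DEV_RECOVER_ERR; stop mining
 *
 * Passing NULL for max_nonce skips the nonce-range scaling above; drivers
 * that implement can_limit_work pass a pointer to their current limit
 * instead, as minerloop_scanhash() below does.
 */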
/* A generic wait function for threads that poll: waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
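
/* Usage sketch (hypothetical): a driver waiting out a hardware busy period
 * while staying responsive to new work:
 *
 *	if (restart_wait(thr, 100) == 0)
 *		return 0;  // work restart requested; abandon the current job early
 *	// ETIMEDOUT: 100ms elapsed with no restart; continue polling the device
 */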
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
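
/* Sketch (hypothetical, for illustration only): a driver served by this loop
 * only needs a scanhash callback, plus optional prepare_work/can_limit_work:
 *
 *	static int64_t mydev_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
 *	{
 *		// search nonces from work->blk.nonce up to max_nonce, submit any
 *		// hits, and return the number of hashes checked (-1 on error)
 *	}
 *
 *	struct device_drv mydev_drv = {
 *		.dname = "mydev",
 *		.name = "MYD",
 *		.scanhash = mydev_scanhash,
 *		// with no .minerloop set, miner_thread() falls back to minerloop_scanhash
 *	};
 */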
void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	timer_set_now(&tv_now);
	
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, NULL);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}
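
/* Usage sketch (hypothetical): another thread (for example an RPC or TUI
 * handler) that needs exclusive access to the device brackets its work with
 * the pair above, which parks the miner thread inside do_notifier_select()
 * until control is released:
 *
 *	cgpu_request_control(cgpu);
 *	// ... safely touch device state normally owned by the miner thread ...
 *	cgpu_release_control(cgpu);
 *
 * cgpu_setup_control_requests(cgpu) must have been called beforehand, or the
 * mutex_request notifier stays INVSOCK and requests are never serviced.
 */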
static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}

void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
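
/* Sketch (hypothetical): a driver run by minerloop_async supplies the
 * callbacks that this loop and the job helpers above invoke, roughly:
 *
 *	struct device_drv myasync_drv = {
 *		.dname = "myasync",
 *		.name = "MYA",
 *		.minerloop = minerloop_async,
 *		.job_prepare = myasync_job_prepare,          // stage work on the device
 *		.job_start = myasync_job_start,              // begin hashing; then mt_job_transition()/job_start_complete()
 *		.job_get_results = myasync_job_get_results,  // optional; call job_results_fetched() when done
 *		.job_process_results = myasync_job_process_results,  // return hashes completed for the finished job
 *		.poll = myasync_poll,                        // runs whenever tv_poll expires
 *	};
 */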
static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
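
/* Sketch (hypothetical): a driver run by minerloop_queue provides queue
 * management callbacks instead of per-job ones:
 *
 *	struct device_drv myqueue_drv = {
 *		.dname = "myqueue",
 *		.name = "MYQ",
 *		.minerloop = minerloop_queue,
 *		.queue_append = myqueue_append,  // return false (and set thr->queue_full) when the device cannot take more work
 *		.queue_flush = myqueue_flush,    // drop queued work on work restart or disable
 *		.poll = myqueue_poll,            // collect results and report them, e.g. via hashes_done2()
 *	};
 */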
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu))
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}
static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
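
/* Sketch (hypothetical): a detectone callback that recognises its hardware
 * typically allocates and fills a cgpu_info and hands it to add_cgpu():
 *
 *	static bool mydev_detect_one(const char *devpath)
 *	{
 *		struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
 *		cgpu->drv = &mydev_drv;
 *		cgpu->device_path = strdup(devpath);
 *		cgpu->threads = 1;
 *		cgpu->procs = 1;  // values > 1 make _add_cgpu() create slave processors
 *		return add_cgpu(cgpu);
 *	}
 */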
#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif

int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	bool doall = false;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
			doall = true;
#ifdef NEED_BFG_LOWL_VCOM
		else
		if (serial_claim(dev, NULL))
		{
			applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
			string_elist_del(&scan_devices, iter);
		}
#endif
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
#ifdef NEED_BFG_LOWL_VCOM
	if (doall && detectone)
		found += lowlevel_detect_id(_serial_detect_all, detectone, &lowl_vcom, 0, 0);
#endif
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}
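
/* Usage sketch (hypothetical): a serial-port driver's detect routine walks
 * the scan_devices list (populated from the command line), probing each
 * entry with its detectone callback and falling back to an autoscan when
 * nothing was found or named:
 *
 *	static void mydev_detect(void)
 *	{
 *		// flag meanings follow the bit tests above:
 *		//   1 = force autoscan, 2 = require a "name:" prefix, 4 = inhibit autoscan
 *		_serial_detect(&mydev_drv, mydev_detect_one, mydev_autoscan, 0);
 *	}
 */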
static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}
#define _open_bitstream(path, subdir, sub2)  do {  \
	f = _open_bitstream(path, subdir, sub2, filename);  \
	if (f)  \
		return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
	_open_bitstream(path, NULL, path3);  \
	_open_bitstream(path, "../share/" PACKAGE, path3);  \
	_open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
	_open_bitstream2(path, dname);  \
	_open_bitstream2(path, "bitstreams");  \
	_open_bitstream2(path, NULL);  \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}
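
/* Usage sketch (hypothetical): an FPGA driver loading its firmware image.
 * open_bitstream searches opt_kernel_path, cgminer_path, and the current
 * directory, including the driver-named and "bitstreams/" subdirectories:
 *
 *	FILE *f = open_bitstream("mydev", "mydev-fpga.bit");
 *	if (!f)
 *		applogr(false, LOG_ERR, "%s: Unable to load bitstream", cgpu->dev_repr);
 *	// ... read and program the bitstream, then fclose(f) ...
 */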
void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}