deviceapi.c
/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"
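
/* Two linked-list views of the same driver registrations: _bfg_drvreg1 is
 * kept sorted by driver dname and _bfg_drvreg2 by probe priority, once
 * bfg_devapi_init (below) has run. */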
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
		LL_FOREACH2(_bfg_drvreg1, ndr, next_dname)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
		}
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND2(_bfg_drvreg1, ndr, next_dname);
	LL_PREPEND2(_bfg_drvreg2, ndr, next_prio);
}
static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	LL_SORT2(_bfg_drvreg1, sort_drv_by_dname, next_dname);
	LL_SORT2(_bfg_drvreg2, sort_drv_by_priority, next_prio);
}
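
/* Record the result of a scanhash pass. hashes == -1 signals a hardware
 * failure: retry once (if opt_restart and the device was previously working),
 * otherwise disable the processor. On success, the hash count and elapsed
 * time feed the hashmeter, and *max_nonce (when the driver can limit work)
 * is rescaled so a pass lasts roughly one metering cycle
 * (opt_log_interval / 5 seconds). */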
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}
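
/* Convenience wrapper for drivers that don't track timing themselves:
 * measures the interval since the previous call and forwards it. */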
bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
/* A generic wait function for threads that poll: waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}
// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
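
/* The asynchronous minerloop below drives each processor through a small
 * state machine: do_job_prepare fetches and prepares work,
 * job_prepare_complete decides whether results must be collected first,
 * do_get_results and job_results_fetched hand control back to do_job_start,
 * and mt_job_transition / job_start_complete rotate work into prev_work so
 * the previous job's results can still be processed. Drivers invoke the
 * *_complete / *_fetched hooks from their own callbacks when operations
 * finish. */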
void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	timer_set_now(&tv_now);
	
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
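
/* Control requests let another thread briefly take over a device otherwise
 * owned by its minerloop thread: cgpu_request_control wakes the loop via the
 * mutex_request notifier, do_notifier_select (above) signals the requester
 * and blocks on device_cond until cgpu_release_control is called. Calls made
 * from the device thread itself are no-ops. */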
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, NULL);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}
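
/* Queue-based minerloop: keeps each processor's queue topped up via the
 * driver's queue_append (which refuses work once the queue is full), flushes
 * it on work restart, and otherwise services poll and watchdog timers
 * between notifier wakeups. */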
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
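
/* Thread entry point for a device: runs the driver's thread_init, waits for
 * the startup notifier ping, then hands off to the driver's minerloop (or
 * the default minerloop_scanhash). On exit, marks the processors it managed
 * as dead and runs the driver's thread_shutdown. */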
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}
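
/* Registering a new device: _add_cgpu assigns device identity, then splits a
 * multi-processor cgpu into per-processor clones chained via next_proc,
 * dividing the configured thread count among them. Registration is
 * serialized with _add_cgpu_mutex since drivers may probe concurrently. */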
static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
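
/* The set_device framework routes runtime option changes (optname/newvalue
 * pairs) to a driver's set_device_funcs table, falling back to the legacy
 * drv->set_device hook, a built-in help listing, and the generic
 * temp-cutoff/temp-target options. Handlers report the outcome through
 * enum bfg_set_device_replytype (SDR_OK, SDR_ERR, SDR_HELP, SDR_NOSUPP,
 * SDR_UNKNOWN, or SDR_AUTO to have it inferred from the return value). */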
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	// Match only the first whitespace-delimited token of newvalue, if given
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(newvalue, sdf->optname, matchlen) || sdf->optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}
const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}
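
/* A minimal sketch of how a driver might hook into this framework; the
 * "clock" option and example_set_clock handler are hypothetical, not part
 * of this file:
 *
 *	static const char *example_set_clock(struct cgpu_info * const proc,
 *		const char * const optname, const char * const newvalue,
 *		char * const replybuf, enum bfg_set_device_replytype * const out_success)
 *	{
 *		const int mhz = atoi(newvalue);
 *		if (mhz < 1)
 *			return "Invalid clock";  // non-NULL maps to SDR_ERR
 *		// ... program the (hypothetical) hardware here ...
 *		return NULL;  // NULL maps to SDR_OK via _set_auto_sdr
 *	}
 *
 *	static const struct bfg_set_device_definition example_set_device_funcs[] = {
 *		{"clock", example_set_clock, "core clock, in MHz"},
 *		{NULL},
 *	};
 *
 * A driver would point cgpu->set_device_funcs at such a table during probe,
 * after which proc_set_device(proc, "clock", "250", buf, &sdr) reaches
 * example_set_clock, and "help" lists the table's descriptions. */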
#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
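/* flags is a bitmask (as used below): 1 forces autoscan, 2 requires a
 * "name:device" prefix on scan entries, and 4 inhibits autoscan unless
 * explicitly requested. */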
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
			{}  // do nothing
		else
		if (!strcmp(dev, "all"))
			{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}
static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}
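
/* The macros below deliberately shadow the _open_bitstream function so that
 * open_bitstream can expand a whole search tree of candidate locations,
 * returning from the first fopen that succeeds. */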
#define _open_bitstream(path, subdir, sub2)  do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

#define _open_bitstream2(path, path3)  do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

#define _open_bitstream3(path)  do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}
void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}
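
/* Walk a device's processor chain to the procid'th entry, returning NULL if
 * the chain ends or crosses into another device first. */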
struct cgpu_info *device_proc_by_id(struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}