deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

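/* Drivers register themselves (typically from constructors) before main runs;
 * entries are queued on a local list until a final _bfg_register_driver(NULL)
 * call flushes them into the two hash tables above, keyed by dname (hh) and
 * name (hh2) respectively. */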
void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

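/* Finalise driver registration: flush the pending list into the hash tables,
 * then sort one index by dname and the other by probe priority. */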
void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname   );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}

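/* Account for a completed scanhash pass: update per-thread and per-device
 * hash counters, handle a -1 error result (reinitialise or disable the
 * processor), and rescale *max_nonce so a pass lasts roughly one log cycle.
 * Returns false if the processor was disabled. */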
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll that will wait a specified
 * time tdiff waiting on a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}

static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			/* api->scanhash should scan the work for valid nonces
			 * until max_nonce is reached or thr_info->work_restart */
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		/* The inner do-while loop will exit unless the device is capable of
		 * scanning a specific nonce range (currently CPU and GPU drivers)
		 * See abandon_work comments for more details */
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

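/* First step of the async job state machine: pick the work to run next
 * (reusing the current work unless it should be abandoned), hand it to the
 * driver's job_prepare hook, and continue via job_prepare_complete. */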
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

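/* Collect results from the job started at tv_jobstart; drivers without a
 * job_get_results hook proceed directly to job_results_fetched. */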
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

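/* Called by the driver once the new job has taken over the hardware:
 * rotate next_work/work/prev_work and timestamp the job start. */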
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, NULL);
}

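/* Block until the device thread parks in do_notifier_select and hands over
 * the device via the condition variable; a no-op when called from the device
 * thread itself. Pair every call with cgpu_release_control. */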
void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}

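/* Event-driven miner loop for drivers implementing the job_prepare /
 * job_start / job_get_results callbacks: walks every processor of the
 * device, advances its job state machine, services poll and watchdog
 * timers, then sleeps in do_notifier_select until the next deadline. */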
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

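/* Miner loop for queue-based drivers: keeps queue_append fed until the
 * driver reports the queue full, flushes on work restart, and services
 * the driver's poll and watchdog timers. */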
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		// HACK: Some designs set the main thr tv_poll from secondary thrs
		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
		
		do_notifier_select(thr, &tv_timeout);
	}
}

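/* Thread entry point for a device: runs the driver's thread_init, waits for
 * the startup notifier, then enters the driver's minerloop (defaulting to
 * minerloop_scanhash) until shutdown. */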
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

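/* Register a new device, allocating one cgpu_info per processor ("proc")
 * and dividing the device's threads between them. Callers must hold
 * _add_cgpu_mutex. */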
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}

const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	// Measure the first whitespace-delimited token of newvalue (if any),
	// used to filter the help list down to matching option names
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

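/* Dispatch a set-device request through the driver's set_device_funcs table.
 * Handlers that leave *out_success at SDR_AUTO get their reply classified by
 * _set_auto_sdr above. */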
const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
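// flags: bit 0 forces autoscan, bit 1 requires a "name:device" prefix on
// scan-device entries, bit 2 inhibits autoscan (unless forced)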
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
		{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do {  \
	f = _open_bitstream(path, subdir, sub2, filename);  \
	if (f)  \
		return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
	_open_bitstream(path, NULL, path3);  \
	_open_bitstream(path, "../share/" PACKAGE, path3);  \
	_open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
	_open_bitstream2(path, dname);  \
	_open_bitstream2(path, "bitstreams");  \
	_open_bitstream2(path, NULL);  \
} while(0)

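/* Search for a bitstream file under opt_kernel_path, cgminer_path, and the
 * current directory, trying the driver's dname subdirectory, "bitstreams",
 * and no subdirectory under each, both directly and via the ../share/ and
 * ../ package paths (see the macros above). Returns NULL if not found. */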
FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}

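/* Return processor number procid of device dev, or NULL if dev has fewer
 * processors than that. */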
struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = (void*)dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}