deviceapi.c

/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif
#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"
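
// Two registries over the same driver_registration nodes: _bfg_drvreg1 is
// keyed and sorted by the driver's long name (dname), _bfg_drvreg2 is keyed
// by its short name and sorted by probe priority.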
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
    static struct driver_registration *initlist;
    struct driver_registration *ndr;
    
    if (!drv)
    {
        // Move initlist to hashtables
        LL_FOREACH(initlist, ndr)
        {
            drv = ndr->drv;
            if (drv->drv_init)
                drv->drv_init();
            HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
            HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
        }
        initlist = NULL;
        return;
    }
    ndr = malloc(sizeof(*ndr));
    *ndr = (struct driver_registration){
        .drv = drv,
    };
    LL_PREPEND(initlist, ndr);
}
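
/* Drivers add themselves to initlist at program start; in BFGMiner this is
 * typically arranged by a constructor, e.g. (a sketch, assuming the
 * BFG_REGISTER_DRIVER macro declared in deviceapi.h):
 *
 *     BFG_REGISTER_DRIVER(my_drv)
 *
 * Passing NULL later (from bfg_devapi_init) flushes the list into the
 * hashtables and runs each driver's drv_init hook. */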
static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
    return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
    return a->drv->probe_priority - b->drv->probe_priority;
}
void bfg_devapi_init()
{
    _bfg_register_driver(NULL);
    HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname   );
    HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}
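
/* Record the hashes completed by a scanhash pass and the time it took.
 * hashes == -1 signals a device failure.  When the driver supports work
 * limiting, *max_nonce is rescaled so that a pass lasts roughly one "cycle"
 * (a fifth of the log interval).  Returns false if the device was disabled. */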
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
    struct cgpu_info *cgpu = thr->cgpu;
    const long cycle = opt_log_interval / 5 ? : 1;
    
    if (unlikely(hashes == -1)) {
        if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
            dev_error(cgpu, REASON_THREAD_ZERO_HASH);
        
        if (thr->scanhash_working && opt_restart) {
            applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
            thr->scanhash_working = false;
            cgpu->reinit_backoff = 5.2734375;
            hashes = 0;
        } else {
            applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
            cgpu->deven = DEV_RECOVER_ERR;
            run_cmd(cmd_idle);
            return false;
        }
    }
    else
        thr->scanhash_working = true;
    
    thr->hashes_done += hashes;
    if (hashes > cgpu->max_hashes)
        cgpu->max_hashes = hashes;
    
    timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
    
    // max_nonce management (optional)
    if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
        int mult;
        
        if (likely(!max_nonce || *max_nonce == 0xffffffff))
            return true;
        
        mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
        mult *= cycle;
        if (*max_nonce > (0xffffffff * 0x400) / mult)
            *max_nonce = 0xffffffff;
        else
            *max_nonce = (*max_nonce * mult) / 0x400;
    } else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
        *max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
    else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
        *max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
    
    hashmeter2(thr);
    
    return true;
}
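
// Convenience wrapper: measures the interval since its own previous call
// and feeds it to hashes_done.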
bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
    struct timeval tv_now, tv_delta;
    timer_set_now(&tv_now);
    timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
    thr->_tv_last_hashes_done_call = tv_now;
    return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
/* A generic wait function for threads that poll. Waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
    struct timeval tv_timer, tv_now, tv_timeout;
    fd_set rfds;
    SOCKETTYPE wrn = thr->work_restart_notifier[0];
    int rv;
    
    if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
    {
        // This is a bug!
        applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
        cgsleep_ms(mstime);
        return (thr->work_restart ? 0 : ETIMEDOUT);
    }
    
    timer_set_now(&tv_now);
    timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
    while (true)
    {
        FD_ZERO(&rfds);
        FD_SET(wrn, &rfds);
        tv_timeout = tv_timer;
        rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
        if (rv == 0)
            return ETIMEDOUT;
        if (rv > 0)
        {
            if (thr->work_restart)
                return 0;
            notifier_read(thr->work_restart_notifier);
        }
        timer_set_now(&tv_now);
    }
}
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
    struct cgpu_info *proc = thr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work;
    
    work = get_work(thr);
    if (!work)
        return NULL;
    if (api->prepare_work && !api->prepare_work(thr, work)) {
        free_work(work);
        applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
        return NULL;
    }
    return work;
}
// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_start, tv_end;
    struct timeval tv_hashes, tv_worktime;
    uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
    int64_t hashes;
    struct work *work;
    const bool primary = (!mythr->device_thread) || mythr->primary_thread;
    
#ifdef HAVE_PTHREAD_CANCEL
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
    
    if (cgpu->deven != DEV_ENABLED)
        mt_disable(mythr);
    
    while (likely(!cgpu->shutdown)) {
        mythr->work_restart = false;
        request_work(mythr);
        work = get_and_prepare_work(mythr);
        if (!work)
            break;
        timer_set_now(&work->tv_work_start);
        
        do {
            thread_reportin(mythr);
            /* Only allow the mining thread to be cancelled when
             * it is not in the driver code. */
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
            timer_set_now(&tv_start);
            hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
            timer_set_now(&tv_end);
            pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            pthread_testcancel();
            thread_reportin(mythr);
            
            timersub(&tv_end, &tv_start, &tv_hashes);
            if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
                goto disabled;
            
            if (unlikely(mythr->work_restart)) {
                /* Apart from device_thread 0, we stagger the
                 * starting of every next thread to try and get
                 * all devices busy before worrying about
                 * getting work for their extra threads */
                if (!primary) {
                    struct timespec rgtp;
                    
                    rgtp.tv_sec = 0;
                    rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
                    nanosleep(&rgtp, NULL);
                }
                break;
            }
            
            if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
                mt_disable(mythr);
            
            timersub(&tv_end, &work->tv_work_start, &tv_worktime);
        } while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
        free_work(work);
    }
}
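
/* Asynchronous variant of the disable path: runs mt_disable_start, then
 * retires the in-flight job into prev_work and ends any pending job
 * transition. */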
void mt_disable_start__async(struct thr_info * const mythr)
{
    mt_disable_start(mythr);
    
    if (mythr->prev_work)
        free_work(mythr->prev_work);
    mythr->prev_work = mythr->work;
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}
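
/* The asynchronous minerloop drives each processor through a small state
 * machine: do_job_prepare -> job_prepare_complete -> do_get_results ->
 * job_results_fetched -> do_job_start.  Drivers signal a started job via
 * mt_job_transition and job_start_complete, after which the previous job's
 * results are counted in do_process_results.  No stage may block. */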
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_worktime;
    
    mythr->tv_morework.tv_sec = -1;
    mythr->_job_transition_in_progress = true;
    if (mythr->work)
        timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
    if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
    {
        mythr->work_restart = false;
        request_work(mythr);
        // FIXME: Allow get_work to return NULL to retry on notification
        if (mythr->next_work)
            free_work(mythr->next_work);
        mythr->next_work = get_and_prepare_work(mythr);
        if (!mythr->next_work)
            return false;
        mythr->starting_next_work = true;
        api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
    }
    else
    {
        mythr->starting_next_work = false;
        api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
    }
    job_prepare_complete(mythr);
    return true;
}
void job_prepare_complete(struct thr_info *mythr)
{
    if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
        return;
    
    if (mythr->work)
    {
        if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
            do_get_results(mythr, true);
        else
        {}  // TODO: Set a timer to call do_get_results when job is near complete
    }
    else  // no job currently running
        do_job_start(mythr);
}
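
/* Begin fetching results for the current job, remembering whether to start a
 * new job afterwards.  Drivers without a job_get_results hook are treated as
 * having the results immediately, via job_results_fetched. */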
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work = mythr->work;
    
    mythr->_job_transition_in_progress = true;
    mythr->tv_results_jobstart = mythr->tv_jobstart;
    mythr->_proceed_with_new_job = proceed_with_new_job;
    if (api->job_get_results)
        api->job_get_results(mythr, work);
    else
        job_results_fetched(mythr);
}
void job_results_fetched(struct thr_info *mythr)
{
    if (mythr->_proceed_with_new_job)
        do_job_start(mythr);
    else
    {
        if (likely(mythr->prev_work))
        {
            struct timeval tv_now;
            
            timer_set_now(&tv_now);
            do_process_results(mythr, &tv_now, mythr->prev_work, true);
        }
        mt_disable_start__async(mythr);
    }
}
void do_job_start(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    
    thread_reportin(mythr);
    api->job_start(mythr);
}
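
/* Invoked once a newly started job is live (drivers call this from their
 * job_start path): promotes next_work to work, retires the old job into
 * prev_work, and stamps the job start time. */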
void mt_job_transition(struct thr_info *mythr)
{
    struct timeval tv_now;
    
    timer_set_now(&tv_now);
    
    if (mythr->starting_next_work)
    {
        mythr->next_work->tv_work_start = tv_now;
        if (mythr->prev_work)
            free_work(mythr->prev_work);
        mythr->prev_work = mythr->work;
        mythr->work = mythr->next_work;
        mythr->next_work = NULL;
    }
    mythr->tv_jobstart = tv_now;
    mythr->_job_transition_in_progress = false;
}
void job_start_complete(struct thr_info *mythr)
{
    struct timeval tv_now;
    
    if (unlikely(!mythr->prev_work))
        return;
    
    timer_set_now(&tv_now);
    do_process_results(mythr, &tv_now, mythr->prev_work, false);
}
void job_start_abort(struct thr_info *mythr, bool failure)
{
    struct cgpu_info *proc = mythr->cgpu;
    
    if (failure)
    {
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
    }
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}
bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_hashes;
    int64_t hashes = 0;
    
    if (api->job_process_results)
        hashes = api->job_process_results(mythr, work, stopping);
    thread_reportin(mythr);
    
    if (hashes)
    {
        timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
        if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
            return false;
    }
    
    return true;
}
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct timeval tv_now;
    int maxfd;
    fd_set rfds;
    
    timer_set_now(&tv_now);
    FD_ZERO(&rfds);
    FD_SET(thr->notifier[0], &rfds);
    maxfd = thr->notifier[0];
    FD_SET(thr->work_restart_notifier[0], &rfds);
    set_maxfd(&maxfd, thr->work_restart_notifier[0]);
    if (thr->mutex_request[1] != INVSOCK)
    {
        FD_SET(thr->mutex_request[0], &rfds);
        set_maxfd(&maxfd, thr->mutex_request[0]);
    }
    if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
        return;
    if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
    {
        // FIXME: This can only handle one request at a time!
        pthread_mutex_t *mutexp = &cgpu->device_mutex;
        notifier_read(thr->mutex_request);
        mutex_lock(mutexp);
        pthread_cond_signal(&cgpu->device_cond);
        pthread_cond_wait(&cgpu->device_cond, mutexp);
        mutex_unlock(mutexp);
    }
    if (FD_ISSET(thr->notifier[0], &rfds))
        notifier_read(thr->notifier);
    if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
        notifier_read(thr->work_restart_notifier);
}
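
/* Control-request handshake.  A non-miner thread wanting exclusive access to
 * the device brackets its critical section like:
 *
 *     cgpu_request_control(cgpu);   // parks the miner thread
 *     // ... touch device state ...
 *     cgpu_release_control(cgpu);
 *
 * The miner loop's side of the handshake lives in do_notifier_select above:
 * it signals device_cond to acknowledge, then waits on it until release. */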
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
    mutex_init(&cgpu->device_mutex);
    notifier_init(cgpu->thr[0]->mutex_request);
    pthread_cond_init(&cgpu->device_cond, NULL);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    mutex_lock(&cgpu->device_mutex);
    notifier_wake(thr->mutex_request);
    pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    pthread_cond_signal(&cgpu->device_cond);
    mutex_unlock(&cgpu->device_mutex);
}
static
void _minerloop_setup(struct thr_info *mythr)
{
    struct cgpu_info * const cgpu = mythr->cgpu, *proc;
    
    if (mythr->work_restart_notifier[1] == -1)
        notifier_init(mythr->work_restart_notifier);
    
    for (proc = cgpu; proc; proc = proc->next_proc)
    {
        mythr = proc->thr[0];
        timer_set_now(&mythr->tv_watchdog);
        proc->disable_watchdog = true;
    }
}
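
/* Single-threaded event loop multiplexing every processor of a device: each
 * pass services job transitions, polling, and the watchdog for each
 * processor, computes the nearest pending timer, then sleeps in
 * do_notifier_select until a notifier fires or the timeout expires. */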
void minerloop_async(struct thr_info *mythr)
{
    struct thr_info *thr = mythr;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool is_running, should_be_running;
    
    _minerloop_setup(mythr);
    
    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];
            
            // Nothing should happen while we're starting a job
            if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
                goto defer_events;
            
            is_running = mythr->work;
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
            
            if (should_be_running)
            {
                if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
                {
                    mt_disable_finish(mythr);
                    goto djp;
                }
                if (unlikely(mythr->work_restart))
                    goto djp;
            }
            else  // ! should_be_running
            {
                if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
                {
disabled: ;
                    timer_unset(&mythr->tv_morework);
                    if (is_running)
                    {
                        if (mythr->busy_state != TBS_GETTING_RESULTS)
                            do_get_results(mythr, false);
                        else
                            // Avoid starting job when pending result fetch completes
                            mythr->_proceed_with_new_job = false;
                    }
                    else  // !mythr->_mt_disable_called
                        mt_disable_start__async(mythr);
                }
            }
            if (timer_passed(&mythr->tv_morework, &tv_now))
            {
djp: ;
                if (!do_job_prepare(mythr, &tv_now))
                    goto disabled;
            }
            
defer_events:
            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }
            
            reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }
        
        do_notifier_select(thr, &tv_timeout);
    }
}
static
void do_queue_flush(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    
    api->queue_flush(mythr);
    if (mythr->next_work)
    {
        free_work(mythr->next_work);
        mythr->next_work = NULL;
    }
}
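
/* Queue-based minerloop: keeps the driver's work queue topped up via
 * queue_append, flushing it on work restart; otherwise the structure mirrors
 * minerloop_async (poll, watchdog, notifier select). */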
void minerloop_queue(struct thr_info *thr)
{
    struct thr_info *mythr;
    struct cgpu_info *cgpu = thr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool should_be_running;
    struct work *work;
    
    _minerloop_setup(thr);
    
    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];
            
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
            if (should_be_running)
            {
                if (unlikely(mythr->_mt_disable_called))
                    mt_disable_finish(mythr);
                
                if (unlikely(mythr->work_restart))
                {
                    mythr->work_restart = false;
                    do_queue_flush(mythr);
                }
                
                while (!mythr->queue_full)
                {
                    if (mythr->next_work)
                    {
                        work = mythr->next_work;
                        mythr->next_work = NULL;
                    }
                    else
                    {
                        request_work(mythr);
                        // FIXME: Allow get_work to return NULL to retry on notification
                        work = get_and_prepare_work(mythr);
                    }
                    if (!work)
                        break;
                    if (!api->queue_append(mythr, work))
                        mythr->next_work = work;
                }
            }
            else
            if (unlikely(!mythr->_mt_disable_called))
            {
                do_queue_flush(mythr);
                mt_disable_start(mythr);
            }
            
            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
            if (should_be_running && !mythr->queue_full)
                goto redo;
            
            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }
        
        do_notifier_select(thr, &tv_timeout);
    }
}
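
/* Per-device mining thread entry point: runs the driver's thread_init, waits
 * for the start notification, then enters the driver's minerloop
 * (minerloop_scanhash by default); on exit, marks the device's processors
 * dead and runs thread_shutdown. */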
void *miner_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *drv = cgpu->drv;
    
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    
    char threadname[20];
    snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
    RenameThread(threadname);
    
    if (drv->thread_init && !drv->thread_init(mythr)) {
        dev_error(cgpu, REASON_THREAD_FAIL_INIT);
        for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
            dev_error(slave, REASON_THREAD_FAIL_INIT);
        __thr_being_msg(LOG_ERR, mythr, "failure, exiting");
        goto out;
    }
    
    if (drv_ready(cgpu) && !cgpu->already_set_defaults)
        cgpu_set_defaults(cgpu);
    
    thread_reportout(mythr);
    applog(LOG_DEBUG, "Popping ping in miner thread");
    notifier_read(mythr->notifier);  // Wait for a notification to start
    
    cgtime(&cgpu->cgminer_stats.start_tv);
    if (drv->minerloop)
        drv->minerloop(mythr);
    else
        minerloop_scanhash(mythr);
    __thr_being_msg(LOG_NOTICE, mythr, "shutting down");
    
out: ;
    struct cgpu_info *proc = cgpu;
    do
    {
        proc->deven = DEV_DISABLED;
        proc->status = LIFE_DEAD2;
    }
    while ((proc = proc->next_proc) && !proc->threads);
    mythr->getwork = 0;
    mythr->has_pth = false;
    cgsleep_ms(1);
    if (drv->thread_shutdown)
        drv->thread_shutdown(mythr);
    
    notifier_destroy(mythr->notifier);
    
    return NULL;
}
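
/* _add_cgpu registers a new device and fans it out into cgpu->procs
 * processors chained through next_proc.  Extra processors get base-26 letter
 * suffixes ("a".."z", then "aa"...) appended to the device representation,
 * and the device's threads are divided evenly among them. */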
static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
    int lpcount;
    
    renumber_cgpu(cgpu);
    
    if (!cgpu->procs)
        cgpu->procs = 1;
    lpcount = cgpu->procs;
    cgpu->device = cgpu;
    
    cgpu->dev_repr = malloc(6);
    sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
    cgpu->dev_repr_ns = malloc(6);
    sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
    strcpy(cgpu->proc_repr, cgpu->dev_repr);
    sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
    
#ifdef NEED_BFG_LOWL_VCOM
    maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
    maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
    maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
    
    devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
    devices_new[total_devices_new++] = cgpu;
    
    if (lpcount > 1)
    {
        int ns;
        int tpp = cgpu->threads / lpcount;
        struct cgpu_info **nlp_p, *slave;
        
        int lpdigits = 1;
        for (int i = lpcount; i > 26 && lpdigits < 3; i /= 26)
            ++lpdigits;
        
        memset(&cgpu->proc_repr[5], 'a', lpdigits);
        cgpu->proc_repr[5 + lpdigits] = '\0';
        ns = strlen(cgpu->proc_repr_ns);
        strcpy(&cgpu->proc_repr_ns[ns], &cgpu->proc_repr[5]);
        
        nlp_p = &cgpu->next_proc;
        for (int i = 1; i < lpcount; ++i)
        {
            slave = malloc(sizeof(*slave));
            *slave = *cgpu;
            slave->proc_id = i;
            for (int x = i, y = lpdigits; --y, x; x /= 26)
            {
                slave->proc_repr_ns[ns + y] =
                slave->proc_repr[5 + y] += (x % 26);
            }
            slave->threads = tpp;
            devices_new[total_devices_new++] = slave;
            *nlp_p = slave;
            nlp_p = &slave->next_proc;
        }
        *nlp_p = NULL;
        cgpu->proc_id = 0;
        cgpu->threads -= (tpp * (lpcount - 1));
    }
    
    cgpu->last_device_valid_work = time(NULL);
    
    return true;
}
bool add_cgpu(struct cgpu_info *cgpu)
{
    mutex_lock(&_add_cgpu_mutex);
    const bool rv = _add_cgpu(cgpu);
    mutex_unlock(&_add_cgpu_mutex);
    return rv;
}

void add_cgpu_live(void *p)
{
    add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
    if (!prev_cgpu)
        return add_cgpu(cgpu);
    while (prev_cgpu->next_proc)
        prev_cgpu = prev_cgpu->next_proc;
    
    mutex_lock(&_add_cgpu_mutex);
    int old_total_devices = total_devices_new;
    if (!_add_cgpu(cgpu))
    {
        mutex_unlock(&_add_cgpu_mutex);
        return false;
    }
    prev_cgpu->next_proc = devices_new[old_total_devices];
    mutex_unlock(&_add_cgpu_mutex);
    return true;
}
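
/* Builds the help text for set_device: one "option: description" line per
 * documented option.  If newvalue is given, its first whitespace-delimited
 * token is used as a name filter. */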
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;
    char *p = replybuf;
    bool first = true;
    
    *out_success = SDR_HELP;
    
    sdf = proc->set_device_funcs;
    if (!sdf)
nohelp:
        return "No help available";
    
    // matchlen: length of the first whitespace-delimited token of newvalue
    size_t matchlen = 0;
    if (newvalue)
        while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
            ++matchlen;
    
    for ( ; sdf->optname; ++sdf)
    {
        if (!sdf->description)
            continue;
        if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
            continue;
        if (first)
            first = false;
        else
            p++[0] = '\n';
        p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
    }
    if (replybuf == p)
        goto nohelp;
    return replybuf;
}
const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    int target_diff = proc->cutofftemp - proc->targettemp;
    proc->cutofftemp = atoi(newvalue);
    if (!proc->targettemp_user)
        proc->targettemp = proc->cutofftemp - target_diff;
    return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    proc->targettemp = atoi(newvalue);
    proc->targettemp_user = true;
    return NULL;
}
static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
    if (!rv)
        *out_success = SDR_OK;
    else
    if (!strcasecmp(optname, "help"))
        *out_success = SDR_HELP;
    else
        *out_success = SDR_ERR;
}
const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;
    
    sdf = proc->set_device_funcs;
    if (!sdf)
    {
        *out_success = SDR_NOSUPP;
        return "Device does not support setting parameters.";
    }
    for ( ; sdf->optname; ++sdf)
        if (!strcasecmp(optname, sdf->optname))
        {
            *out_success = SDR_AUTO;
            const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
            if (SDR_AUTO == *out_success)
                _set_auto_sdr(out_success, rv, optname);
            return rv;
        }
    if (!strcasecmp(optname, "help"))
        return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
    *out_success = SDR_UNKNOWN;
    sprintf(replybuf, "Unknown option: %s", optname);
    return replybuf;
}
const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    if (proc->drv->set_device)
    {
        const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
        _set_auto_sdr(out_success, rv, optname);
        return rv;
    }
    return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
    switch (*out_success)
    {
        case SDR_NOSUPP:
        case SDR_UNKNOWN:
            if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
                return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
            else
            if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
                return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
        default:
            break;
    }
    return rv;
}
#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
    detectone_func_t detectone = userp;
    if (serial_claim(info->path, NULL))
        applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
    return detectone(info->path);
}
#endif
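
/* Probe serial (VCOM) devices for this driver.  flags: bit 0 forces
 * autoscan, bit 1 restricts probing to scan entries explicitly naming this
 * driver, bit 2 inhibits autoscan.  The scan-list keywords "auto", "noauto"
 * and "all" are honoured accordingly. */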
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
    struct string_elist *iter, *tmp;
    const char *dev, *colon;
    bool inhibitauto = flags & 4;
    char found = 0;
    bool forceauto = flags & 1;
    bool hasname;
    bool doall = false;
    size_t namel = strlen(api->name);
    size_t dnamel = strlen(api->dname);
    
#ifdef NEED_BFG_LOWL_VCOM
    clear_detectone_meta_info();
#endif
    DL_FOREACH_SAFE(scan_devices, iter, tmp) {
        dev = iter->string;
        if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
            size_t idlen = colon - dev;
            
            // allow either name:device or dname:device
            if ((idlen != namel || strncasecmp(dev, api->name, idlen))
             && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
                continue;
            
            dev = colon + 1;
            hasname = true;
        }
        else
            hasname = false;
        if (!strcmp(dev, "auto"))
            forceauto = true;
        else if (!strcmp(dev, "noauto"))
            inhibitauto = true;
        else
        if ((flags & 2) && !hasname)
            continue;
        else
        if (!detectone)
        {}  // do nothing
        else
        if (!strcmp(dev, "all"))
            doall = true;
#ifdef NEED_BFG_LOWL_VCOM
        else
        if (serial_claim(dev, NULL))
        {
            applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
            string_elist_del(&scan_devices, iter);
        }
#endif
        else if (detectone(dev)) {
            string_elist_del(&scan_devices, iter);
            ++found;
        }
    }
    
#ifdef NEED_BFG_LOWL_VCOM
    if (doall && detectone)
        found += lowlevel_detect_id(_serial_detect_all, detectone, &lowl_vcom, 0, 0);
#endif
    
    if ((forceauto || !(inhibitauto || found)) && autoscan)
        found += autoscan();
    
    return found;
}
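
/* Bitstream files are searched for across several roots; the macros below
 * (which deliberately shadow the helper function) expand the search: each
 * root is tried with the driver's dname, "bitstreams", and no subdirectory,
 * both directly and under ../share/<package> and ../<package>. */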
static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
    char fullpath[PATH_MAX];
    strcpy(fullpath, path);
    strcat(fullpath, "/");
    if (subdir) {
        strcat(fullpath, subdir);
        strcat(fullpath, "/");
    }
    if (sub2) {
        strcat(fullpath, sub2);
        strcat(fullpath, "/");
    }
    strcat(fullpath, filename);
    return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do {  \
    f = _open_bitstream(path, subdir, sub2, filename);  \
    if (f)  \
        return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
    _open_bitstream(path, NULL, path3);  \
    _open_bitstream(path, "../share/" PACKAGE, path3);  \
    _open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
    _open_bitstream2(path, dname);  \
    _open_bitstream2(path, "bitstreams");  \
    _open_bitstream2(path, NULL);  \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
    FILE *f;
    
    _open_bitstream3(opt_kernel_path);
    _open_bitstream3(cgminer_path);
    _open_bitstream3(".");
    return NULL;
}
void close_device_fd(struct thr_info * const thr)
{
    struct cgpu_info * const proc = thr->cgpu;
    const int fd = proc->device_fd;
    
    if (fd == -1)
        return;
    if (close(fd))
        applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
    else
    {
        proc->device_fd = -1;
        applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
    }
}