deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

  13. #include "config.h"
  14. #include <ctype.h>
  15. #ifdef WIN32
  16. #include <winsock2.h>
  17. #else
  18. #include <sys/select.h>
  19. #endif
  20. #include <stdbool.h>
  21. #include <stdint.h>
  22. #include <sys/time.h>
  23. #include <sys/types.h>
  24. #include <time.h>
  25. #include <unistd.h>
  26. #include "compat.h"
  27. #include "deviceapi.h"
  28. #include "logging.h"
  29. #include "lowlevel.h"
  30. #ifdef NEED_BFG_LOWL_VCOM
  31. #include "lowl-vcom.h"
  32. #endif
  33. #include "miner.h"
  34. #include "util.h"
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
		LL_FOREACH2(_bfg_drvreg1, ndr, next_dname)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
		}
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND2(_bfg_drvreg1, ndr, next_dname);
	LL_PREPEND2(_bfg_drvreg2, ndr, next_prio);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	LL_SORT2(_bfg_drvreg1, sort_drv_by_dname, next_dname);
	LL_SORT2(_bfg_drvreg2, sort_drv_by_priority, next_prio);
}

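/* Registration-flow sketch (illustrative; the real registration is wrapped in
 * a macro elsewhere in the tree): each driver defines a struct device_drv and
 * arranges for _bfg_register_driver(&my_drv) to run before bfg_devapi_init.
 * bfg_devapi_init then invokes every drv_init hook (via the NULL registration
 * above) and sorts the registry by dname and by probe_priority.
 *
 *	static void my_drv_init(void);
 *	struct device_drv my_drv = {
 *		.dname = "mydrv",  // hypothetical driver
 *		.drv_init = my_drv_init,
 *	};
 *	static void my_drv_init(void)
 *	{
 *		my_drv.probe_priority = 50;  // sorted via sort_drv_by_priority
 *	}
 */
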
float common_sha256d_and_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	switch (malgo->algo)
	{
#ifdef USE_SCRYPT
		case POW_SCRYPT:
			return 1./0x10000;
#endif
#ifdef USE_SHA256D
		case POW_SHA256D:
			return 1.;
#endif
		default:
			return -1.;
	}
}

#ifdef USE_SCRYPT
float common_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	return (malgo->algo == POW_SCRYPT) ? (1./0x10000) : -1.;
}
#endif

bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (max_nonce)
	{
		uint64_t new_max_nonce = *max_nonce;
		new_max_nonce *= cycle;
		new_max_nonce *= 1000000;
		new_max_nonce /= ((uint64_t)thr->tv_hashes_done.tv_sec * 1000000) + thr->tv_hashes_done.tv_usec;
		
		if (new_max_nonce > 0xffffffff)
			new_max_nonce = 0xffffffff;
		
		*max_nonce = new_max_nonce;
	}
	
	hashmeter2(thr);
	
	return true;
}

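/* Worked example of the max_nonce rescaling above (illustrative numbers):
 * with opt_log_interval = 20, cycle = 4. If 0x100000 nonces were scanned in
 * 2 s (tv_hashes_done = 2,000,000 us), then
 * new_max_nonce = 0x100000 * 4 * 1000000 / 2000000 = 0x200000,
 * i.e. the range is doubled so the next scanhash call lasts roughly one
 * cycle, capped at the full 32-bit nonce space. */
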
bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll that will wait a specified
 * time mstime (in milliseconds) for a work restart request. Returns zero if
 * the condition was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}

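/* Usage sketch (hypothetical polling driver): sleep up to 100 ms per
 * iteration, but wake immediately on a work restart request:
 *
 *	while (restart_wait(thr, 100) == ETIMEDOUT)
 *	{
 *		if (my_job_finished(thr->cgpu))  // hypothetical helper
 *			break;
 *	}
 */
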
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			/* api->scanhash should scan the work for valid nonces
			 * until max_nonce is reached or thr_info->work_restart */
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
			/* The inner do-while loop will exit unless the device is capable of
			 * scanning a specific nonce range (currently CPU and GPU drivers)
			 * See abandon_work comments for more details */
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

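/* Driver-contract sketch for the loop above (illustrative, not a real
 * driver): api->scanhash hashes from work->blk.nonce up to the given bound,
 * returning the number of hashes done (or -1 on fatal error) and stopping
 * early when thr->work_restart is set.
 *
 *	static int64_t my_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
 *	{
 *		int64_t hashes = 0;
 *		while (work->blk.nonce < max_nonce && !thr->work_restart)
 *		{
 *			if (my_nonce_valid(work, work->blk.nonce))  // hypothetical helper
 *				submit_nonce(thr, work, work->blk.nonce);
 *			++work->blk.nonce;
 *			++hashes;
 *		}
 *		return hashes;
 *	}
 */
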
void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

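/* Overview of the asynchronous job lifecycle (summarizing the helpers below
 * and minerloop_async): do_job_prepare -> job_prepare_complete ->
 * do_job_start -> mt_job_transition / job_start_complete (called back by the
 * driver) -> do_get_results -> job_results_fetched -> do_process_results.
 * Drivers supply job_prepare and job_start, plus optionally job_get_results
 * and job_process_results; job_start_abort handles failed starts. */
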
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, bfg_condattr);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

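/* Usage sketch (illustrative): a thread other than the device's own minerloop
 * thread (e.g. an RPC handler) serializing access to device state. The
 * request blocks until do_notifier_select above parks the minerloop thread on
 * the condition variable; both calls are no-ops on the device thread itself.
 *
 *	cgpu_request_control(cgpu);
 *	// ... safely touch driver/device state here ...
 *	cgpu_release_control(cgpu);
 */
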
static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}

void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(mythr->_job_transition_in_progress && timer_isset(&mythr->tv_morework)))
				{
					// Really only happens at startup
					applog(LOG_DEBUG, "%"PRIpreprv": Job transition in progress, with morework timer enabled: unsetting in-progress flag", proc->proc_repr);
					mythr->_job_transition_in_progress = false;
				}
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
				
				timer_unset(&mythr->tv_morework);
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		// HACK: Some designs set the main thr tv_poll from secondary thrs
		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
		
		do_notifier_select(thr, &tv_timeout);
	}
}

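/* Driver-contract sketch for the queue loop above (illustrative): queued
 * drivers implement queue_append, queue_flush and poll. queue_append must
 * set thr->queue_full before returning false, or the fill loop above would
 * spin; the rejected work is kept in next_work and retried later.
 *
 *	static bool my_queue_append(struct thr_info *thr, struct work *work)
 *	{
 *		if (!my_device_has_free_slot(thr->cgpu))  // hypothetical helper
 *		{
 *			thr->queue_full = true;
 *			return false;
 *		}
 *		my_device_send_work(thr->cgpu, work);  // hypothetical helper
 *		return true;
 *	}
 */
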
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}

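/* Usage sketch (hypothetical detect function): allocate and describe a
 * device, then hand it over; _add_cgpu expands procs > 1 into a linked list
 * of per-processor cgpu_info clones.
 *
 *	struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
 *	cgpu->drv = &my_drv;  // hypothetical driver
 *	cgpu->procs = 4;      // becomes 4 linked processors
 *	cgpu->threads = 1;
 *	add_cgpu(cgpu);
 */
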
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

static
const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

static
const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char *newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (!newvalue)
		newvalue = "";
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}

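/* Usage sketch (hypothetical driver): options are exposed through a table of
 * bfg_set_device_definition entries, terminated by a NULL optname, which
 * _proc_set_device above scans on each set_device request.
 *
 *	static const struct bfg_set_device_definition my_set_device_funcs[] = {
 *		{"clock", my_set_clock, "chip clock in MHz"},  // hypothetical setter
 *		{NULL},
 *	};
 *	// during detection: cgpu->set_device_funcs = my_set_device_funcs;
 */
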
#ifdef HAVE_CURSES
const char *proc_set_device_tui_wrapper(struct cgpu_info * const proc, char * const optname, const bfg_set_device_func_t func, const char * const prompt, const char * const success_msg)
{
	static char replybuf[0x2001];
	char * const cvar = curses_input(prompt);
	if (!cvar)
		return "Cancelled\n";
	
	enum bfg_set_device_replytype success;
	const char * const reply = func(proc, optname, cvar, replybuf, &success);
	free(cvar);
	if (reply)
	{
		if (reply != replybuf)
			snprintf(replybuf, sizeof(replybuf), "%s\n", reply);
		else
			tailsprintf(replybuf, sizeof(replybuf), "\n");
		return replybuf;
	}
	
	return success_msg ?: "Successful\n";
}
#endif

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
		{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}

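/* The scan_devices entries iterated above come from the user's serial-scan
 * settings (illustrative forms; "mydrv" is a hypothetical driver name):
 *
 *	mydrv:/dev/ttyUSB0   probe one device, restricted to this driver
 *	/dev/ttyUSB0         probe one device with any serial driver
 *	auto                 force autoscan
 *	noauto               inhibit autoscan
 *	all                  matched but ignored here (n/a)
 */
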
static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

#define _open_bitstream2(path, path3)  do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

#define _open_bitstream3(path)  do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

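/* Search-order sketch: open_bitstream("mydrv", "fpga.bit") (hypothetical
 * arguments) tries each root in turn (opt_kernel_path, cgminer_path, "."),
 * and under each root the subdirectories "mydrv", "bitstreams", and the root
 * itself, each directly and via "../share/" PACKAGE and "../" PACKAGE,
 * returning the first file fopen succeeds on, or NULL if none opens. */
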
void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}

struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = (void*)dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}
  996. }