deviceapi.c
/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif
#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <utlist.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"
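
/* Drivers are kept on two singly-linked registration lists: _bfg_drvreg1,
 * sorted by driver dname, and _bfg_drvreg2, sorted by probe priority. Both
 * are only sorted once, in bfg_devapi_init, since dname and priority may not
 * be assigned until the driver's drv_init hook has run. */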
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
    struct driver_registration *ndr;
    
    if (!drv)
    {
        // NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
        LL_FOREACH2(_bfg_drvreg1, ndr, next_dname)
        {
            drv = ndr->drv;
            if (drv->drv_init)
                drv->drv_init();
        }
        return;
    }
    
    ndr = malloc(sizeof(*ndr));
    *ndr = (struct driver_registration){
        .drv = drv,
    };
    LL_PREPEND2(_bfg_drvreg1, ndr, next_dname);
    LL_PREPEND2(_bfg_drvreg2, ndr, next_prio);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
    return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
    return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
    _bfg_register_driver(NULL);
#ifdef LL_SORT2
    LL_SORT2(_bfg_drvreg1, sort_drv_by_dname, next_dname);
    LL_SORT2(_bfg_drvreg2, sort_drv_by_priority, next_prio);
#else
#define next next_dname
    LL_SORT(_bfg_drvreg1, sort_drv_by_dname);
#undef next
#define next next_prio
    LL_SORT(_bfg_drvreg2, sort_drv_by_priority);
#undef next
#endif
}
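
/* bfg_devapi_init is expected to run once at startup, after all drivers have
 * registered (typically via the BFG_REGISTER_DRIVER macro in deviceapi.h):
 * _bfg_register_driver(NULL) invokes each driver's drv_init hook, and the
 * lists are then sorted for the rest of the program to use. */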

bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
    struct cgpu_info *cgpu = thr->cgpu;
    const long cycle = opt_log_interval / 5 ? : 1;
    
    if (unlikely(hashes == -1)) {
        if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
            dev_error(cgpu, REASON_THREAD_ZERO_HASH);
        
        if (thr->scanhash_working && opt_restart) {
            applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
            thr->scanhash_working = false;
            cgpu->reinit_backoff = 5.2734375;
            hashes = 0;
        } else {
            applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
            cgpu->deven = DEV_RECOVER_ERR;
            run_cmd(cmd_idle);
            return false;
        }
    }
    else
        thr->scanhash_working = true;
    
    thr->hashes_done += hashes;
    if (hashes > cgpu->max_hashes)
        cgpu->max_hashes = hashes;
    timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
    
    // max_nonce management (optional)
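    // Goal: size the nonce range so each scanhash call lasts roughly `cycle`
    // seconds. If the accumulated hash time is still short of a cycle, scale
    // *max_nonce up (saturating at 0xffffffff); if it overran the cycle,
    // scale it back down proportionally.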
    if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
        int mult;
        
        if (likely(!max_nonce || *max_nonce == 0xffffffff))
            return true;
        
        mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
        mult *= cycle;
        if (*max_nonce > (0xffffffff * 0x400) / mult)
            *max_nonce = 0xffffffff;
        else
            *max_nonce = (*max_nonce * mult) / 0x400;
    } else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
        *max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
    else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
        *max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
    
    hashmeter2(thr);
    
    return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
    struct timeval tv_now, tv_delta;
    
    timer_set_now(&tv_now);
    timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
    thr->_tv_last_hashes_done_call = tv_now;
    return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll that will wait a specified
 * time (mstime, in milliseconds) on a work restart request. Returns zero if
 * the condition was met (work restart requested) or ETIMEDOUT if not.
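 *
 * Example (hypothetical polling driver): check the hardware every 100 ms,
 * but abandon the current job as soon as new work arrives:
 *
 *     while (!job_finished)
 *         if (restart_wait(thr, 100) == 0)
 *             break;  // work restart requested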
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
    struct timeval tv_timer, tv_now, tv_timeout;
    fd_set rfds;
    SOCKETTYPE wrn = thr->work_restart_notifier[0];
    int rv;
    
    if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
    {
        // This is a bug!
        applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
        cgsleep_ms(mstime);
        return (thr->work_restart ? 0 : ETIMEDOUT);
    }
    
    timer_set_now(&tv_now);
    timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
    while (true)
    {
        FD_ZERO(&rfds);
        FD_SET(wrn, &rfds);
        tv_timeout = tv_timer;
        rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
        if (rv == 0)
            return ETIMEDOUT;
        if (rv > 0)
        {
            if (thr->work_restart)
                return 0;
            notifier_read(thr->work_restart_notifier);
        }
        timer_set_now(&tv_now);
    }
}

static
struct work *get_and_prepare_work(struct thr_info *thr)
{
    struct cgpu_info *proc = thr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work;
    
    work = get_work(thr);
    if (!work)
        return NULL;
    if (api->prepare_work && !api->prepare_work(thr, work)) {
        free_work(work);
        applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
        return NULL;
    }
    return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_start, tv_end;
    struct timeval tv_hashes, tv_worktime;
    uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
    int64_t hashes;
    struct work *work;
    const bool primary = (!mythr->device_thread) || mythr->primary_thread;
    
#ifdef HAVE_PTHREAD_CANCEL
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
    
    if (cgpu->deven != DEV_ENABLED)
        mt_disable(mythr);
    
    while (likely(!cgpu->shutdown)) {
        mythr->work_restart = false;
        request_work(mythr);
        work = get_and_prepare_work(mythr);
        if (!work)
            break;
        timer_set_now(&work->tv_work_start);
        
        do {
            thread_reportin(mythr);
            /* Only allow the mining thread to be cancelled when
             * it is not in the driver code. */
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
            timer_set_now(&tv_start);
            hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
            timer_set_now(&tv_end);
            pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            pthread_testcancel();
            thread_reportin(mythr);
            
            timersub(&tv_end, &tv_start, &tv_hashes);
            if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
                goto disabled;
            
            if (unlikely(mythr->work_restart)) {
                /* Apart from device_thread 0, we stagger the
                 * starting of every next thread to try and get
                 * all devices busy before worrying about
                 * getting work for their extra threads */
                if (!primary) {
                    struct timespec rgtp;
                    
                    rgtp.tv_sec = 0;
                    rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
                    nanosleep(&rgtp, NULL);
                }
                break;
            }
            
            if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
                mt_disable(mythr);
            
            timersub(&tv_end, &work->tv_work_start, &tv_worktime);
        } while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
        free_work(work);
    }
}

void mt_disable_start__async(struct thr_info * const mythr)
{
    mt_disable_start(mythr);
    if (mythr->prev_work)
        free_work(mythr->prev_work);
    mythr->prev_work = mythr->work;
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}

bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_worktime;
    
    mythr->tv_morework.tv_sec = -1;
    mythr->_job_transition_in_progress = true;
    if (mythr->work)
        timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
    if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
    {
        mythr->work_restart = false;
        request_work(mythr);
        // FIXME: Allow get_work to return NULL to retry on notification
        if (mythr->next_work)
            free_work(mythr->next_work);
        mythr->next_work = get_and_prepare_work(mythr);
        if (!mythr->next_work)
            return false;
        mythr->starting_next_work = true;
        api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
    }
    else
    {
        mythr->starting_next_work = false;
        api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
    }
    job_prepare_complete(mythr);
    return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
    if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
        return;
    
    if (mythr->work)
    {
        if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
            do_get_results(mythr, true);
        else
        {}  // TODO: Set a timer to call do_get_results when job is near complete
    }
    else  // no job currently running
        do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct work *work = mythr->work;
    
    mythr->_job_transition_in_progress = true;
    mythr->tv_results_jobstart = mythr->tv_jobstart;
    mythr->_proceed_with_new_job = proceed_with_new_job;
    if (api->job_get_results)
        api->job_get_results(mythr, work);
    else
        job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
    if (mythr->_proceed_with_new_job)
        do_job_start(mythr);
    else
    {
        if (likely(mythr->prev_work))
        {
            struct timeval tv_now;
            
            timer_set_now(&tv_now);
            do_process_results(mythr, &tv_now, mythr->prev_work, true);
        }
        mt_disable_start__async(mythr);
    }
}

void do_job_start(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    
    thread_reportin(mythr);
    api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
    struct timeval tv_now;
    
    timer_set_now(&tv_now);
    
    if (mythr->starting_next_work)
    {
        mythr->next_work->tv_work_start = tv_now;
        if (mythr->prev_work)
            free_work(mythr->prev_work);
        mythr->prev_work = mythr->work;
        mythr->work = mythr->next_work;
        mythr->next_work = NULL;
    }
    mythr->tv_jobstart = tv_now;
    mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
    struct timeval tv_now;
    
    if (unlikely(!mythr->prev_work))
        return;
    
    timer_set_now(&tv_now);
    do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
    struct cgpu_info *proc = mythr->cgpu;
    
    if (failure)
    {
        proc->deven = DEV_RECOVER_ERR;
        run_cmd(cmd_idle);
    }
    mythr->work = NULL;
    mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    struct timeval tv_hashes;
    int64_t hashes = 0;
    
    if (api->job_process_results)
        hashes = api->job_process_results(mythr, work, stopping);
    thread_reportin(mythr);
    
    if (hashes)
    {
        timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
        if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
            return false;
    }
    
    return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
    struct cgpu_info *cgpu = thr->cgpu;
    struct timeval tv_now;
    int maxfd;
    fd_set rfds;
    
    timer_set_now(&tv_now);
    FD_ZERO(&rfds);
    FD_SET(thr->notifier[0], &rfds);
    maxfd = thr->notifier[0];
    FD_SET(thr->work_restart_notifier[0], &rfds);
    set_maxfd(&maxfd, thr->work_restart_notifier[0]);
    if (thr->mutex_request[1] != INVSOCK)
    {
        FD_SET(thr->mutex_request[0], &rfds);
        set_maxfd(&maxfd, thr->mutex_request[0]);
    }
    if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
        return;
    if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
    {
        // FIXME: This can only handle one request at a time!
        pthread_mutex_t *mutexp = &cgpu->device_mutex;
        notifier_read(thr->mutex_request);
        mutex_lock(mutexp);
        pthread_cond_signal(&cgpu->device_cond);
        pthread_cond_wait(&cgpu->device_cond, mutexp);
        mutex_unlock(mutexp);
    }
    if (FD_ISSET(thr->notifier[0], &rfds))
        notifier_read(thr->notifier);
    if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
        notifier_read(thr->work_restart_notifier);
}

void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
    mutex_init(&cgpu->device_mutex);
    notifier_init(cgpu->thr[0]->mutex_request);
    pthread_cond_init(&cgpu->device_cond, NULL);
}
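
/* cgpu_request_control/cgpu_release_control let a thread other than the
 * device's own management thread pause it (via the mutex_request notifier
 * serviced in do_notifier_select) while device state is touched. A minimal
 * sketch of use from, e.g., an RPC handler:
 *     cgpu_request_control(cgpu);
 *     ... access the device safely here ...
 *     cgpu_release_control(cgpu);
 * Both calls are no-ops when made from the device thread itself. */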
void cgpu_request_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    mutex_lock(&cgpu->device_mutex);
    notifier_wake(thr->mutex_request);
    pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
    struct thr_info * const thr = cgpu->thr[0];
    
    if (pthread_equal(pthread_self(), thr->pth))
        return;
    pthread_cond_signal(&cgpu->device_cond);
    mutex_unlock(&cgpu->device_mutex);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
    struct cgpu_info * const cgpu = mythr->cgpu, *proc;
    
    if (mythr->work_restart_notifier[1] == -1)
        notifier_init(mythr->work_restart_notifier);
    
    for (proc = cgpu; proc; proc = proc->next_proc)
    {
        mythr = proc->thr[0];
        timer_set_now(&mythr->tv_watchdog);
        proc->disable_watchdog = true;
    }
}
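
/* Asynchronous miner loop, used by drivers implementing the job_prepare /
 * job_start / job_get_results / job_process_results callbacks. A single
 * thread manages every processor on the device, multiplexing per-processor
 * timers and notifier sockets through do_notifier_select instead of blocking
 * inside a scanhash call. */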
void minerloop_async(struct thr_info *mythr)
{
    struct thr_info *thr = mythr;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool is_running, should_be_running;
    
    _minerloop_setup(mythr);
    
    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];
            
            // Nothing should happen while we're starting a job
            if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
                goto defer_events;
            
            is_running = mythr->work;
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
            
            if (should_be_running)
            {
                if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
                {
                    mt_disable_finish(mythr);
                    goto djp;
                }
                if (unlikely(mythr->work_restart))
                    goto djp;
            }
            else  // ! should_be_running
            {
                if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
                {
disabled: ;
                    timer_unset(&mythr->tv_morework);
                    if (is_running)
                    {
                        if (mythr->busy_state != TBS_GETTING_RESULTS)
                            do_get_results(mythr, false);
                        else
                            // Avoid starting job when pending result fetch completes
                            mythr->_proceed_with_new_job = false;
                    }
                    else  // !mythr->_mt_disable_called
                        mt_disable_start__async(mythr);
                }
            }
            
            if (timer_passed(&mythr->tv_morework, &tv_now))
            {
djp: ;
                if (!do_job_prepare(mythr, &tv_now))
                    goto disabled;
            }
            
defer_events:
            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }
            
            reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }
        do_notifier_select(thr, &tv_timeout);
    }
}

static
void do_queue_flush(struct thr_info *mythr)
{
    struct cgpu_info *proc = mythr->cgpu;
    struct device_drv *api = proc->drv;
    
    api->queue_flush(mythr);
    if (mythr->next_work)
    {
        free_work(mythr->next_work);
        mythr->next_work = NULL;
    }
}
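
/* Queue-based miner loop, used by drivers implementing queue_append,
 * queue_flush, and poll: work is appended until the driver reports the queue
 * full (queue_append returning false), and results come back via poll. */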
void minerloop_queue(struct thr_info *thr)
{
    struct thr_info *mythr;
    struct cgpu_info *cgpu = thr->cgpu;
    struct device_drv *api = cgpu->drv;
    struct timeval tv_now;
    struct timeval tv_timeout;
    struct cgpu_info *proc;
    bool should_be_running;
    struct work *work;
    
    _minerloop_setup(thr);
    
    while (likely(!cgpu->shutdown)) {
        tv_timeout.tv_sec = -1;
        timer_set_now(&tv_now);
        for (proc = cgpu; proc; proc = proc->next_proc)
        {
            mythr = proc->thr[0];
            
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
            if (should_be_running)
            {
                if (unlikely(mythr->_mt_disable_called))
                    mt_disable_finish(mythr);
                
                if (unlikely(mythr->work_restart))
                {
                    mythr->work_restart = false;
                    do_queue_flush(mythr);
                }
                
                while (!mythr->queue_full)
                {
                    if (mythr->next_work)
                    {
                        work = mythr->next_work;
                        mythr->next_work = NULL;
                    }
                    else
                    {
                        request_work(mythr);
                        // FIXME: Allow get_work to return NULL to retry on notification
                        work = get_and_prepare_work(mythr);
                    }
                    if (!work)
                        break;
                    if (!api->queue_append(mythr, work))
                        mythr->next_work = work;
                }
            }
            else
            if (unlikely(!mythr->_mt_disable_called))
            {
                do_queue_flush(mythr);
                mt_disable_start(mythr);
            }
            
            if (timer_passed(&mythr->tv_poll, &tv_now))
                api->poll(mythr);
            
            if (timer_passed(&mythr->tv_watchdog, &tv_now))
            {
                timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
                bfg_watchdog(proc, &tv_now);
            }
            
            should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
            if (should_be_running && !mythr->queue_full)
                goto redo;
            
            reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
            reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
        }
        do_notifier_select(thr, &tv_timeout);
    }
}

void *miner_thread(void *userdata)
{
    struct thr_info *mythr = userdata;
    struct cgpu_info *cgpu = mythr->cgpu;
    struct device_drv *drv = cgpu->drv;
    
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    
    char threadname[20];
    snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
    RenameThread(threadname);
    
    if (drv->thread_init && !drv->thread_init(mythr)) {
        dev_error(cgpu, REASON_THREAD_FAIL_INIT);
        for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
            dev_error(slave, REASON_THREAD_FAIL_INIT);
        __thr_being_msg(LOG_ERR, mythr, "failure, exiting");
        goto out;
    }
    
    if (drv_ready(cgpu) && !cgpu->already_set_defaults)
        cgpu_set_defaults(cgpu);
    
    thread_reportout(mythr);
    applog(LOG_DEBUG, "Popping ping in miner thread");
    notifier_read(mythr->notifier);  // Wait for a notification to start
    
    cgtime(&cgpu->cgminer_stats.start_tv);
    if (drv->minerloop)
        drv->minerloop(mythr);
    else
        minerloop_scanhash(mythr);
    __thr_being_msg(LOG_NOTICE, mythr, "shutting down");
    
out: ;
    struct cgpu_info *proc = cgpu;
    do
    {
        proc->deven = DEV_DISABLED;
        proc->status = LIFE_DEAD2;
    }
    while ((proc = proc->next_proc) && !proc->threads);
    mythr->getwork = 0;
    mythr->has_pth = false;
    cgsleep_ms(1);
    
    if (drv->thread_shutdown)
        drv->thread_shutdown(mythr);
    
    notifier_destroy(mythr->notifier);
    
    return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;
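
/* Registers a new device. When cgpu->procs is greater than one, the device
 * is expanded into that many processors: the master cgpu_info is shallow-
 * copied for each additional processor, the thread count is divided between
 * them, and the copies are chained through next_proc. */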
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
    int lpcount;
    
    if (!cgpu->procs)
        cgpu->procs = 1;
    lpcount = cgpu->procs;
    cgpu->device = cgpu;
    
    cgpu->dev_repr = malloc(6);
    cgpu->dev_repr_ns = malloc(6);
    
#ifdef NEED_BFG_LOWL_VCOM
    maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
    maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
    maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
    
    devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
    devices_new[total_devices_new++] = cgpu;
    
    if (lpcount > 1)
    {
        int tpp = cgpu->threads / lpcount;
        struct cgpu_info **nlp_p, *slave;
        
        nlp_p = &cgpu->next_proc;
        for (int i = 1; i < lpcount; ++i)
        {
            slave = malloc(sizeof(*slave));
            *slave = *cgpu;
            slave->proc_id = i;
            slave->threads = tpp;
            devices_new[total_devices_new++] = slave;
            *nlp_p = slave;
            nlp_p = &slave->next_proc;
        }
        *nlp_p = NULL;
        cgpu->proc_id = 0;
        cgpu->threads -= (tpp * (lpcount - 1));
    }
    
    renumber_cgpu(cgpu);
    cgpu->last_device_valid_work = time(NULL);
    return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
    mutex_lock(&_add_cgpu_mutex);
    const bool rv = _add_cgpu(cgpu);
    mutex_unlock(&_add_cgpu_mutex);
    return rv;
}

void add_cgpu_live(void *p)
{
    add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
    if (!prev_cgpu)
        return add_cgpu(cgpu);
    
    while (prev_cgpu->next_proc)
        prev_cgpu = prev_cgpu->next_proc;
    
    mutex_lock(&_add_cgpu_mutex);
    int old_total_devices = total_devices_new;
    if (!_add_cgpu(cgpu))
    {
        mutex_unlock(&_add_cgpu_mutex);
        return false;
    }
    prev_cgpu->next_proc = devices_new[old_total_devices];
    mutex_unlock(&_add_cgpu_mutex);
    return true;
}

const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;
    char *p = replybuf;
    bool first = true;
    
    *out_success = SDR_HELP;
    sdf = proc->set_device_funcs;
    if (!sdf)
nohelp:
        return "No help available";
    
    // Length of the first whitespace-delimited token of newvalue
    // (the original loop tested newvalue[0] forever, never terminating)
    size_t matchlen = 0;
    if (newvalue)
        while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
            ++matchlen;
    
    for ( ; sdf->optname; ++sdf)
    {
        if (!sdf->description)
            continue;
        if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
            continue;
        if (first)
            first = false;
        else
            p++[0] = '\n';
        p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
    }
    if (replybuf == p)
        goto nohelp;
    return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    int target_diff = proc->cutofftemp - proc->targettemp;
    proc->cutofftemp = atoi(newvalue);
    if (!proc->targettemp_user)
        proc->targettemp = proc->cutofftemp - target_diff;
    return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    proc->targettemp = atoi(newvalue);
    proc->targettemp_user = true;
    return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
    if (!rv)
        *out_success = SDR_OK;
    else
    if (!strcasecmp(optname, "help"))
        *out_success = SDR_HELP;
    else
        *out_success = SDR_ERR;
}

const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const struct bfg_set_device_definition *sdf;
    
    sdf = proc->set_device_funcs;
    if (!sdf)
    {
        *out_success = SDR_NOSUPP;
        return "Device does not support setting parameters.";
    }
    
    for ( ; sdf->optname; ++sdf)
        if (!strcasecmp(optname, sdf->optname))
        {
            *out_success = SDR_AUTO;
            const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
            if (SDR_AUTO == *out_success)
                _set_auto_sdr(out_success, rv, optname);
            return rv;
        }
    
    if (!strcasecmp(optname, "help"))
        return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
    
    *out_success = SDR_UNKNOWN;
    sprintf(replybuf, "Unknown option: %s", optname);
    return replybuf;
}

const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    if (proc->drv->set_device)
    {
        const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
        _set_auto_sdr(out_success, rv, optname);
        return rv;
    }
    return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
    const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
    switch (*out_success)
    {
        case SDR_NOSUPP:
        case SDR_UNKNOWN:
            if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
                return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
            else
            if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
                return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
        default:
            break;
    }
    return rv;
}
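
/* Drivers advertise their options through a NULL-terminated array of
 * struct bfg_set_device_definition assigned to cgpu->set_device_funcs.
 * A minimal sketch (hypothetical driver; field order inferred from the
 * accesses above):
 *     static const struct bfg_set_device_definition mydev_set_device_funcs[] = {
 *         {"clock", mydev_set_clock, "chip clock, in MHz"},
 *         {NULL},
 *     };
 */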

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
    detectone_func_t detectone = userp;
    if (serial_claim(info->path, NULL))
        applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
    return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
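// Flag bits: 1 = force autoscan; 2 = only use scan entries explicitly
// prefixed with this driver's name; 4 = inhibit autoscan.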
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
    struct string_elist *iter, *tmp;
    const char *dev, *colon;
    bool inhibitauto = flags & 4;
    char found = 0;
    bool forceauto = flags & 1;
    bool hasname;
    size_t namel = strlen(api->name);
    size_t dnamel = strlen(api->dname);
    
#ifdef NEED_BFG_LOWL_VCOM
    clear_detectone_meta_info();
#endif
    DL_FOREACH_SAFE(scan_devices, iter, tmp) {
        dev = iter->string;
        if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
            size_t idlen = colon - dev;
            
            // allow either name:device or dname:device
            if ((idlen != namel || strncasecmp(dev, api->name, idlen))
             && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
                continue;
            
            dev = colon + 1;
            hasname = true;
        }
        else
            hasname = false;
        if (!strcmp(dev, "auto"))
            forceauto = true;
        else if (!strcmp(dev, "noauto"))
            inhibitauto = true;
        else
        if ((flags & 2) && !hasname)
            continue;
        else
        if (!detectone)
        {}  // do nothing
        else
        if (!strcmp(dev, "all"))
        {}  // n/a
        else if (detectone(dev)) {
            string_elist_del(&scan_devices, iter);
            ++found;
        }
    }
    
    if ((forceauto || !(inhibitauto || found)) && autoscan)
        found += autoscan();
    
    return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
    char fullpath[PATH_MAX];
    strcpy(fullpath, path);
    strcat(fullpath, "/");
    if (subdir) {
        strcat(fullpath, subdir);
        strcat(fullpath, "/");
    }
    if (sub2) {
        strcat(fullpath, sub2);
        strcat(fullpath, "/");
    }
    strcat(fullpath, filename);
    return fopen(fullpath, "rb");
}
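
/* The macros below deliberately shadow the helper above, so open_bitstream
 * can try a whole tree of candidate locations (each base path, each
 * share/-style prefix, each subdirectory) and return from the first fopen
 * that succeeds. */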
#define _open_bitstream(path, subdir, sub2)  do {  \
    f = _open_bitstream(path, subdir, sub2, filename);  \
    if (f)  \
        return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
    _open_bitstream(path, NULL, path3);  \
    _open_bitstream(path, "../share/" PACKAGE, path3);  \
    _open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
    _open_bitstream2(path, dname);  \
    _open_bitstream2(path, "bitstreams");  \
    _open_bitstream2(path, NULL);  \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
    FILE *f;
    
    _open_bitstream3(opt_kernel_path);
    _open_bitstream3(cgminer_path);
    _open_bitstream3(".");
    return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
    struct cgpu_info * const proc = thr->cgpu;
    const int fd = proc->device_fd;
    
    if (fd == -1)
        return;
    if (close(fd))
        applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
    else
    {
        proc->device_fd = -1;
        applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
    }
}

struct cgpu_info *device_proc_by_id(struct cgpu_info * const dev, const int procid)
{
    struct cgpu_info *proc = dev;
    for (int i = 0; i < procid; ++i)
    {
        proc = proc->next_proc;
        if (unlikely((!proc) || proc->device != dev))
            return NULL;
    }
    return proc;
}