deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif
#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

void _bfg_register_driver(const struct device_drv *drv)
{
	struct driver_registration *ndr;
	if (!drv)
	{
		// NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
		LL_FOREACH2(_bfg_drvreg1, ndr, next_dname)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
		}
		return;
	}
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND2(_bfg_drvreg1, ndr, next_dname);
	LL_PREPEND2(_bfg_drvreg2, ndr, next_prio);
}

static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	LL_SORT2(_bfg_drvreg1, sort_drv_by_dname, next_dname);
	LL_SORT2(_bfg_drvreg2, sort_drv_by_priority, next_prio);
}
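
/* Record hashes completed by a device thread over the elapsed interval tvp_hashes.
 * A hashes value of -1 signals a scanhash failure: the device is either flagged for
 * reinitialization (if it had been working and opt_restart is set) or put into
 * DEV_RECOVER_ERR. When max_nonce is non-NULL, it is rescaled toward a target scan
 * duration derived from opt_log_interval. Returns false if the device was disabled. */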
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > ((uint64_t)0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = ((uint64_t)*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = (uint64_t)*max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = (uint64_t)*max_nonce * 0x400 / ((((uint64_t)cycle * 1000000) + thr->tv_hashes_done.tv_usec) / ((uint64_t)cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll that will wait a specified
 * time mstime (in milliseconds) waiting on a work restart request. Returns
 * zero if the condition was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
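
/* Fetch a work item for this thread and run the driver's optional prepare_work
 * hook; on prepare failure the work is freed and the processor is put into
 * DEV_RECOVER_ERR. Returns NULL if no usable work could be obtained. */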
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			/* api->scanhash should scan the work for valid nonces
			 * until max_nonce is reached or thr_info->work_restart */
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		/* The inner do-while loop will exit unless the device is capable of
		 * scanning a specific nonce range (currently CPU and GPU drivers)
		 * See abandon_work comments for more details */
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
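
/* Asynchronous (minerloop_async) job state machine helpers follow. The flow is
 * roughly do_job_prepare -> job_prepare_complete -> do_get_results ->
 * job_results_fetched -> do_job_start -> mt_job_transition/job_start_complete,
 * with do_process_results crediting hashes from the previous job. */

/* Disable an async device thread: stash the current work as prev_work and
 * clear the job-transition flag so the loop treats the thread as idle. */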
void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}
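
/* Prepare the next job for an async device thread. If the current work is
 * exhausted (per abandon_work) a fresh work item is fetched; otherwise the
 * existing work is re-prepared. Calls the driver's job_prepare hook and then
 * job_prepare_complete. Returns false if no work could be obtained. */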
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}
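
/* Ask the driver to collect results for the currently running job. If the
 * driver has no job_get_results hook, results are considered fetched
 * immediately. proceed_with_new_job selects whether a new job is started
 * afterwards (via job_results_fetched). */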
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}
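
/* Called (typically from driver job_start paths) when the device switches to
 * the newly prepared job: rotate next_work/work/prev_work and record the job
 * start time. */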
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}
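
/* Credit the hashes reported by the driver's job_process_results hook for a
 * finished job. Returns false if hashes_done ended up disabling the device. */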
bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
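
/* Control-request plumbing: another thread can pause a device's management
 * thread (via the mutex_request notifier serviced in do_notifier_select) to
 * gain exclusive access to the device between cgpu_request_control and
 * cgpu_release_control. */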
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, bfg_condattr);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}
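
/* Event-driven miner loop for drivers with asynchronous job handling: walks
 * every processor of the device, drives the job state machine, services poll
 * and watchdog timers, then sleeps in do_notifier_select until the next
 * timeout or notification. */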
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(mythr->_job_transition_in_progress && timer_isset(&mythr->tv_morework)))
				{
					// Really only happens at startup
					applog(LOG_DEBUG, "%"PRIpreprv": Job transition in progress, with morework timer enabled: unsetting in-progress flag", proc->proc_repr);
					mythr->_job_transition_in_progress = false;
				}
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
				timer_unset(&mythr->tv_morework);
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}
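
/* Miner loop for queue-based drivers: keeps each processor's queue topped up
 * via queue_append, flushes it on work restarts or when the processor is
 * disabled, and otherwise services poll/watchdog timers like minerloop_async. */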
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		// HACK: Some designs set the main thr tv_poll from secondary thrs
		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
		
		do_notifier_select(thr, &tv_timeout);
	}
}
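
/* Per-device thread entry point: runs the driver's thread_init, waits for the
 * start notification, then enters the driver's minerloop (defaulting to
 * minerloop_scanhash). On exit, marks the device's processors dead and runs
 * thread_shutdown. */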
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	notifier_destroy(mythr->notifier);
	return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;
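
/* Register a newly detected device (and its extra processors, when
 * cgpu->procs > 1) in the devices_new array; slave processors are shallow
 * copies of the master linked through next_proc. Callers hold _add_cgpu_mutex
 * via add_cgpu/add_cgpu_slave. */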
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
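
/* Build a help string from the option names and descriptions registered in
 * proc->set_device_funcs (optionally filtered to a single option), falling
 * back to "No help available" when nothing matches. */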
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

static
const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

static
const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}
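
/* Public entry point for set_device requests: dispatches to the driver's
 * set_device hook or the set_device_funcs table, then falls back to the
 * generic temp-cutoff/temp-target handlers for options the driver did not
 * recognize. */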
const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char *newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (!newvalue)
		newvalue = "";
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif

// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
		{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

#define _open_bitstream2(path, path3)  do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

#define _open_bitstream3(path)  do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)
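
/* Search for a bitstream file under opt_kernel_path, cgminer_path and the
 * current directory, trying the driver-specific and bitstreams/ subdirectories
 * enumerated by the helper macros above; returns NULL if the file is not found. */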
FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}
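
/* Return the procid'th processor of a device by walking next_proc, or NULL if
 * the chain ends or crosses into another device first. */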
struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = (void*)dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}