deviceapi.c

/*
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 * Copyright 2014 Nate Woolls
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#include <ctype.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif
#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "lowlevel.h"
#ifdef NEED_BFG_LOWL_VCOM
#include "lowl-vcom.h"
#endif
#include "miner.h"
#include "util.h"

struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;
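
/* Drivers register themselves before bfg_devapi_init runs, so registrations
 * are queued on a local init list first. Passing NULL flushes the list into
 * two uthash tables: _bfg_drvreg1 keyed by the driver's long name (dname)
 * and _bfg_drvreg2 keyed by its short name. */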
void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}
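
/* Illustrative registration flow (driver definitions live in the individual
 * driver files; the names below are hypothetical):
 *
 *   static struct device_drv example_drv = {
 *       .dname = "example",
 *       .name = "XMP",
 *   };
 *   _bfg_register_driver(&example_drv);  // queued on the init list
 *   ...
 *   bfg_devapi_init();  // flushes the list and sorts both tables
 */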
static
int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
{
	return strcmp(a->drv->dname, b->drv->dname);
}

static
int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
{
	return a->drv->probe_priority - b->drv->probe_priority;
}

void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname   );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}

float common_sha256d_and_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	switch (malgo->algo)
	{
#ifdef USE_SCRYPT
		case POW_SCRYPT:
			return 1./0x10000;
#endif
		case POW_SHA256D:
			return 1.;
		default:
			return -1.;
	}
}

#ifdef USE_SCRYPT
float common_scrypt_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	return (malgo->algo == POW_SCRYPT) ? (1./0x10000) : -1.;
}
#endif
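
/* Record hashes completed over the interval tvp_hashes. A hashes value of -1
 * signals a scanhash failure: the first failure attempts a reinitialise; a
 * repeat failure (or opt_restart disabled) puts the processor into
 * DEV_RECOVER_ERR and returns false. When max_nonce is supplied, it is
 * rescaled so one scan takes roughly opt_log_interval/5 seconds. */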
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}

/* A generic wait function for threads that poll, waiting the specified number
 * of milliseconds (mstime) for a work restart request. Returns zero if the
 * condition was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
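
/* Fetch the next work item and run the driver's optional prepare_work hook;
 * a prepare failure disables the processor. */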
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			/* api->scanhash should scan the work for valid nonces
			 * until max_nonce is reached or thr_info->work_restart */
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		/* The inner do-while loop will exit unless the device is capable of
		 * scanning a specific nonce range (currently CPU and GPU drivers)
		 * See abandon_work comments for more details */
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

void mt_disable_start__async(struct thr_info * const mythr)
{
	mt_disable_start(mythr);
	if (mythr->prev_work)
		free_work(mythr->prev_work);
	mythr->prev_work = mythr->work;
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}
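
/* Asynchronous job state machine used by minerloop_async. The usual cycle
 * (exact ordering varies with which job_* callbacks a driver implements) is:
 *
 *   do_job_prepare -> job_prepare_complete -> do_get_results
 *     -> job_results_fetched -> do_job_start -> mt_job_transition
 *     -> job_start_complete -> do_process_results
 *
 * Drivers invoke the *_complete / *_fetched entry points as their hardware
 * finishes each stage. */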
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		if (likely(mythr->prev_work))
		{
			struct timeval tv_now;
			
			timer_set_now(&tv_now);
			do_process_results(mythr, &tv_now, mythr->prev_work, true);
		}
		mt_disable_start__async(mythr);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
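
/* Control-request handshake: another thread grabs device_mutex, wakes the
 * minerloop through the mutex_request notifier, then waits on device_cond.
 * do_notifier_select (above) answers by signalling the condition and waiting
 * for cgpu_release_control to signal it back, giving the requester exclusive
 * access to the device between cgpu_request_control and cgpu_release_control. */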
void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
{
	mutex_init(&cgpu->device_mutex);
	notifier_init(cgpu->thr[0]->mutex_request);
	pthread_cond_init(&cgpu->device_cond, bfg_condattr);
}

void cgpu_request_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	mutex_lock(&cgpu->device_mutex);
	notifier_wake(thr->mutex_request);
	pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
}

void cgpu_release_control(struct cgpu_info * const cgpu)
{
	struct thr_info * const thr = cgpu->thr[0];
	if (pthread_equal(pthread_self(), thr->pth))
		return;
	pthread_cond_signal(&cgpu->device_cond);
	mutex_unlock(&cgpu->device_mutex);
}

static
void _minerloop_setup(struct thr_info *mythr)
{
	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
}
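
/* Event-driven miner loop for drivers implementing the job_* callbacks.
 * Each pass walks every processor of the device, advances its job state
 * machine, runs any poll or watchdog timers that have expired, then sleeps
 * in do_notifier_select until the nearest timer or a notifier fires. */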
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(mythr->_job_transition_in_progress && timer_isset(&mythr->tv_morework)))
				{
					// Really only happens at startup
					applog(LOG_DEBUG, "%"PRIpreprv": Job transition in progress, with morework timer enabled: unsetting in-progress flag", proc->proc_repr);
					mythr->_job_transition_in_progress = false;
				}
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else  // !mythr->_mt_disable_called
						mt_disable_start__async(mythr);
				}
				timer_unset(&mythr->tv_morework);
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}
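
/* Miner loop for queue-based drivers: keeps each processor's queue topped up
 * with queue_append until the driver reports queue_full, flushes the queue
 * on a work restart, and otherwise polls and sleeps like minerloop_async. */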
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		// HACK: Some designs set the main thr tv_poll from secondary thrs
		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
		
		do_notifier_select(thr, &tv_timeout);
	}
}

void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;
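
/* Register a new device. A device advertising procs > 1 is expanded into
 * that many per-processor cgpu_info clones linked through next_proc, with
 * the device's thread count divided among them; the remainder stays on the
 * first processor. */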
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	cgpu->dev_repr_ns = malloc(6);
	
#ifdef NEED_BFG_LOWL_VCOM
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	
	renumber_cgpu(cgpu);
	cgpu->last_device_valid_work = time(NULL);
	return true;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	mutex_lock(&_add_cgpu_mutex);
	const bool rv = _add_cgpu(cgpu);
	mutex_unlock(&_add_cgpu_mutex);
	return rv;
}

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
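
/* set_device option framework: each driver may expose a set_device_funcs
 * array of bfg_set_device_definition entries (terminated by a NULL optname).
 * Handlers return NULL on success or an error/help string, and report the
 * outcome through the SDR_* reply types. */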
const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	char *p = replybuf;
	bool first = true;
	
	*out_success = SDR_HELP;
	sdf = proc->set_device_funcs;
	if (!sdf)
nohelp:
		return "No help available";
	
	// Length of the requested option prefix, up to whitespace or end of string
	size_t matchlen = 0;
	if (newvalue)
		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
			++matchlen;
	
	for ( ; sdf->optname; ++sdf)
	{
		if (!sdf->description)
			continue;
		if (matchlen && (strncasecmp(optname, sdf->optname, matchlen) || optname[matchlen]))
			continue;
		if (first)
			first = false;
		else
			p++[0] = '\n';
		p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
	}
	if (replybuf == p)
		goto nohelp;
	return replybuf;
}

const char *proc_set_device_temp_cutoff(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	int target_diff = proc->cutofftemp - proc->targettemp;
	proc->cutofftemp = atoi(newvalue);
	if (!proc->targettemp_user)
		proc->targettemp = proc->cutofftemp - target_diff;
	return NULL;
}

const char *proc_set_device_temp_target(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	proc->targettemp = atoi(newvalue);
	proc->targettemp_user = true;
	return NULL;
}

static inline
void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
{
	if (!rv)
		*out_success = SDR_OK;
	else
	if (!strcasecmp(optname, "help"))
		*out_success = SDR_HELP;
	else
		*out_success = SDR_ERR;
}

const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const struct bfg_set_device_definition *sdf;
	
	sdf = proc->set_device_funcs;
	if (!sdf)
	{
		*out_success = SDR_NOSUPP;
		return "Device does not support setting parameters.";
	}
	for ( ; sdf->optname; ++sdf)
		if (!strcasecmp(optname, sdf->optname))
		{
			*out_success = SDR_AUTO;
			const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
			if (SDR_AUTO == *out_success)
				_set_auto_sdr(out_success, rv, optname);
			return rv;
		}
	if (!strcasecmp(optname, "help"))
		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
	*out_success = SDR_UNKNOWN;
	sprintf(replybuf, "Unknown option: %s", optname);
	return replybuf;
}

const char *__proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	if (proc->drv->set_device)
	{
		const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
		_set_auto_sdr(out_success, rv, optname);
		return rv;
	}
	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
}

const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	const char * const rv = __proc_set_device(proc, optname, newvalue, replybuf, out_success);
	switch (*out_success)
	{
		case SDR_NOSUPP:
		case SDR_UNKNOWN:
			if (!strcasecmp(optname, "temp-cutoff") || !strcasecmp(optname, "temp_cutoff"))
				return proc_set_device_temp_cutoff(proc, optname, newvalue, replybuf, out_success);
			else
			if (!strcasecmp(optname, "temp-target") || !strcasecmp(optname, "temp_target"))
				return proc_set_device_temp_target(proc, optname, newvalue, replybuf, out_success);
		default:
			break;
	}
	return rv;
}

#ifdef NEED_BFG_LOWL_VCOM
bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
{
	detectone_func_t detectone = userp;
	if (serial_claim(info->path, NULL))
		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
	return detectone(info->path);
}
#endif
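
/* Scan the user-supplied scan_devices list for this driver. Entries may be
 * prefixed with "name:" or "dname:" to bind them to one driver. The flags
 * bits: 1 forces autoscan, 2 only accepts entries naming this driver, and
 * 4 inhibits autoscan. "auto", "noauto" and "all" are handled specially. */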
// NOTE: This is never used for any actual VCOM devices, which should use the new lowlevel interface
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
		else
		if (!strcmp(dev, "all"))
		{}  // n/a
		else if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}
#define _open_bitstream(path, subdir, sub2)  do {  \
	f = _open_bitstream(path, subdir, sub2, filename);  \
	if (f)  \
		return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
	_open_bitstream(path, NULL, path3);  \
	_open_bitstream(path, "../share/" PACKAGE, path3);  \
	_open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
	_open_bitstream2(path, dname);  \
	_open_bitstream2(path, "bitstreams");  \
	_open_bitstream2(path, NULL);  \
} while(0)
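
/* Search for a bitstream file by name. Each base path (opt_kernel_path,
 * cgminer_path, then the current directory) is tried directly and under
 * ../share/PACKAGE and ../PACKAGE, each with the driver's dname
 * subdirectory, a "bitstreams" subdirectory, and no subdirectory.
 * Returns NULL if the file is not found anywhere. */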
FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}

struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, const int procid)
{
	struct cgpu_info *proc = (void*)dev;
	for (int i = 0; i < procid; ++i)
	{
		proc = proc->next_proc;
		if (unlikely((!proc) || proc->device != dev))
			return NULL;
	}
	return proc;
}