deviceapi.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070
  1. /*
  2. * Copyright 2011-2013 Luke Dashjr
  3. * Copyright 2011-2012 Con Kolivas
  4. * Copyright 2012-2013 Andrew Smith
  5. * Copyright 2010 Jeff Garzik
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of the GNU General Public License as published by the Free
  9. * Software Foundation; either version 3 of the License, or (at your option)
  10. * any later version. See COPYING for more details.
  11. */
  12. #include "config.h"
  13. #ifdef WIN32
  14. #include <winsock2.h>
  15. #else
  16. #include <sys/select.h>
  17. #endif
  18. #include <stdbool.h>
  19. #include <stdint.h>
  20. #include <sys/time.h>
  21. #include <sys/types.h>
  22. #include <time.h>
  23. #include <unistd.h>
  24. #include "compat.h"
  25. #include "deviceapi.h"
  26. #include "logging.h"
  27. #include "lowlevel.h"
  28. #ifdef NEED_BFG_LOWL_VCOM
  29. #include "lowl-vcom.h"
  30. #endif
  31. #include "miner.h"
  32. #include "util.h"
// Driver registry: two uthash tables over the same registration nodes,
// keyed by long name (dname, hh) and short name (name, hh2) respectively.
struct driver_registration *_bfg_drvreg1;
struct driver_registration *_bfg_drvreg2;

/* Register a device driver, or finalise registration.
 * Called with a driver to queue it on a local pending list; called with
 * NULL (from bfg_devapi_init) to run each queued driver's drv_init hook
 * and move the pending entries into the two hash tables. */
void _bfg_register_driver(const struct device_drv *drv)
{
	static struct driver_registration *initlist;
	struct driver_registration *ndr;
	
	if (!drv)
	{
		// Move initlist to hashtables
		LL_FOREACH(initlist, ndr)
		{
			drv = ndr->drv;
			if (drv->drv_init)
				drv->drv_init();
			HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr);
			HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr);
		}
		initlist = NULL;
		return;
	}
	// NOTE(review): malloc result is not checked; OOM here dereferences NULL
	ndr = malloc(sizeof(*ndr));
	*ndr = (struct driver_registration){
		.drv = drv,
	};
	LL_PREPEND(initlist, ndr);
}
  59. static
  60. int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b)
  61. {
  62. return strcmp(a->drv->dname, b->drv->dname);
  63. };
  64. static
  65. int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b)
  66. {
  67. return a->drv->probe_priority - b->drv->probe_priority;
  68. };
/* Finalise driver registration: flush the pending list into the hash
 * tables, then sort table 1 by dname and table 2 by probe priority. */
void bfg_devapi_init()
{
	_bfg_register_driver(NULL);
	HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname );
	HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority);
}
/* Account for a completed scanhash pass: update per-thread and per-device
 * hash statistics, and (when max_nonce is non-NULL) rescale *max_nonce so
 * that one pass takes roughly `cycle` seconds.
 * hashes == -1 signals a hardware failure; the device is either flagged
 * for reinitialisation (if it had been working and restarts are allowed)
 * or disabled. Returns false only when the device was disabled. */
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	// Target pass duration in seconds: opt_log_interval/5, minimum 1
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		// Only record a device error once per "not well" interval
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		// Pass finished early: grow *max_nonce toward the target cycle
		// time, unless the driver already scans the full nonce space
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		// Fixed-point scaling with 0x400 as the unit
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		// Pass took too long: shrink *max_nonce proportionally
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		// Slightly over target: small proportional correction
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	return true;
}
  118. bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
  119. {
  120. struct timeval tv_now, tv_delta;
  121. timer_set_now(&tv_now);
  122. timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
  123. thr->_tv_last_hashes_done_call = tv_now;
  124. return hashes_done(thr, hashes, &tv_delta, max_nonce);
  125. }
  126. /* A generic wait function for threads that poll that will wait a specified
  127. * time tdiff waiting on a work restart request. Returns zero if the condition
  128. * was met (work restart requested) or ETIMEDOUT if not.
  129. */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	// Without a notifier we cannot wake early; fall back to a plain sleep
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	// Absolute deadline, so retries don't extend the total wait
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			// Wakeup without restart flag: drain the notifier and wait again
			notifier_read(thr->work_restart_notifier);
		}
		// select error (e.g. EINTR) or spurious wakeup: recompute remaining
		// time against the original deadline and retry
		timer_set_now(&tv_now);
	}
}
  162. static
  163. struct work *get_and_prepare_work(struct thr_info *thr)
  164. {
  165. struct cgpu_info *proc = thr->cgpu;
  166. struct device_drv *api = proc->drv;
  167. struct work *work;
  168. work = get_work(thr);
  169. if (!work)
  170. return NULL;
  171. if (api->prepare_work && !api->prepare_work(thr, work)) {
  172. free_work(work);
  173. applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
  174. proc->deven = DEV_RECOVER_ERR;
  175. run_cmd(cmd_idle);
  176. return NULL;
  177. }
  178. return work;
  179. }
// Miner loop to manage a single processor (with possibly multiple threads per processor)
/* Synchronous mining loop for drivers implementing scanhash(): fetch work,
 * repeatedly scan it until the work is abandoned or a restart is requested,
 * and account for completed hashes after each pass. */
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	// Drivers that can limit work choose the initial nonce range; others scan all 2^32
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable(mythr);
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			// hashes_done returns false when the device was disabled
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			// NB: the label sits on the if body so hashes_done failures
			// above share this mt_disable path
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
/* Async minerloop: prepare the next job on a processor. Reuses the current
 * work if it has not yet been exhausted; otherwise fetches fresh work.
 * Either way the driver's job_prepare hook is invoked. Returns false when
 * no work could be fetched (caller then treats the proc as disabled). */
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	// Disarm the "more work" timer while the job transition is in progress
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	// tv_worktime is only read below when mythr->work is set (short-circuit)
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		// Current work is still usable; prepare it again
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}
/* Continuation after job_prepare: fetch results from any currently running
 * job first (chaining into the new job), or start the new job immediately
 * when the device is idle. */
void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		// A result fetch is already in flight; it will chain onward
		return;
	
	if (mythr->work)
	{
		// Currently always fetches results immediately; see TODOs
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{} // TODO: Set a timer to call do_get_results when job is near complete
	}
	else // no job currently running
		do_job_start(mythr);
}
  282. void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
  283. {
  284. struct cgpu_info *proc = mythr->cgpu;
  285. struct device_drv *api = proc->drv;
  286. struct work *work = mythr->work;
  287. mythr->_job_transition_in_progress = true;
  288. mythr->tv_results_jobstart = mythr->tv_jobstart;
  289. mythr->_proceed_with_new_job = proceed_with_new_job;
  290. if (api->job_get_results)
  291. api->job_get_results(mythr, work);
  292. else
  293. job_results_fetched(mythr);
  294. }
  295. void job_results_fetched(struct thr_info *mythr)
  296. {
  297. if (mythr->_proceed_with_new_job)
  298. do_job_start(mythr);
  299. else
  300. {
  301. if (likely(mythr->prev_work))
  302. {
  303. struct timeval tv_now;
  304. timer_set_now(&tv_now);
  305. do_process_results(mythr, &tv_now, mythr->prev_work, true);
  306. }
  307. mt_disable_start(mythr);
  308. }
  309. }
  310. void do_job_start(struct thr_info *mythr)
  311. {
  312. struct cgpu_info *proc = mythr->cgpu;
  313. struct device_drv *api = proc->drv;
  314. thread_reportin(mythr);
  315. api->job_start(mythr);
  316. }
  317. void mt_job_transition(struct thr_info *mythr)
  318. {
  319. struct timeval tv_now;
  320. timer_set_now(&tv_now);
  321. if (mythr->starting_next_work)
  322. {
  323. mythr->next_work->tv_work_start = tv_now;
  324. if (mythr->prev_work)
  325. free_work(mythr->prev_work);
  326. mythr->prev_work = mythr->work;
  327. mythr->work = mythr->next_work;
  328. mythr->next_work = NULL;
  329. }
  330. mythr->tv_jobstart = tv_now;
  331. mythr->_job_transition_in_progress = false;
  332. }
  333. void job_start_complete(struct thr_info *mythr)
  334. {
  335. struct timeval tv_now;
  336. if (unlikely(!mythr->prev_work))
  337. return;
  338. timer_set_now(&tv_now);
  339. do_process_results(mythr, &tv_now, mythr->prev_work, false);
  340. }
  341. void job_start_abort(struct thr_info *mythr, bool failure)
  342. {
  343. struct cgpu_info *proc = mythr->cgpu;
  344. if (failure)
  345. {
  346. proc->deven = DEV_RECOVER_ERR;
  347. run_cmd(cmd_idle);
  348. }
  349. mythr->work = NULL;
  350. mythr->_job_transition_in_progress = false;
  351. }
  352. bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
  353. {
  354. struct cgpu_info *proc = mythr->cgpu;
  355. struct device_drv *api = proc->drv;
  356. struct timeval tv_hashes;
  357. int64_t hashes = 0;
  358. if (api->job_process_results)
  359. hashes = api->job_process_results(mythr, work, stopping);
  360. thread_reportin(mythr);
  361. if (hashes)
  362. {
  363. timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
  364. if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
  365. return false;
  366. }
  367. return true;
  368. }
/* Sleep until tvp_timeout, or until a notification arrives, using select()
 * over the thread's notifier sockets. Handles three wakeup sources: the
 * generic thread notifier, work restart notifications, and control
 * requests from other threads (see cgpu_request_control). */
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	// mutex_request is only armed for devices using the control-request API
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		// Handshake: wake the requester, then block until it signals back
		// from cgpu_release_control
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
  405. void cgpu_setup_control_requests(struct cgpu_info * const cgpu)
  406. {
  407. mutex_init(&cgpu->device_mutex);
  408. notifier_init(cgpu->thr[0]->mutex_request);
  409. pthread_cond_init(&cgpu->device_cond, NULL);
  410. }
  411. void cgpu_request_control(struct cgpu_info * const cgpu)
  412. {
  413. struct thr_info * const thr = cgpu->thr[0];
  414. if (pthread_equal(pthread_self(), thr->pth))
  415. return;
  416. mutex_lock(&cgpu->device_mutex);
  417. notifier_wake(thr->mutex_request);
  418. pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex);
  419. }
  420. void cgpu_release_control(struct cgpu_info * const cgpu)
  421. {
  422. struct thr_info * const thr = cgpu->thr[0];
  423. if (pthread_equal(pthread_self(), thr->pth))
  424. return;
  425. pthread_cond_signal(&cgpu->device_cond);
  426. mutex_unlock(&cgpu->device_mutex);
  427. }
  428. static
  429. void _minerloop_setup(struct thr_info *mythr)
  430. {
  431. struct cgpu_info * const cgpu = mythr->cgpu, *proc;
  432. if (mythr->work_restart_notifier[1] == -1)
  433. notifier_init(mythr->work_restart_notifier);
  434. for (proc = cgpu; proc; proc = proc->next_proc)
  435. {
  436. mythr = proc->thr[0];
  437. timer_set_now(&mythr->tv_watchdog);
  438. proc->disable_watchdog = true;
  439. }
  440. }
/* Asynchronous miner loop for drivers using the job_prepare/job_start/
 * job_get_results/job_process_results hook set. One thread multiplexes all
 * processors of the device, driving a per-processor job state machine and
 * sleeping in select() until the earliest timer deadline. */
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	_minerloop_setup(mythr);
	
	while (likely(!cgpu->shutdown)) {
		// tv_sec == -1 means "no timeout yet"; reduce_timeout_to lowers it
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				// Re-enable a previously disabled processor
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else // ! should_be_running
			{
				// Also reached via goto when do_job_prepare fails
				if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress))
				{
disabled: ;
					timer_unset(&mythr->tv_morework);
					if (is_running)
					{
						if (mythr->busy_state != TBS_GETTING_RESULTS)
							do_get_results(mythr, false);
						else
							// Avoid starting job when pending result fetch completes
							mythr->_proceed_with_new_job = false;
					}
					else // !mythr->_mt_disable_called
						mt_disable_start(mythr);
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			// Next wakeup is the earliest of the three per-proc timers
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
  511. static
  512. void do_queue_flush(struct thr_info *mythr)
  513. {
  514. struct cgpu_info *proc = mythr->cgpu;
  515. struct device_drv *api = proc->drv;
  516. api->queue_flush(mythr);
  517. if (mythr->next_work)
  518. {
  519. free_work(mythr->next_work);
  520. mythr->next_work = NULL;
  521. }
  522. }
/* Queue-based miner loop for drivers using the queue_append/queue_flush
 * hook set. One thread multiplexes all processors: it keeps each enabled
 * processor's queue topped up, polls the driver, and sleeps in select()
 * between timer deadlines. */
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	_minerloop_setup(thr);
	
	while (likely(!cgpu->shutdown)) {
		// tv_sec == -1 means "no timeout yet"; reduce_timeout_to lowers it
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(mythr->_mt_disable_called))
					mt_disable_finish(mythr);
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				// Top up the driver queue until it reports full
				while (!mythr->queue_full)
				{
					// Reuse work the driver previously rejected, if any
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					// Driver refused the work (queue full): hold it for later
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(!mythr->_mt_disable_called))
			{
				do_queue_flush(mythr);
				mt_disable_start(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			// Polling may have drained the queue; refill before sleeping
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
/* Thread entry point for a device and its processors. Runs the driver's
 * thread_init hook, waits for the start notification, then enters the
 * driver's minerloop (or the default scanhash loop) until shutdown or
 * failure, after which the device is marked dead and cleaned up. */
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		// Propagate the failure to following threadless processors too
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (drv_ready(cgpu))
		cgpu_set_defaults(cgpu);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier); // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	// Mark this processor and its following threadless processors dead
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}
// Serialises all device registration (devices_new / total_devices_new)
static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Register a device and create its extra processors ("procs"): the master
 * cgpu is cloned procs-1 times, clones are linked via next_proc and given
 * letter-suffixed names ('a'.. or "aa".. when >26 procs), and the device's
 * threads are divided between them. Caller must hold _add_cgpu_mutex. */
static
bool _add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	renumber_cgpu(cgpu);
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	// NOTE(review): 6 bytes only fits name+2 digits+NUL while drv->name is
	// at most 3 chars — confirm this holds for every driver
	cgpu->dev_repr = malloc(6);
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	cgpu->dev_repr_ns = malloc(6);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
	
#ifdef NEED_BFG_LOWL_VCOM
	// Carry over metadata captured during serial probing, if any
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int ns;
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		const bool manylp = (lpcount > 26);
		const char *as = (manylp ? "aa" : "a");
		
		// Note, strcpy instead of assigning a byte to get the \0 too
		strcpy(&cgpu->proc_repr[5], as);
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], as);
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			if (manylp)
			{
				// Two-letter suffix: base-26 "digits" added onto "aa"
				slave->proc_repr[5] += i / 26;
				slave->proc_repr[6] += i % 26;
				slave->proc_repr_ns[ns ] += i / 26;
				slave->proc_repr_ns[ns + 1] += i % 26;
			}
			else
			{
				slave->proc_repr[5] += i;
				slave->proc_repr_ns[ns] += i;
			}
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		// Master keeps the remainder of the integer thread split
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	cgpu->last_device_valid_work = time(NULL);
	return true;
}
  699. bool add_cgpu(struct cgpu_info *cgpu)
  700. {
  701. mutex_lock(&_add_cgpu_mutex);
  702. const bool rv = _add_cgpu(cgpu);
  703. mutex_unlock(&_add_cgpu_mutex);
  704. return rv;
  705. }
  706. void add_cgpu_live(void *p)
  707. {
  708. add_cgpu(p);
  709. }
/* Register cgpu and splice it onto the end of prev_cgpu's processor chain;
 * with a NULL prev_cgpu this is a plain add_cgpu. */
bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	// Find the current tail of the chain
	// NOTE(review): this walk happens outside _add_cgpu_mutex — confirm
	// callers serialise access to prev_cgpu's chain
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	mutex_lock(&_add_cgpu_mutex);
	// _add_cgpu appends the new device at this index in devices_new
	int old_total_devices = total_devices_new;
	if (!_add_cgpu(cgpu))
	{
		mutex_unlock(&_add_cgpu_mutex);
		return false;
	}
	prev_cgpu->next_proc = devices_new[old_total_devices];
	mutex_unlock(&_add_cgpu_mutex);
	return true;
}
  727. const char *proc_set_device_help(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
  728. {
  729. const struct bfg_set_device_definition *sdf;
  730. char *p = replybuf;
  731. bool first = true;
  732. *out_success = SDR_HELP;
  733. sdf = proc->set_device_funcs;
  734. if (!sdf)
  735. nohelp:
  736. return "No help available";
  737. for ( ; sdf->optname; ++sdf)
  738. {
  739. if (!sdf->description)
  740. continue;
  741. if (first)
  742. first = false;
  743. else
  744. p++[0] = '\n';
  745. p += sprintf(p, "%s: %s", sdf->optname, sdf->description);
  746. }
  747. if (replybuf == p)
  748. goto nohelp;
  749. return replybuf;
  750. }
  751. static inline
  752. void _set_auto_sdr(enum bfg_set_device_replytype * const out_success, const char * const rv, const char * const optname)
  753. {
  754. if (!rv)
  755. *out_success = SDR_OK;
  756. else
  757. if (!strcasecmp(optname, "help"))
  758. *out_success = SDR_HELP;
  759. else
  760. *out_success = SDR_ERR;
  761. }
  762. const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
  763. {
  764. const struct bfg_set_device_definition *sdf;
  765. sdf = proc->set_device_funcs;
  766. if (!sdf)
  767. {
  768. *out_success = SDR_NOSUPP;
  769. return "Device does not support setting parameters.";
  770. }
  771. for ( ; sdf->optname; ++sdf)
  772. if (!strcasecmp(optname, sdf->optname))
  773. {
  774. *out_success = SDR_AUTO;
  775. const char * const rv = sdf->func(proc, optname, newvalue, replybuf, out_success);
  776. if (SDR_AUTO == *out_success)
  777. _set_auto_sdr(out_success, rv, optname);
  778. return rv;
  779. }
  780. if (!strcasecmp(optname, "help"))
  781. return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
  782. *out_success = SDR_UNKNOWN;
  783. sprintf(replybuf, "Unknown option: %s", optname);
  784. return replybuf;
  785. }
  786. const char *proc_set_device(struct cgpu_info * const proc, char * const optname, char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
  787. {
  788. if (proc->drv->set_device)
  789. {
  790. const char * const rv = proc->drv->set_device(proc, optname, newvalue, replybuf);
  791. _set_auto_sdr(out_success, rv, optname);
  792. return rv;
  793. }
  794. return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
  795. }
  796. #ifdef NEED_BFG_LOWL_VCOM
  797. bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
  798. {
  799. detectone_func_t detectone = userp;
  800. if (serial_claim(info->path, NULL))
  801. applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
  802. return detectone(info->path);
  803. }
  804. #endif
/* Scan the --scan-serial list for this driver. Each entry may carry a
 * "name:" or "dname:" prefix selecting a driver; the special device names
 * "auto", "noauto" and "all" adjust scanning behaviour, and anything else
 * is probed with detectone(). autoscan() runs when forced, or when nothing
 * was found and it was not inhibited.
 * flags: bit 0 = force autoscan, bit 1 = only accept entries that name
 * this driver, bit 2 = inhibit autoscan. Returns devices found. */
int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	bool doall = false;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef NEED_BFG_LOWL_VCOM
	clear_detectone_meta_info();
#endif
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		// Driver demands an explicit name prefix; skip bare entries
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
			{} // do nothing
		else
		if (!strcmp(dev, "all"))
			doall = true;
#ifdef NEED_BFG_LOWL_VCOM
		else
		if (serial_claim(dev, NULL))
		{
			// Port already owned by another driver
			applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
			string_elist_del(&scan_devices, iter);
		}
#endif
		else if (detectone(dev)) {
			// Probe succeeded: consume the entry so later drivers skip it
			string_elist_del(&scan_devices, iter);
			++found;
		}
	}
	
#ifdef NEED_BFG_LOWL_VCOM
	if (doall && detectone)
		found += lowlevel_detect_id(_serial_detect_all, detectone, &lowl_vcom, 0, 0);
#endif
	
	if ((forceauto || !(inhibitauto || found)) && autoscan)
		found += autoscan();
	
	return found;
}
  866. static
  867. FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
  868. {
  869. char fullpath[PATH_MAX];
  870. strcpy(fullpath, path);
  871. strcat(fullpath, "/");
  872. if (subdir) {
  873. strcat(fullpath, subdir);
  874. strcat(fullpath, "/");
  875. }
  876. if (sub2) {
  877. strcat(fullpath, sub2);
  878. strcat(fullpath, "/");
  879. }
  880. strcat(fullpath, filename);
  881. return fopen(fullpath, "rb");
  882. }
/* From here on, _open_bitstream is shadowed by a macro: it expands against
 * the caller's local `f` and `filename` variables and returns from the
 * caller as soon as a candidate path opens successfully. */
#define _open_bitstream(path, subdir, sub2) do { \
	f = _open_bitstream(path, subdir, sub2, filename); \
	if (f) \
		return f; \
} while(0)

// Try path directly, then its sibling share/ and package directories
#define _open_bitstream2(path, path3) do { \
	_open_bitstream(path, NULL, path3); \
	_open_bitstream(path, "../share/" PACKAGE, path3); \
	_open_bitstream(path, "../" PACKAGE, path3); \
} while(0)

// Try the driver-specific subdir, then "bitstreams", then the bare path
#define _open_bitstream3(path) do { \
	_open_bitstream2(path, dname); \
	_open_bitstream2(path, "bitstreams"); \
	_open_bitstream2(path, NULL); \
} while(0)
/* Locate and open a bitstream file for driver directory `dname`: search
 * the configured kernel path, the binary's directory and the CWD, each
 * with the conventional subdirectories tried by the macros above. The
 * macros return early on success; reaching the end means not found. */
FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}
  906. void close_device_fd(struct thr_info * const thr)
  907. {
  908. struct cgpu_info * const proc = thr->cgpu;
  909. const int fd = proc->device_fd;
  910. if (fd == -1)
  911. return;
  912. if (close(fd))
  913. applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
  914. else
  915. {
  916. proc->device_fd = -1;
  917. applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
  918. }
  919. }