/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "fpgautils.h"
#include "logging.h"
#include "miner.h"
#include "util.h"

bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce)
{
	struct timeval tv_now, tv_delta;
	
	timer_set_now(&tv_now);
	timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta);
	thr->_tv_last_hashes_done_call = tv_now;
	
	return hashes_done(thr, hashes, &tv_delta, max_nonce);
}
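
/* Example (illustrative sketch, not part of the build): a polling driver that
 * counts hashes itself would typically report them once per poll with
 * hashes_done2(), which measures the elapsed interval internally since the
 * previous call. The mydrv_* helper is hypothetical:
 *
 *     static void mydrv_poll(struct thr_info * const thr)
 *     {
 *         int64_t hashes = mydrv_read_hash_count(thr);  // hypothetical helper
 *         hashes_done2(thr, hashes, NULL);  // NULL: driver cannot limit work
 *     }
 */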

/* A generic wait function for threads that poll. Waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition was
 * met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
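
/* Example (sketch): a driver waiting out a 100 ms hashing pass while staying
 * responsive to work restarts might do:
 *
 *     if (restart_wait(thr, 100) == 0)
 *         ;  // work restart requested; abandon the current job
 *     else
 *         ;  // ETIMEDOUT: the full interval elapsed, keep hashing
 */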

static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
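
/* Example (illustrative sketch): the scanhash contract as exercised above.
 * The driver hashes from work->blk.nonce up to the nonce boundary it is
 * passed and returns the number of hashes actually performed, or -1 on a
 * hardware failure (which triggers the reinit/disable path in hashes_done()).
 * All mydrv_* names are hypothetical:
 *
 *     static int64_t mydrv_scanhash(struct thr_info *thr, struct work *work,
 *                                   int64_t max_nonce)
 *     {
 *         if (!mydrv_send_job(thr, work))       // hypothetical I/O helper
 *             return -1;                        // reported as device failure
 *         return mydrv_wait_and_count(thr, max_nonce);  // hashes completed
 *     }
 */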

bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	if (likely(mythr->prev_work))
	{
		struct timeval tv_now;
		
		timer_set_now(&tv_now);
		do_process_results(mythr, &tv_now, mythr->prev_work, true);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	if (unlikely(!mythr->prev_work))
		return;
	
	timer_set_now(&tv_now);
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	for (proc = cgpu; proc; proc = proc->next_proc)
	{
		mythr = proc->thr[0];
		timer_set_now(&mythr->tv_watchdog);
		proc->disable_watchdog = true;
	}
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(is_running && !mythr->_job_transition_in_progress))
				{
disabled: ;
					mythr->tv_morework.tv_sec = -1;
					if (mythr->busy_state != TBS_GETTING_RESULTS)
						do_get_results(mythr, false);
					else
						// Avoid starting job when pending result fetch completes
						mythr->_proceed_with_new_job = false;
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			if (timer_passed(&mythr->tv_watchdog, &tv_now))
			{
				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
				bfg_watchdog(proc, &tv_now);
			}
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
		}
		do_notifier_select(thr, &tv_timeout);
	}
}
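
/* Example (illustrative summary): minerloop_async drives a per-processor job
 * state machine using the asynchronous driver callbacks referenced above.
 * A driver opting in would supply, roughly:
 *
 *     drv->minerloop = minerloop_async;
 *     drv->job_prepare = ...;          // stage work; called from do_job_prepare
 *     drv->job_start = ...;            // begin hashing; called from do_job_start
 *     drv->job_get_results = ...;      // optional; else results assumed fetched
 *     drv->job_process_results = ...;  // optional; returns hashes completed
 *     drv->poll = ...;                 // run whenever tv_poll expires
 *
 * The usual flow is job_prepare -> job_start -> (job runs) -> job_get_results
 * -> job_process_results; the driver signals phase completion back through
 * helpers such as mt_job_transition(), job_start_complete(), and
 * job_results_fetched() (an inference from the exported functions above).
 */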

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	if (thr->work_restart_notifier[1] == -1)
		notifier_init(thr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(!mythr->_last_sbr_state))
				{
					mt_disable_finish(mythr);
					mythr->_last_sbr_state = should_be_running;
				}
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(mythr->_last_sbr_state))
			{
				mythr->_last_sbr_state = should_be_running;
				do_queue_flush(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		do_notifier_select(thr, &tv_timeout);
	}
}
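
/* Example (illustrative sketch): the queue contract used above. A driver sets
 * drv->minerloop = minerloop_queue and implements queue_append/queue_flush.
 * When queue_append returns false, the loop stashes the work in
 * mythr->next_work; the driver should also set thr->queue_full then, or the
 * fill loop would spin (an inference from the loop condition above). The
 * mydrv_* helpers are hypothetical:
 *
 *     static bool mydrv_queue_append(struct thr_info *thr, struct work *work)
 *     {
 *         if (mydrv_slots_free(thr) == 0)   // hypothetical capacity check
 *         {
 *             thr->queue_full = true;
 *             return false;                 // loop keeps work in next_work
 *         }
 *         return mydrv_enqueue(thr, work);  // hypothetical device enqueue
 *     }
 */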

void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	if (cgpu->deven != DEV_ENABLED)
		mt_disable_start(mythr);
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	
	mythr->getwork = 0;
	mythr->has_pth = false;
	
	cgsleep_ms(1000);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

bool add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	renumber_cgpu(cgpu);
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	cgpu->dev_repr_ns = malloc(6);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
	
#ifdef HAVE_FPGAUTILS
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
#endif
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int ns;
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		const bool manylp = (lpcount > 26);
		const char *as = (manylp ? "aa" : "a");
		
		// Note, strcpy instead of assigning a byte to get the \0 too
		strcpy(&cgpu->proc_repr[5], as);
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], as);
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			if (manylp)
			{
				slave->proc_repr[5] += i / 26;
				slave->proc_repr[6] += i % 26;
				slave->proc_repr_ns[ns    ] += i / 26;
				slave->proc_repr_ns[ns + 1] += i % 26;
			}
			else
			{
				slave->proc_repr[5] += i;
				slave->proc_repr_ns[ns] += i;
			}
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	cgpu->last_device_valid_work = time(NULL);
	return true;
}
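
/* Example (illustrative sketch): a detect routine typically allocates and
 * fills a cgpu_info, then registers it with add_cgpu(). All mydrv names are
 * hypothetical:
 *
 *     static bool mydrv_detect_one(const char *devpath)
 *     {
 *         struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
 *         cgpu->drv = &mydrv_drv;               // hypothetical device_drv
 *         cgpu->device_path = strdup(devpath);
 *         cgpu->threads = 1;
 *         cgpu->procs = 1;                      // processors per device
 *         return add_cgpu(cgpu);
 *     }
 */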

void add_cgpu_live(void *p)
{
	add_cgpu(p);
}

bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
{
	int old_total_devices = total_devices_new;
	
	if (!prev_cgpu)
		return add_cgpu(cgpu);
	
	while (prev_cgpu->next_proc)
		prev_cgpu = prev_cgpu->next_proc;
	
	if (!add_cgpu(cgpu))
		return false;
	
	prev_cgpu->next_proc = devices_new[old_total_devices];
	return true;
}

int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags)
{
	struct string_elist *iter, *tmp;
	const char *dev, *colon;
	bool inhibitauto = flags & 4;
	char found = 0;
	bool forceauto = flags & 1;
	bool hasname;
	size_t namel = strlen(api->name);
	size_t dnamel = strlen(api->dname);
	
#ifdef HAVE_FPGAUTILS
	clear_detectone_meta_info();
#endif
	
	DL_FOREACH_SAFE(scan_devices, iter, tmp) {
		dev = iter->string;
		if ((colon = strchr(dev, ':')) && colon[1] != '\0') {
			size_t idlen = colon - dev;
			
			// allow either name:device or dname:device
			if ((idlen != namel || strncasecmp(dev, api->name, idlen))
			 && (idlen != dnamel || strncasecmp(dev, api->dname, idlen)))
				continue;
			
			dev = colon + 1;
			hasname = true;
		}
		else
			hasname = false;
		
		if (!strcmp(dev, "auto"))
			forceauto = true;
		else
		if (!strcmp(dev, "noauto"))
			inhibitauto = true;
		else
		if ((flags & 2) && !hasname)
			continue;
		else
		if (!detectone)
		{}  // do nothing
#ifdef HAVE_FPGAUTILS
		else
		if (serial_claim(dev, NULL))
		{
			applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev);
			string_elist_del(&scan_devices, iter);
		}
#endif
		else
		if (detectone(dev)) {
			string_elist_del(&scan_devices, iter);
			inhibitauto = true;
			++found;
		}
	}
	
	if ((forceauto || !inhibitauto) && autoscan)
		found += autoscan();
	
	return found;
}
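
/* Example (illustrative sketch): a driver's detect entry point usually wraps
 * _serial_detect with its own detectone/autoscan callbacks. The flag values
 * mirror the bits tested above (1 = force autoscan, 2 = named devices only,
 * 4 = inhibit autoscan); mydrv names are hypothetical:
 *
 *     static int mydrv_detect_auto(void)
 *     {
 *         return 0;  // probe well-known locations; return number found
 *     }
 *
 *     static void mydrv_detect(void)
 *     {
 *         _serial_detect(&mydrv_drv, mydrv_detect_one, mydrv_detect_auto, 0);
 *     }
 */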

static
FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename)
{
	char fullpath[PATH_MAX];
	
	strcpy(fullpath, path);
	strcat(fullpath, "/");
	if (subdir) {
		strcat(fullpath, subdir);
		strcat(fullpath, "/");
	}
	if (sub2) {
		strcat(fullpath, sub2);
		strcat(fullpath, "/");
	}
	strcat(fullpath, filename);
	return fopen(fullpath, "rb");
}

#define _open_bitstream(path, subdir, sub2)  do {  \
	f = _open_bitstream(path, subdir, sub2, filename);  \
	if (f)  \
		return f;  \
} while(0)

#define _open_bitstream2(path, path3)  do {  \
	_open_bitstream(path, NULL, path3);  \
	_open_bitstream(path, "../share/" PACKAGE, path3);  \
	_open_bitstream(path, "../" PACKAGE, path3);  \
} while(0)

#define _open_bitstream3(path)  do {  \
	_open_bitstream2(path, dname);  \
	_open_bitstream2(path, "bitstreams");  \
	_open_bitstream2(path, NULL);  \
} while(0)

FILE *open_bitstream(const char *dname, const char *filename)
{
	FILE *f;
	
	_open_bitstream3(opt_kernel_path);
	_open_bitstream3(cgminer_path);
	_open_bitstream3(".");
	return NULL;
}
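
/* Example (sketch): loading an FPGA bitstream, searching the standard
 * locations tried above (opt_kernel_path, cgminer_path, "."), with a
 * hypothetical driver subdirectory and file name:
 *
 *     FILE *f = open_bitstream("mydrv", "mydrv-fpga.bit");
 *     if (!f)
 *         applog(LOG_ERR, "Unable to find bitstream file");
 */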

void close_device_fd(struct thr_info * const thr)
{
	struct cgpu_info * const proc = thr->cgpu;
	const int fd = proc->device_fd;
	
	if (fd == -1)
		return;
	
	if (close(fd))
		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
	else
	{
		proc->device_fd = -1;
		applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr);
	}
}