/*
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/select.h>
#endif

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "compat.h"
#include "deviceapi.h"
#include "fpgautils.h"
#include "logging.h"
#include "miner.h"
#include "util.h"

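/* Credit a completed scanhash pass: update the thread's hash statistics and,
 * when the driver supports work limiting, rescale *max_nonce so each pass
 * lasts roughly one timing cycle (a fifth of the log interval). A hashes
 * value of -1 indicates a scanhash failure; the device is then either
 * reinitialized (if it had been working and restarts are enabled) or
 * disabled. Returns false only if the device was disabled here. */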
bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	const long cycle = opt_log_interval / 5 ? : 1;
	
	if (unlikely(hashes == -1)) {
		time_t now = time(NULL);
		if (difftime(now, cgpu->device_last_not_well) > 1.)
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
		
		if (thr->scanhash_working && opt_restart) {
			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
			thr->scanhash_working = false;
			cgpu->reinit_backoff = 5.2734375;
			hashes = 0;
		} else {
			applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr);
			cgpu->deven = DEV_RECOVER_ERR;
			run_cmd(cmd_idle);
			return false;
		}
	}
	else
		thr->scanhash_working = true;
	
	thr->hashes_done += hashes;
	if (hashes > cgpu->max_hashes)
		cgpu->max_hashes = hashes;
	
	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
	
	// max_nonce management (optional)
	if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) {
		int mult;
		
		if (likely(!max_nonce || *max_nonce == 0xffffffff))
			return true;
		
		mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10;
		mult *= cycle;
		if (*max_nonce > (0xffffffff * 0x400) / mult)
			*max_nonce = 0xffffffff;
		else
			*max_nonce = (*max_nonce * mult) / 0x400;
	} else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce)
		*max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec;
	else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce)
		*max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400));
	
	hashmeter2(thr);
	
	return true;
}

/* A generic wait function for threads that poll, waiting up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		nmsleep(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	gettimeofday(&tv_now, NULL);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		gettimeofday(&tv_now, NULL);
	}
}

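/* Fetch a work item for the thread and run the driver's optional
 * prepare_work hook on it. Returns NULL when no work could be fetched, or
 * (after disabling the processor) when the hook rejected the work. */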
static
struct work *get_and_prepare_work(struct thr_info *thr)
{
	struct cgpu_info *proc = thr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work;
	
	work = get_work(thr);
	if (!work)
		return NULL;
	if (api->prepare_work && !api->prepare_work(thr, work)) {
		free_work(work);
		applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr);
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
		return NULL;
	}
	return work;
}

// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		gettimeofday(&(work->tv_work_start), NULL);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			gettimeofday(&tv_start, NULL);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			gettimeofday(&tv_end, NULL);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;
					
					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}

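/* The functions below implement the asynchronous job state machine used by
 * minerloop_async. do_job_prepare hands the driver its next job; the
 * job_prepare_complete, job_start_complete, and job_results_fetched
 * callbacks are invoked (directly here, or by the driver) as each stage
 * finishes; do_get_results collects a finished job and do_process_results
 * credits its hashes. */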
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}

void job_prepare_complete(struct thr_info *mythr)
{
	if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS))
		return;
	
	if (mythr->work)
	{
		if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart))
			do_get_results(mythr, true);
		else
		{}  // TODO: Set a timer to call do_get_results when job is near complete
	}
	else  // no job currently running
		do_job_start(mythr);
}

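/* Ask the driver for the results of the currently running job;
 * proceed_with_new_job selects whether a fresh job is started once they
 * are in. Drivers without a job_get_results hook skip straight to
 * job_results_fetched. */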
void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct work *work = mythr->work;
	
	mythr->_job_transition_in_progress = true;
	mythr->tv_results_jobstart = mythr->tv_jobstart;
	mythr->_proceed_with_new_job = proceed_with_new_job;
	if (api->job_get_results)
		api->job_get_results(mythr, work);
	else
		job_results_fetched(mythr);
}

void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		struct timeval tv_now;
		
		gettimeofday(&tv_now, NULL);
		do_process_results(mythr, &tv_now, mythr->prev_work, true);
	}
}

void do_job_start(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	thread_reportin(mythr);
	api->job_start(mythr);
}

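/* Called when the device switches over to the newly prepared job: rotate
 * next_work/work/prev_work and stamp the job start time. */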
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	gettimeofday(&tv_now, NULL);
	
	if (mythr->starting_next_work)
	{
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}

void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	gettimeofday(&tv_now, NULL);
	
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}

void job_start_abort(struct thr_info *mythr, bool failure)
{
	struct cgpu_info *proc = mythr->cgpu;
	
	if (failure)
	{
		proc->deven = DEV_RECOVER_ERR;
		run_cmd(cmd_idle);
	}
	mythr->work = NULL;
	mythr->_job_transition_in_progress = false;
}

bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_hashes;
	int64_t hashes = 0;
	
	if (api->job_process_results)
		hashes = api->job_process_results(mythr, work, stopping);
	thread_reportin(mythr);
	
	if (hashes)
	{
		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
			return false;
	}
	
	return true;
}

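/* Sleep until the given timeout expires or one of the thread's notifier
 * sockets becomes readable, draining whichever fired. A mutex_request
 * notification hands exclusive use of the device to the requesting thread
 * via device_mutex/device_cond before continuing. */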
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	gettimeofday(&tv_now, NULL);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds))
		notifier_read(thr->notifier);
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}

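/* Main loop for drivers with fully asynchronous job control: each pass walks
 * every processor on the device, advancing its job state machine as needed,
 * then sleeps in do_notifier_select until the earliest pending timer fires
 * or an event arrives. */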
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		gettimeofday(&tv_now, NULL);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(is_running && !mythr->_job_transition_in_progress))
				{
disabled: ;
					mythr->tv_morework.tv_sec = -1;
					if (mythr->busy_state != TBS_GETTING_RESULTS)
						do_get_results(mythr, false);
					else
						// Avoid starting job when pending result fetch completes
						mythr->_proceed_with_new_job = false;
				}
			}
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

static
void do_queue_flush(struct thr_info *mythr)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	
	api->queue_flush(mythr);
	if (mythr->next_work)
	{
		free_work(mythr->next_work);
		mythr->next_work = NULL;
	}
}

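/* Main loop for drivers that manage their own queue of jobs: keep the
 * driver's queue topped up via queue_append, flush it on work restarts or
 * when the processor should stop running, and poll the driver for results. */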
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	if (thr->work_restart_notifier[1] == -1)
		notifier_init(thr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		gettimeofday(&tv_now, NULL);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(!mythr->_last_sbr_state))
				{
					mt_disable_finish(mythr);
					mythr->_last_sbr_state = should_be_running;
				}
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(mythr->_last_sbr_state))
			{
				mythr->_last_sbr_state = should_be_running;
				do_queue_flush(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}

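/* Per-device mining thread entry point: run the driver's thread_init hook,
 * wait for the go-ahead notification, then enter the appropriate miner loop.
 * On exit, this processor (and any following processors without their own
 * threads) is marked dead and the driver's thread_shutdown hook is run. */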
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	
	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);
	
	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}
	
	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start
	
	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");
	
out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ((proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	
	nmsleep(1000);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	
	notifier_destroy(mythr->notifier);
	
	return NULL;
}

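/* Register a newly detected device on the devices_new list. For multi-
 * processor devices, a slave cgpu_info is cloned for each extra processor
 * and the proc_repr suffix ('a', 'b', ...; or 'aa', 'ab', ... when there
 * are more than 26 processors) is adjusted to identify each one. */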
bool add_cgpu(struct cgpu_info *cgpu)
{
	int lpcount;
	
	renumber_cgpu(cgpu);
	if (!cgpu->procs)
		cgpu->procs = 1;
	lpcount = cgpu->procs;
	cgpu->device = cgpu;
	
	cgpu->dev_repr = malloc(6);
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	cgpu->dev_repr_ns = malloc(6);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
	
	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
	
	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
	devices_new[total_devices_new++] = cgpu;
	
	if (lpcount > 1)
	{
		int ns;
		int tpp = cgpu->threads / lpcount;
		struct cgpu_info **nlp_p, *slave;
		const bool manylp = (lpcount > 26);
		const char *as = (manylp ? "aa" : "a");
		
		// Note, strcpy instead of assigning a byte to get the \0 too
		strcpy(&cgpu->proc_repr[5], as);
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], as);
		
		nlp_p = &cgpu->next_proc;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = malloc(sizeof(*slave));
			*slave = *cgpu;
			slave->proc_id = i;
			if (manylp)
			{
				slave->proc_repr[5] += i / 26;
				slave->proc_repr[6] += i % 26;
				slave->proc_repr_ns[ns    ] += i / 26;
				slave->proc_repr_ns[ns + 1] += i % 26;
			}
			else
			{
				slave->proc_repr[5] += i;
				slave->proc_repr_ns[ns] += i;
			}
			slave->threads = tpp;
			devices_new[total_devices_new++] = slave;
			*nlp_p = slave;
			nlp_p = &slave->next_proc;
		}
		*nlp_p = NULL;
		cgpu->proc_id = 0;
		cgpu->threads -= (tpp * (lpcount - 1));
	}
	cgpu->last_device_valid_work = time(NULL);
	return true;
}