device-cpu.c

/*
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2011-2012 Luke Dashjr
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <signal.h>

#include <sys/stat.h>
#include <sys/types.h>

#ifndef WIN32
#include <sys/resource.h>
#endif
#include <libgen.h>

#include "compat.h"
#include "miner.h"
#include "bench_block.h"
#include "device-cpu.h"

#if defined(unix)
	#include <errno.h>
	#include <fcntl.h>
#endif
#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>
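
/* Lower the scheduling priority of CPU mining threads so they yield to
 * interactive work: prefer SCHED_IDLE where the libc defines it and fall
 * back to SCHED_BATCH; failures are harmless and simply ignored.
 * affine_to_cpu() pins a mining thread to a single core. */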
static inline void drop_policy(void)
{
	struct sched_param param;

	/* Priority must be zero for SCHED_IDLE/SCHED_BATCH */
	param.sched_priority = 0;
#ifdef SCHED_BATCH
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* Pass the size of the set itself, not of a pointer to it */
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
#endif

/* TODO: resolve externals */
extern bool submit_work_sync(struct thr_info *thr, const struct work *work_in);
extern char *set_int_range(const char *arg, int *i, int min, int max);
extern int dev_from_id(int thr_id);

#ifdef WANT_CPUMINE
static size_t max_name_len = 0;
static char *name_spaces_pad = NULL;
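
/* The two tables below are indexed by enum sha256_algos; only the
 * implementations compiled into this build get an entry. */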
const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = "sse2_32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = "sse4_64",
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = "altivec_4way",
#endif
};

static const sha256_func sha256_funcs[] = {
	[ALGO_C] = (sha256_func)scanhash_c,
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2,
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = (sha256_func)ScanHash_altivec_4way,
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = (sha256_func)scanhash_via,
#endif
	[ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp,
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32,
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32,
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64,
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64
#endif
};
#endif

#ifdef WANT_CPUMINE
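/* Default to the fastest SSE2 implementation the build and compiler
 * support, falling back to the portable C scanhash. */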
#if defined(WANT_X8664_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_64;
#elif defined(WANT_X8632_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_32;
#else
enum sha256_algos opt_algo = ALGO_C;
#endif
bool opt_usecpu = false;
static bool forced_n_threads;
#endif

#ifdef WANT_CPUMINE

// Algo benchmark, crash-prone, system independent stage
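// Runs one scanhash pass of the chosen algorithm over a canned benchmark
// block and returns the measured rate in MH/s (nonces per microsecond),
// or -1.0 if no time elapsed. It may crash (e.g. illegal instruction) on
// CPUs lacking the required extensions, hence the sandboxing in stage2.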
double bench_algo_stage3(
	enum sha256_algos algo
)
{
	// Use a random work block pulled from a pool
	static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK };
	struct work work __attribute__((aligned(128)));

	size_t bench_size = sizeof(work);
	size_t work_size = sizeof(bench_block);
	size_t min_size = (work_size < bench_size ? work_size : bench_size);
	memset(&work, 0, sizeof(work));
	memcpy(&work, &bench_block, min_size);

	struct work_restart dummy;
	work_restart = &dummy;

	struct timeval end;
	struct timeval start;
	uint32_t max_nonce = (1<<22);
	uint32_t last_nonce = 0;

	gettimeofday(&start, 0);
	{
		sha256_func func = sha256_funcs[algo];
		(*func)(
			0,
			work.midstate,
			work.data,
			work.hash1,
			work.hash,
			work.target,
			max_nonce,
			&last_nonce,
			work.blk.nonce
		);
	}
	gettimeofday(&end, 0);
	work_restart = NULL;

	uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
	uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
	uint64_t usec_elapsed = usec_end - usec_start;

	double rate = -1.0;
	if (0<usec_elapsed) {
		rate = (1.0*(last_nonce+1))/usec_elapsed;
	}
	return rate;
}

#if defined(unix)

// Change non-blocking status on a file descriptor
static void set_non_blocking(
	int fd,
	int yes
)
{
	int flags = fcntl(fd, F_GETFL, 0);
	if (flags<0) {
		perror("fcntl(GET) failed");
		exit(1);
	}
	flags = yes ? (flags|O_NONBLOCK) : (flags&~O_NONBLOCK);
	int r = fcntl(fd, F_SETFL, flags);
	if (r<0) {
		perror("fcntl(SET) failed");
		exit(1);
	}
}

#endif // defined(unix)

// Algo benchmark, crash-safe, system-dependent stage
static double bench_algo_stage2(
	enum sha256_algos algo
)
{
	// Here, the gig is to safely run a piece of code that potentially
	// crashes. Unfortunately, the Right Way (tm) to do this is rather
	// heavily platform dependent :(

	double rate = -1.23457;

#if defined(unix)
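	// On unix the benchmark is forked off into a child process: the child
	// runs the crash-prone stage3 and writes its result into a non-blocking
	// pipe, while the parent polls waitpid() and reads whatever result (if
	// any) made it back before the child died or timed out.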

	// Make a pipe: [readFD, writeFD]
	int pfd[2];
	int r = pipe(pfd);
	if (r<0) {
		perror("pipe - failed to create pipe for --algo auto");
		exit(1);
	}

	// Make pipe non blocking
	set_non_blocking(pfd[0], 1);
	set_non_blocking(pfd[1], 1);

	// Don't allow a crashing child to kill the main process
	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
	if (SIG_ERR==sr0 || SIG_ERR==sr1) {
		perror("signal - failed to edit signal mask for --algo auto");
		exit(1);
	}

	// Fork a child to do the actual benchmarking
	pid_t child_pid = fork();
	if (child_pid<0) {
		perror("fork - failed to create a child process for --algo auto");
		exit(1);
	}

	// Do the dangerous work in the child, knowing we might crash
	if (0==child_pid) {

		// TODO: some umask trickery to prevent coredumps

		// Benchmark this algorithm
		double r = bench_algo_stage3(algo);

		// We survived, send result to parent and bail
		int loop_count = 0;
		while (1) {
			ssize_t bytes_written = write(pfd[1], &r, sizeof(r));
			int try_again = (0==bytes_written || (bytes_written<0 && EAGAIN==errno));
			int success = (sizeof(r)==(size_t)bytes_written);

			if (success)
				break;

			if (!try_again) {
				perror("write - child failed to write benchmark result to pipe");
				exit(1);
			}

			if (5<loop_count) {
				applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count);
				exit(1);
			}
			++loop_count;
			sleep(1);
		}
		exit(0);
	}

	// Parent waits for a result from child
	int loop_count = 0;
	while (1) {

		// Wait for child to die
		int status;
		int r = waitpid(child_pid, &status, WNOHANG);
		if ((child_pid==r) || (r<0 && ECHILD==errno)) {

			// Child died somehow. Grab result and bail
			double tmp;
			ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp));
			if (sizeof(tmp)==(size_t)bytes_read)
				rate = tmp;
			break;

		} else if (r<0) {
			perror("bench_algo: waitpid failed. giving up.");
			exit(1);
		}

		// Give up on the child after ~60s
		if (60<loop_count) {
			kill(child_pid, SIGKILL);
			waitpid(child_pid, &status, 0);
			break;
		}

		// Wait a bit longer
		++loop_count;
		sleep(1);
	}

	// Close the pipe
	r = close(pfd[0]);
	if (r<0) {
		perror("close - failed to close read end of pipe for --algo auto");
		exit(1);
	}
	r = close(pfd[1]);
	if (r<0) {
		perror("close - failed to close write end of pipe for --algo auto");
		exit(1);
	}

#elif defined(WIN32)
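	// On Windows there is no fork(), so the benchmark re-launches this same
	// cgminer binary with --bench-algo and attaches to it as a debugger
	// (DEBUG_ONLY_THIS_PROCESS); the child writes its result into a named
	// shared-memory mapping, and the parent terminates the child once it
	// stops producing ordinary debug events (i.e. it finished or crashed).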

	// Get handle to current exe
	HINSTANCE module = GetModuleHandle(0);
	if (!module) {
		applog(LOG_ERR, "failed to retrieve module handle");
		exit(1);
	}

	// Create a unique name
	char unique_name[32];
	snprintf(
		unique_name,
		sizeof(unique_name)-1,
		"cgminer-%p",
		(void*)module
	);

	// Create and initialise a chunk of shared memory
	HANDLE map_handle = CreateFileMapping(
		INVALID_HANDLE_VALUE,   // use paging file
		NULL,                   // default security attributes
		PAGE_READWRITE,         // read/write access
		0,                      // size: high 32-bits
		4096,                   // size: low 32-bits
		unique_name             // name of map object
	);
	if (NULL==map_handle) {
		applog(LOG_ERR, "could not create shared memory");
		exit(1);
	}

	void *shared_mem = MapViewOfFile(
		map_handle,     // object to map view of
		FILE_MAP_WRITE, // read/write access
		0,              // high offset: map from
		0,              // low offset: beginning
		0               // default: map entire file
	);
	if (NULL==shared_mem) {
		applog(LOG_ERR, "could not map shared memory");
		exit(1);
	}
	SetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name);
	CopyMemory(shared_mem, &rate, sizeof(rate));

	// Get path to current exe
	char cmd_line[256 + MAX_PATH];
	const size_t n = sizeof(cmd_line)-200;
	DWORD size = GetModuleFileName(module, cmd_line, n);
	if (0==size) {
		applog(LOG_ERR, "failed to retrieve module path");
		exit(1);
	}

	// Construct new command line based on that
	char *p = strlen(cmd_line) + cmd_line;
	sprintf(p, " --bench-algo %d", algo);
	SetEnvironmentVariable("CGMINER_BENCH_ALGO", "1");

	// Launch a debug copy of cgminer
	STARTUPINFO startup_info;
	PROCESS_INFORMATION process_info;
	ZeroMemory(&startup_info, sizeof(startup_info));
	ZeroMemory(&process_info, sizeof(process_info));
	startup_info.cb = sizeof(startup_info);

	BOOL ok = CreateProcess(
		NULL,                   // No module name (use command line)
		cmd_line,               // Command line
		NULL,                   // Process handle not inheritable
		NULL,                   // Thread handle not inheritable
		FALSE,                  // Set handle inheritance to FALSE
		DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child
		NULL,                   // Use parent's environment block
		NULL,                   // Use parent's starting directory
		&startup_info,          // Pointer to STARTUPINFO structure
		&process_info           // Pointer to PROCESS_INFORMATION structure
	);
	if (!ok) {
		applog(LOG_ERR, "CreateProcess failed with error %d\n", GetLastError());
		exit(1);
	}

	// Debug the child (only clean way to catch exceptions)
	while (1) {

		// Wait for child to do something
		DEBUG_EVENT debug_event;
		ZeroMemory(&debug_event, sizeof(debug_event));

		BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000);
		if (!ok)
			break;

		// Decide if event is "normal"
		int go_on =
			CREATE_PROCESS_DEBUG_EVENT == debug_event.dwDebugEventCode ||
			CREATE_THREAD_DEBUG_EVENT  == debug_event.dwDebugEventCode ||
			EXIT_THREAD_DEBUG_EVENT    == debug_event.dwDebugEventCode ||
			EXCEPTION_DEBUG_EVENT      == debug_event.dwDebugEventCode ||
			LOAD_DLL_DEBUG_EVENT       == debug_event.dwDebugEventCode ||
			OUTPUT_DEBUG_STRING_EVENT  == debug_event.dwDebugEventCode ||
			UNLOAD_DLL_DEBUG_EVENT     == debug_event.dwDebugEventCode;
		if (!go_on)
			break;

		// Some exceptions are also "normal", apparently.
		if (EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode) {
			int go_on =
				EXCEPTION_BREAKPOINT == debug_event.u.Exception.ExceptionRecord.ExceptionCode;
			if (!go_on)
				break;
		}

		// If nothing unexpected happened, let child proceed
		ContinueDebugEvent(
			debug_event.dwProcessId,
			debug_event.dwThreadId,
			DBG_CONTINUE
		);
	}

	// Clean up child process
	TerminateProcess(process_info.hProcess, 1);
	CloseHandle(process_info.hProcess);
	CloseHandle(process_info.hThread);

	// Reap return value and cleanup
	CopyMemory(&rate, shared_mem, sizeof(rate));
	(void)UnmapViewOfFile(shared_mem);
	(void)CloseHandle(map_handle);

#else

	// Not linux, not unix, not WIN32 ... do our best
	rate = bench_algo_stage3(algo);

#endif // defined(unix)

	// Done
	return rate;
}
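
// Benchmark a single algorithm: log the rate it achieves (or that it
// fails outright on this platform) and, if it is the fastest seen so
// far, record it in *best_rate / *best_algo.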
static void bench_algo(
	double *best_rate,
	enum sha256_algos *best_algo,
	enum sha256_algos algo
)
{
	size_t n = max_name_len - strlen(algo_names[algo]);
	memset(name_spaces_pad, ' ', n);
	name_spaces_pad[n] = 0;

	applog(
		LOG_ERR,
		"\"%s\"%s : benchmarking algorithm ...",
		algo_names[algo],
		name_spaces_pad
	);

	double rate = bench_algo_stage2(algo);
	if (rate<0.0) {
		applog(
			LOG_ERR,
			"\"%s\"%s : algorithm fails on this platform",
			algo_names[algo],
			name_spaces_pad
		);
	} else {
		applog(
			LOG_ERR,
			"\"%s\"%s : algorithm runs at %.5f MH/s",
			algo_names[algo],
			name_spaces_pad,
			rate
		);
		if (*best_rate<rate) {
			*best_rate = rate;
			*best_algo = algo;
		}
	}
}

// Figure out the longest algorithm name
void init_max_name_len()
{
	size_t i;
	size_t nb_names = sizeof(algo_names)/sizeof(algo_names[0]);
	for (i=0; i<nb_names; ++i) {
		const char *p = algo_names[i];
		size_t name_len = p ? strlen(p) : 0;
		if (max_name_len<name_len)
			max_name_len = name_len;
	}

	name_spaces_pad = (char*) malloc(max_name_len+16);
	if (0==name_spaces_pad) {
		perror("malloc failed");
		exit(1);
	}
}

// Pick the fastest CPU hasher
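// by benchmarking every implementation compiled into this binary and
// keeping the one that reports the highest MH/s.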
static enum sha256_algos pick_fastest_algo()
{
	double best_rate = -1.0;
	enum sha256_algos best_algo = 0;
	applog(LOG_ERR, "benchmarking all sha256 algorithms ...");

	bench_algo(&best_rate, &best_algo, ALGO_C);

#if defined(WANT_SSE2_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_4WAY);
#endif

#if defined(WANT_VIA_PADLOCK)
	bench_algo(&best_rate, &best_algo, ALGO_VIA);
#endif

	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP);

#if defined(WANT_CRYPTOPP_ASM32)
	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
#endif

#if defined(WANT_X8632_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
#endif

#if defined(WANT_X8664_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
#endif

#if defined(WANT_X8664_SSE4)
	bench_algo(&best_rate, &best_algo, ALGO_SSE4_64);
#endif

#if defined(WANT_ALTIVEC_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_ALTIVEC_4WAY);
#endif

	size_t n = max_name_len - strlen(algo_names[best_algo]);
	memset(name_spaces_pad, ' ', n);
	name_spaces_pad[n] = 0;
	applog(
		LOG_ERR,
		"\"%s\"%s : is fastest algorithm at %.5f MH/s",
		algo_names[best_algo],
		name_spaces_pad,
		best_rate
	);
	return best_algo;
}

/* FIXME: Use asprintf for better errors. */
char *set_algo(const char *arg, enum sha256_algos *algo)
{
	enum sha256_algos i;

	if (!strcmp(arg, "auto")) {
		*algo = pick_fastest_algo();
		return NULL;
	}

	for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
		if (algo_names[i] && !strcmp(arg, algo_names[i])) {
			*algo = i;
			return NULL;
		}
	}
	return "Unknown algorithm";
}

void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
{
	strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
}
#endif

#ifdef WANT_CPUMINE
char *force_nthreads_int(const char *arg, int *i)
{
	forced_n_threads = true;
	return set_int_range(arg, i, 0, 9999);
}
#endif

#ifdef WANT_CPUMINE
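// Count the logical CPUs in the box, decide how many CPU mining threads
// to run (none by default when other devices are present and CPU mining
// wasn't explicitly requested or forced), and register one cgpu_info per
// thread with the common code.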
static void cpu_detect()
{
	int i;

	// Reckon number of cores in the box
#if defined(WIN32)
	{
		DWORD system_am;
		DWORD process_am;
		BOOL ok = GetProcessAffinityMask(
			GetCurrentProcess(),
			&system_am,
			&process_am
		);
		if (!ok) {
			applog(LOG_ERR, "couldn't figure out number of processors :(");
			num_processors = 1;
		} else {
			size_t n = 32;
			num_processors = 0;
			while (n--)
				if (process_am & (1U<<n))
					++num_processors;
		}
	}
#else
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
#endif /* !WIN32 */

	if (opt_n_threads < 0 || !forced_n_threads) {
		if (total_devices && !opt_usecpu)
			opt_n_threads = 0;
		else
			opt_n_threads = num_processors;
	}
	if (num_processors < 1)
		return;

	if (total_devices + opt_n_threads > MAX_DEVICES)
		opt_n_threads = MAX_DEVICES - total_devices;
	cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
	if (unlikely(!cpus))
		quit(1, "Failed to calloc cpus");
	for (i = 0; i < opt_n_threads; ++i) {
		struct cgpu_info *cgpu;

		cgpu = devices[total_devices + i] = &cpus[i];
		cgpu->api = &cpu_api;
		cgpu->enabled = true;
		cgpu->device_id = i;
		cgpu->threads = 1;
	}
	total_devices += opt_n_threads;
}

static bool cpu_thread_prepare(struct thr_info *thr)
{
	thread_reportin(thr);
	return true;
}
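
/* Upper bound on the nonce range handed to one cpu_scanhash() call; the
 * common code is expected to use this so CPU threads return and report
 * in reasonably often. */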
static uint64_t cpu_can_limit_work(struct thr_info *thr)
{
	return 0xfffff;
}

static bool cpu_thread_init(struct thr_info *thr)
{
	const int thr_id = thr->id;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors);
	return true;
}
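
/* Scan nonces upwards from work->blk.nonce with the selected sha256
 * implementation. Any share found is submitted synchronously and
 * scanning resumes from the next nonce; returns the number of nonces
 * examined, or 0 if none were scanned. */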
static uint64_t cpu_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
{
	const int thr_id = thr->id;

	uint32_t first_nonce = work->blk.nonce;
	uint32_t last_nonce;
	bool rc;

CPUSearch:
	last_nonce = first_nonce;
	rc = false;

	/* scan nonces for a proof-of-work hash */
	{
		sha256_func func = sha256_funcs[opt_algo];
		rc = (*func)(
			thr_id,
			work->midstate,
			work->data,
			work->hash1,
			work->hash,
			work->target,
			max_nonce,
			&last_nonce,
			work->blk.nonce
		);
	}

	/* if nonce found, submit work */
	if (unlikely(rc)) {
		if (opt_debug)
			applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
		if (unlikely(!submit_work_sync(thr, work))) {
			applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
		}
		work->blk.nonce = last_nonce + 1;
		goto CPUSearch;
	}
	else if (unlikely(last_nonce == first_nonce))
		return 0;

	work->blk.nonce = last_nonce + 1;
	return last_nonce - first_nonce + 1;
}
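
/* Hooks exported to the common mining core for the CPU "device". */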
struct device_api cpu_api = {
	.name = "CPU",
	.api_detect = cpu_detect,
	.thread_prepare = cpu_thread_prepare,
	.can_limit_work = cpu_can_limit_work,
	.thread_init = cpu_thread_init,
	.scanhash = cpu_scanhash,
};
#endif