/* device-cpu.c */
  1. /*
  2. * Copyright 2011-2012 Con Kolivas
  3. * Copyright 2011-2012 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the Free
  8. * Software Foundation; either version 2 of the License, or (at your option)
  9. * any later version. See COPYING for more details.
  10. */
  11. #include "config.h"
  12. #include <stdio.h>
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include <stdbool.h>
  16. #include <stdint.h>
  17. #include <unistd.h>
  18. #include <signal.h>
  19. #include <sys/stat.h>
  20. #include <sys/types.h>
  21. #ifndef WIN32
  22. #include <sys/resource.h>
  23. #endif
  24. #include <libgen.h>
  25. #include "compat.h"
  26. #include "miner.h"
  27. #include "bench_block.h"
  28. #include "device-cpu.h"
  29. #if defined(unix)
  30. #include <errno.h>
  31. #include <fcntl.h>
  32. #endif
  33. #ifdef __linux /* Linux specific policy and affinity management */
  34. #include <sched.h>
  35. static inline void drop_policy(void)
  36. {
  37. struct sched_param param;
  38. #ifdef SCHED_BATCH
  39. #ifdef SCHED_IDLE
  40. if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
  41. #endif
  42. sched_setscheduler(0, SCHED_BATCH, &param);
  43. #endif
  44. }
  45. static inline void affine_to_cpu(int id, int cpu)
  46. {
  47. cpu_set_t set;
  48. CPU_ZERO(&set);
  49. CPU_SET(cpu, &set);
  50. sched_setaffinity(0, sizeof(&set), &set);
  51. applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
  52. }
  53. #else
/* No-op fallbacks for platforms without Linux scheduler/affinity APIs. */
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int id, int cpu)
{
}
  60. #endif
  61. /* TODO: resolve externals */
/* TODO: resolve externals */
extern bool submit_work_sync(struct thr_info *thr, const struct work *work_in);
extern char *set_int_range(const char *arg, int *i, int min, int max);
extern int dev_from_id(int thr_id);

#ifdef WANT_CPUMINE

/* Longest algorithm name and a scratch buffer of spaces used to align
 * benchmark log output; both are populated by init_max_name_len(). */
static size_t max_name_len = 0;
static char *name_spaces_pad = NULL;
/* Human-readable name for each sha256 algorithm, indexed by enum
 * sha256_algos. Entries are conditional on build-time CPU support, so
 * the table may contain NULL holes for algorithms not compiled in. */
const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = "sse2_32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = "sse4_64",
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = "altivec_4way",
#endif
};
/* Dispatch table mapping enum sha256_algos to its scanhash implementation.
 * Must stay index-parallel with algo_names[] above; entries are gated by
 * the same build-time feature macros. */
static const sha256_func sha256_funcs[] = {
	[ALGO_C] = (sha256_func)scanhash_c,
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2,
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = (sha256_func) ScanHash_altivec_4way,
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = (sha256_func)scanhash_via,
#endif
	[ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp,
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32,
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32,
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64,
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64
#endif
};
  118. #endif
  119. #ifdef WANT_CPUMINE
/* Default CPU algorithm: pick the best SSE2 variant the compiler is
 * targeting, falling back to the portable C implementation. */
#if defined(WANT_X8664_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_64;
#elif defined(WANT_X8632_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_32;
#else
enum sha256_algos opt_algo = ALGO_C;
#endif
bool opt_usecpu = false;	/* mine on the CPU even when other devices exist */
static int cpur_thr_id;		/* thread whose queue handles CPU device reinits */
static bool forced_n_threads;	/* user explicitly set a thread count on the CLI */
  130. #endif
  131. #ifdef WANT_CPUMINE
  132. // Algo benchmark, crash-prone, system independent stage
  133. double bench_algo_stage3(
  134. enum sha256_algos algo
  135. )
  136. {
  137. // Use a random work block pulled from a pool
  138. static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK };
  139. struct work work __attribute__((aligned(128)));
  140. size_t bench_size = sizeof(work);
  141. size_t work_size = sizeof(bench_block);
  142. size_t min_size = (work_size < bench_size ? work_size : bench_size);
  143. memset(&work, 0, sizeof(work));
  144. memcpy(&work, &bench_block, min_size);
  145. struct work_restart dummy;
  146. work_restart = &dummy;
  147. struct timeval end;
  148. struct timeval start;
  149. uint32_t max_nonce = (1<<22);
  150. uint32_t last_nonce = 0;
  151. gettimeofday(&start, 0);
  152. {
  153. sha256_func func = sha256_funcs[algo];
  154. (*func)(
  155. 0,
  156. work.midstate,
  157. work.data,
  158. work.hash1,
  159. work.hash,
  160. work.target,
  161. max_nonce,
  162. &last_nonce,
  163. work.blk.nonce
  164. );
  165. }
  166. gettimeofday(&end, 0);
  167. work_restart = NULL;
  168. uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
  169. uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
  170. uint64_t usec_elapsed = usec_end - usec_start;
  171. double rate = -1.0;
  172. if (0<usec_elapsed) {
  173. rate = (1.0*(last_nonce+1))/usec_elapsed;
  174. }
  175. return rate;
  176. }
  177. #if defined(unix)
  178. // Change non-blocking status on a file descriptor
  179. static void set_non_blocking(
  180. int fd,
  181. int yes
  182. )
  183. {
  184. int flags = fcntl(fd, F_GETFL, 0);
  185. if (flags<0) {
  186. perror("fcntl(GET) failed");
  187. exit(1);
  188. }
  189. flags = yes ? (flags|O_NONBLOCK) : (flags&~O_NONBLOCK);
  190. int r = fcntl(fd, F_SETFL, flags);
  191. if (r<0) {
  192. perror("fcntl(SET) failed");
  193. exit(1);
  194. }
  195. }
  196. #endif // defined(unix)
  197. // Algo benchmark, crash-safe, system-dependent stage
  198. static double bench_algo_stage2(
  199. enum sha256_algos algo
  200. )
  201. {
  202. // Here, the gig is to safely run a piece of code that potentially
  203. // crashes. Unfortunately, the Right Way (tm) to do this is rather
  204. // heavily platform dependent :(
  205. double rate = -1.23457;
  206. #if defined(unix)
  207. // Make a pipe: [readFD, writeFD]
  208. int pfd[2];
  209. int r = pipe(pfd);
  210. if (r<0) {
  211. perror("pipe - failed to create pipe for --algo auto");
  212. exit(1);
  213. }
  214. // Make pipe non blocking
  215. set_non_blocking(pfd[0], 1);
  216. set_non_blocking(pfd[1], 1);
  217. // Don't allow a crashing child to kill the main process
  218. sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
  219. sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
  220. if (SIG_ERR==sr0 || SIG_ERR==sr1) {
  221. perror("signal - failed to edit signal mask for --algo auto");
  222. exit(1);
  223. }
  224. // Fork a child to do the actual benchmarking
  225. pid_t child_pid = fork();
  226. if (child_pid<0) {
  227. perror("fork - failed to create a child process for --algo auto");
  228. exit(1);
  229. }
  230. // Do the dangerous work in the child, knowing we might crash
  231. if (0==child_pid) {
  232. // TODO: some umask trickery to prevent coredumps
  233. // Benchmark this algorithm
  234. double r = bench_algo_stage3(algo);
  235. // We survived, send result to parent and bail
  236. int loop_count = 0;
  237. while (1) {
  238. ssize_t bytes_written = write(pfd[1], &r, sizeof(r));
  239. int try_again = (0==bytes_written || (bytes_written<0 && EAGAIN==errno));
  240. int success = (sizeof(r)==(size_t)bytes_written);
  241. if (success)
  242. break;
  243. if (!try_again) {
  244. perror("write - child failed to write benchmark result to pipe");
  245. exit(1);
  246. }
  247. if (5<loop_count) {
  248. applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count);
  249. exit(1);
  250. }
  251. ++loop_count;
  252. sleep(1);
  253. }
  254. exit(0);
  255. }
  256. // Parent waits for a result from child
  257. int loop_count = 0;
  258. while (1) {
  259. // Wait for child to die
  260. int status;
  261. int r = waitpid(child_pid, &status, WNOHANG);
  262. if ((child_pid==r) || (r<0 && ECHILD==errno)) {
  263. // Child died somehow. Grab result and bail
  264. double tmp;
  265. ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp));
  266. if (sizeof(tmp)==(size_t)bytes_read)
  267. rate = tmp;
  268. break;
  269. } else if (r<0) {
  270. perror("bench_algo: waitpid failed. giving up.");
  271. exit(1);
  272. }
  273. // Give up on child after a ~60s
  274. if (60<loop_count) {
  275. kill(child_pid, SIGKILL);
  276. waitpid(child_pid, &status, 0);
  277. break;
  278. }
  279. // Wait a bit longer
  280. ++loop_count;
  281. sleep(1);
  282. }
  283. // Close pipe
  284. r = close(pfd[0]);
  285. if (r<0) {
  286. perror("close - failed to close read end of pipe for --algo auto");
  287. exit(1);
  288. }
  289. r = close(pfd[1]);
  290. if (r<0) {
  291. perror("close - failed to close read end of pipe for --algo auto");
  292. exit(1);
  293. }
  294. #elif defined(WIN32)
  295. // Get handle to current exe
  296. HINSTANCE module = GetModuleHandle(0);
  297. if (!module) {
  298. applog(LOG_ERR, "failed to retrieve module handle");
  299. exit(1);
  300. }
  301. // Create a unique name
  302. char unique_name[32];
  303. snprintf(
  304. unique_name,
  305. sizeof(unique_name)-1,
  306. "cgminer-%p",
  307. (void*)module
  308. );
  309. // Create and init a chunked of shared memory
  310. HANDLE map_handle = CreateFileMapping(
  311. INVALID_HANDLE_VALUE, // use paging file
  312. NULL, // default security attributes
  313. PAGE_READWRITE, // read/write access
  314. 0, // size: high 32-bits
  315. 4096, // size: low 32-bits
  316. unique_name // name of map object
  317. );
  318. if (NULL==map_handle) {
  319. applog(LOG_ERR, "could not create shared memory");
  320. exit(1);
  321. }
  322. void *shared_mem = MapViewOfFile(
  323. map_handle, // object to map view of
  324. FILE_MAP_WRITE, // read/write access
  325. 0, // high offset: map from
  326. 0, // low offset: beginning
  327. 0 // default: map entire file
  328. );
  329. if (NULL==shared_mem) {
  330. applog(LOG_ERR, "could not map shared memory");
  331. exit(1);
  332. }
  333. SetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name);
  334. CopyMemory(shared_mem, &rate, sizeof(rate));
  335. // Get path to current exe
  336. char cmd_line[256 + MAX_PATH];
  337. const size_t n = sizeof(cmd_line)-200;
  338. DWORD size = GetModuleFileName(module, cmd_line, n);
  339. if (0==size) {
  340. applog(LOG_ERR, "failed to retrieve module path");
  341. exit(1);
  342. }
  343. // Construct new command line based on that
  344. char *p = strlen(cmd_line) + cmd_line;
  345. sprintf(p, " --bench-algo %d", algo);
  346. SetEnvironmentVariable("CGMINER_BENCH_ALGO", "1");
  347. // Launch a debug copy of cgminer
  348. STARTUPINFO startup_info;
  349. PROCESS_INFORMATION process_info;
  350. ZeroMemory(&startup_info, sizeof(startup_info));
  351. ZeroMemory(&process_info, sizeof(process_info));
  352. startup_info.cb = sizeof(startup_info);
  353. BOOL ok = CreateProcess(
  354. NULL, // No module name (use command line)
  355. cmd_line, // Command line
  356. NULL, // Process handle not inheritable
  357. NULL, // Thread handle not inheritable
  358. FALSE, // Set handle inheritance to FALSE
  359. DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child
  360. NULL, // Use parent's environment block
  361. NULL, // Use parent's starting directory
  362. &startup_info, // Pointer to STARTUPINFO structure
  363. &process_info // Pointer to PROCESS_INFORMATION structure
  364. );
  365. if (!ok) {
  366. applog(LOG_ERR, "CreateProcess failed with error %d\n", GetLastError() );
  367. exit(1);
  368. }
  369. // Debug the child (only clean way to catch exceptions)
  370. while (1) {
  371. // Wait for child to do something
  372. DEBUG_EVENT debug_event;
  373. ZeroMemory(&debug_event, sizeof(debug_event));
  374. BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000);
  375. if (!ok)
  376. break;
  377. // Decide if event is "normal"
  378. int go_on =
  379. CREATE_PROCESS_DEBUG_EVENT== debug_event.dwDebugEventCode ||
  380. CREATE_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  381. EXIT_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  382. EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  383. LOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  384. OUTPUT_DEBUG_STRING_EVENT == debug_event.dwDebugEventCode ||
  385. UNLOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode;
  386. if (!go_on)
  387. break;
  388. // Some exceptions are also "normal", apparently.
  389. if (EXCEPTION_DEBUG_EVENT== debug_event.dwDebugEventCode) {
  390. int go_on =
  391. EXCEPTION_BREAKPOINT== debug_event.u.Exception.ExceptionRecord.ExceptionCode;
  392. if (!go_on)
  393. break;
  394. }
  395. // If nothing unexpected happened, let child proceed
  396. ContinueDebugEvent(
  397. debug_event.dwProcessId,
  398. debug_event.dwThreadId,
  399. DBG_CONTINUE
  400. );
  401. }
  402. // Clean up child process
  403. TerminateProcess(process_info.hProcess, 1);
  404. CloseHandle(process_info.hProcess);
  405. CloseHandle(process_info.hThread);
  406. // Reap return value and cleanup
  407. CopyMemory(&rate, shared_mem, sizeof(rate));
  408. (void)UnmapViewOfFile(shared_mem);
  409. (void)CloseHandle(map_handle);
  410. #else
  411. // Not linux, not unix, not WIN32 ... do our best
  412. rate = bench_algo_stage3(algo);
  413. #endif // defined(unix)
  414. // Done
  415. return rate;
  416. }
  417. static void bench_algo(
  418. double *best_rate,
  419. enum sha256_algos *best_algo,
  420. enum sha256_algos algo
  421. )
  422. {
  423. size_t n = max_name_len - strlen(algo_names[algo]);
  424. memset(name_spaces_pad, ' ', n);
  425. name_spaces_pad[n] = 0;
  426. applog(
  427. LOG_ERR,
  428. "\"%s\"%s : benchmarking algorithm ...",
  429. algo_names[algo],
  430. name_spaces_pad
  431. );
  432. double rate = bench_algo_stage2(algo);
  433. if (rate<0.0) {
  434. applog(
  435. LOG_ERR,
  436. "\"%s\"%s : algorithm fails on this platform",
  437. algo_names[algo],
  438. name_spaces_pad
  439. );
  440. } else {
  441. applog(
  442. LOG_ERR,
  443. "\"%s\"%s : algorithm runs at %.5f MH/s",
  444. algo_names[algo],
  445. name_spaces_pad,
  446. rate
  447. );
  448. if (*best_rate<rate) {
  449. *best_rate = rate;
  450. *best_algo = algo;
  451. }
  452. }
  453. }
  454. // Figure out the longest algorithm name
  455. void init_max_name_len()
  456. {
  457. size_t i;
  458. size_t nb_names = sizeof(algo_names)/sizeof(algo_names[0]);
  459. for (i=0; i<nb_names; ++i) {
  460. const char *p = algo_names[i];
  461. size_t name_len = p ? strlen(p) : 0;
  462. if (max_name_len<name_len)
  463. max_name_len = name_len;
  464. }
  465. name_spaces_pad = (char*) malloc(max_name_len+16);
  466. if (0==name_spaces_pad) {
  467. perror("malloc failed");
  468. exit(1);
  469. }
  470. }
  471. // Pick the fastest CPU hasher
  472. static enum sha256_algos pick_fastest_algo()
  473. {
  474. double best_rate = -1.0;
  475. enum sha256_algos best_algo = 0;
  476. applog(LOG_ERR, "benchmarking all sha256 algorithms ...");
  477. bench_algo(&best_rate, &best_algo, ALGO_C);
  478. #if defined(WANT_SSE2_4WAY)
  479. bench_algo(&best_rate, &best_algo, ALGO_4WAY);
  480. #endif
  481. #if defined(WANT_VIA_PADLOCK)
  482. bench_algo(&best_rate, &best_algo, ALGO_VIA);
  483. #endif
  484. bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP);
  485. #if defined(WANT_CRYPTOPP_ASM32)
  486. bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
  487. #endif
  488. #if defined(WANT_X8632_SSE2)
  489. bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
  490. #endif
  491. #if defined(WANT_X8664_SSE2)
  492. bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
  493. #endif
  494. #if defined(WANT_X8664_SSE4)
  495. bench_algo(&best_rate, &best_algo, ALGO_SSE4_64);
  496. #endif
  497. #if defined(WANT_ALTIVEC_4WAY)
  498. bench_algo(&best_rate, &best_algo, ALGO_ALTIVEC_4WAY);
  499. #endif
  500. size_t n = max_name_len - strlen(algo_names[best_algo]);
  501. memset(name_spaces_pad, ' ', n);
  502. name_spaces_pad[n] = 0;
  503. applog(
  504. LOG_ERR,
  505. "\"%s\"%s : is fastest algorithm at %.5f MH/s",
  506. algo_names[best_algo],
  507. name_spaces_pad,
  508. best_rate
  509. );
  510. return best_algo;
  511. }
  512. /* FIXME: Use asprintf for better errors. */
  513. char *set_algo(const char *arg, enum sha256_algos *algo)
  514. {
  515. enum sha256_algos i;
  516. if (!strcmp(arg, "auto")) {
  517. *algo = pick_fastest_algo();
  518. return NULL;
  519. }
  520. for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
  521. if (algo_names[i] && !strcmp(arg, algo_names[i])) {
  522. *algo = i;
  523. return NULL;
  524. }
  525. }
  526. return "Unknown algorithm";
  527. }
  528. void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
  529. {
  530. strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
  531. }
  532. #endif
  533. #ifdef WANT_CPUMINE
  534. char *force_nthreads_int(const char *arg, int *i)
  535. {
  536. forced_n_threads = true;
  537. return set_int_range(arg, i, 0, 9999);
  538. }
  539. #endif
  540. #ifdef WANT_CPUMINE
  541. static void cpu_detect()
  542. {
  543. int i;
  544. // Reckon number of cores in the box
  545. #if defined(WIN32)
  546. {
  547. DWORD system_am;
  548. DWORD process_am;
  549. BOOL ok = GetProcessAffinityMask(
  550. GetCurrentProcess(),
  551. &system_am,
  552. &process_am
  553. );
  554. if (!ok) {
  555. applog(LOG_ERR, "couldn't figure out number of processors :(");
  556. num_processors = 1;
  557. } else {
  558. size_t n = 32;
  559. num_processors = 0;
  560. while (n--)
  561. if (process_am & (1<<n))
  562. ++num_processors;
  563. }
  564. }
  565. #elif defined(_SC_NPROCESSORS_ONLN)
  566. num_processors = sysconf(_SC_NPROCESSORS_ONLN);
  567. #elif defined(HW_NCPU)
  568. int req[] = { CTL_HW, HW_NCPU };
  569. size_t len = sizeof(num_processors);
  570. v = sysctl(req, 2, &num_processors, &len, NULL, 0);
  571. #else
  572. num_processors = 1;
  573. #endif /* !WIN32 */
  574. if (opt_n_threads < 0 || !forced_n_threads) {
  575. if (total_devices && !opt_usecpu)
  576. opt_n_threads = 0;
  577. else
  578. opt_n_threads = num_processors;
  579. }
  580. if (num_processors < 1)
  581. return;
  582. if (total_devices + opt_n_threads > MAX_DEVICES)
  583. opt_n_threads = MAX_DEVICES - total_devices;
  584. cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
  585. if (unlikely(!cpus))
  586. quit(1, "Failed to calloc cpus");
  587. for (i = 0; i < opt_n_threads; ++i) {
  588. struct cgpu_info *cgpu;
  589. cgpu = devices[total_devices + i] = &cpus[i];
  590. cgpu->api = &cpu_api;
  591. cgpu->enabled = true;
  592. cgpu->device_id = i;
  593. cgpu->threads = 1;
  594. }
  595. total_devices += opt_n_threads;
  596. }
/* Queue a CPU device on the reinit thread's work queue so it is
 * reinitialized asynchronously (see cpur_thr_id). */
static void reinit_cpu_device(struct cgpu_info *cpu)
{
	tq_push(thr_info[cpur_thr_id].q, cpu);
}
/* Per-thread preparation hook: a CPU thread needs no setup beyond
 * reporting in to the watchdog. Always succeeds. */
static bool cpu_thread_prepare(struct thr_info *thr)
{
	thread_reportin(thr);
	return true;
}
/* Cap the nonce range handed to a single cpu_scanhash() call so the
 * thread checks back in regularly (0xfffff nonces per slice). */
static uint64_t cpu_can_limit_work(struct thr_info *thr)
{
	return 0xfffff;
}
/* One-time initialization for a CPU mining thread: demote its priority
 * and scheduling class, and optionally pin it to a core. Always succeeds. */
static bool cpu_thread_init(struct thr_info *thr)
{
	const int thr_id = thr->id;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	/* NOTE(review): assumes num_processors >= 1 here (cpu_detect bails
	 * out before creating CPU devices otherwise) — confirm. */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors);
	return true;
}
/* Mining entry point for a CPU thread: run the selected sha256 scanhash
 * over nonces starting at work->blk.nonce up to max_nonce, submitting
 * any solution found and resuming the scan, until the range is done.
 * Returns the number of nonces examined (0 if the scan made no progress). */
static uint64_t cpu_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
{
	const int thr_id = thr->id;

	uint32_t first_nonce = work->blk.nonce;
	uint32_t last_nonce;
	bool rc;

CPUSearch:
	last_nonce = first_nonce;
	rc = false;

	/* scan nonces for a proof-of-work hash */
	{
		/* Dispatch through the table selected by --algo. */
		sha256_func func = sha256_funcs[opt_algo];
		rc = (*func)(
			thr_id,
			work->midstate,
			work->data,
			work->hash1,
			work->hash,
			work->target,
			max_nonce,
			&last_nonce,
			work->blk.nonce
		);
	}

	/* if nonce found, submit work */
	if (unlikely(rc)) {
		if (opt_debug)
			applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
		if (unlikely(!submit_work_sync(thr, work))) {
			applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
		}
		/* Keep scanning the remainder of the range after the hit. */
		work->blk.nonce = last_nonce + 1;
		goto CPUSearch;
	}
	else
	if (unlikely(last_nonce == first_nonce))
		return 0;

	work->blk.nonce = last_nonce + 1;
	return last_nonce - first_nonce + 1;
}
/* Driver entry points exposing the CPU as a mining "device". */
struct device_api cpu_api = {
	.name = "CPU",
	.api_detect = cpu_detect,
	.reinit_device = reinit_cpu_device,
	.thread_prepare = cpu_thread_prepare,
	.can_limit_work = cpu_can_limit_work,
	.thread_init = cpu_thread_init,
	.scanhash = cpu_scanhash,
};
  673. #endif