/* device-cpu.c */
  1. /*
  2. * Copyright 2011-2012 Con Kolivas
  3. * Copyright 2011-2012 Luke Dashjr
  4. * Copyright 2010 Jeff Garzik
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the Free
  8. * Software Foundation; either version 2 of the License, or (at your option)
  9. * any later version. See COPYING for more details.
  10. */
  11. #include "config.h"
  12. #include <stdio.h>
  13. #include <stdlib.h>
  14. #include <string.h>
  15. #include <stdbool.h>
  16. #include <stdint.h>
  17. #include <unistd.h>
  18. #include <signal.h>
  19. #include <sys/stat.h>
  20. #include <sys/types.h>
  21. #ifndef WIN32
  22. #include <sys/wait.h>
  23. #include <sys/resource.h>
  24. #endif
  25. #include <libgen.h>
  26. #include "compat.h"
  27. #include "miner.h"
  28. #include "bench_block.h"
  29. #include "device-cpu.h"
  30. #if defined(unix)
  31. #include <errno.h>
  32. #include <fcntl.h>
  33. #endif
  34. #ifdef __linux /* Linux specific policy and affinity management */
  35. #include <sched.h>
  36. static inline void drop_policy(void)
  37. {
  38. struct sched_param param;
  39. #ifdef SCHED_BATCH
  40. #ifdef SCHED_IDLE
  41. if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
  42. #endif
  43. sched_setscheduler(0, SCHED_BATCH, &param);
  44. #endif
  45. }
  46. static inline void affine_to_cpu(int id, int cpu)
  47. {
  48. cpu_set_t set;
  49. CPU_ZERO(&set);
  50. CPU_SET(cpu, &set);
  51. sched_setaffinity(0, sizeof(&set), &set);
  52. applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
  53. }
  54. #else
/* Non-Linux fallback: scheduling-policy demotion unavailable, no-op. */
static inline void drop_policy(void)
{
}
/* Non-Linux fallback: CPU affinity unsupported here, no-op. */
static inline void affine_to_cpu(int id, int cpu)
{
}
  61. #endif
  62. /* TODO: resolve externals */
  63. extern bool submit_work_sync(struct thr_info *thr, const struct work *work_in);
  64. extern char *set_int_range(const char *arg, int *i, int min, int max);
  65. extern int dev_from_id(int thr_id);
  66. /* chipset-optimized hash functions */
  67. extern bool ScanHash_4WaySSE2(int, const unsigned char *pmidstate,
  68. unsigned char *pdata, unsigned char *phash1, unsigned char *phash,
  69. const unsigned char *ptarget,
  70. uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
  71. extern bool ScanHash_altivec_4way(int thr_id, const unsigned char *pmidstate,
  72. unsigned char *pdata,
  73. unsigned char *phash1, unsigned char *phash,
  74. const unsigned char *ptarget,
  75. uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
  76. extern bool scanhash_via(int, const unsigned char *pmidstate,
  77. unsigned char *pdata,
  78. unsigned char *phash1, unsigned char *phash,
  79. const unsigned char *target,
  80. uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
  81. extern bool scanhash_c(int, const unsigned char *midstate, unsigned char *data,
  82. unsigned char *hash1, unsigned char *hash,
  83. const unsigned char *target,
  84. uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
  85. extern bool scanhash_cryptopp(int, const unsigned char *midstate,unsigned char *data,
  86. unsigned char *hash1, unsigned char *hash,
  87. const unsigned char *target,
  88. uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
  89. extern bool scanhash_asm32(int, const unsigned char *midstate,unsigned char *data,
  90. unsigned char *hash1, unsigned char *hash,
  91. const unsigned char *target,
  92. uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
  93. extern bool scanhash_sse2_64(int, const unsigned char *pmidstate, unsigned char *pdata,
  94. unsigned char *phash1, unsigned char *phash,
  95. const unsigned char *ptarget,
  96. uint32_t max_nonce, uint32_t *last_nonce,
  97. uint32_t nonce);
  98. extern bool scanhash_sse4_64(int, const unsigned char *pmidstate, unsigned char *pdata,
  99. unsigned char *phash1, unsigned char *phash,
  100. const unsigned char *ptarget,
  101. uint32_t max_nonce, uint32_t *last_nonce,
  102. uint32_t nonce);
  103. extern bool scanhash_sse2_32(int, const unsigned char *pmidstate, unsigned char *pdata,
  104. unsigned char *phash1, unsigned char *phash,
  105. const unsigned char *ptarget,
  106. uint32_t max_nonce, uint32_t *last_nonce,
  107. uint32_t nonce);
  108. #ifdef WANT_CPUMINE
/* Longest algorithm name seen and a heap buffer of spaces used to
 * right-pad names so benchmark log lines align (see init_max_name_len). */
static size_t max_name_len = 0;
static char *name_spaces_pad = NULL;

/* Human-readable name for each compiled-in sha256 implementation,
 * indexed by enum sha256_algos. Entries for algorithms that were not
 * built remain NULL. */
const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = "sse2_32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = "sse4_64",
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = "altivec_4way",
#endif
};
/* Dispatch table mapping enum sha256_algos to the matching scanhash
 * implementation; must stay in step with algo_names above. */
static const sha256_func sha256_funcs[] = {
	[ALGO_C] = (sha256_func)scanhash_c,
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2,
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = (sha256_func) ScanHash_altivec_4way,
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = (sha256_func)scanhash_via,
#endif
	[ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp,
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32,
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32,
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64,
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64
#endif
};
  161. #endif
  162. #ifdef WANT_CPUMINE
/* Default hashing algorithm: the best SIMD variant this compiler target
 * advertises, falling back to the portable C implementation. */
#if defined(WANT_X8664_SSE4) && defined(__SSE4_1__)
enum sha256_algos opt_algo = ALGO_SSE4_64;
#elif defined(WANT_X8664_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_64;
#elif defined(WANT_X8632_SSE2) && defined(__SSE2__)
enum sha256_algos opt_algo = ALGO_SSE2_32;
#else
enum sha256_algos opt_algo = ALGO_C;
#endif

/* When false, CPU threads default to 0 if other devices are present
 * (see cpu_detect). */
bool opt_usecpu = false;
/* thr_info index whose queue receives device reinit requests - see
 * reinit_cpu_device; presumably set during thread setup elsewhere. */
static int cpur_thr_id;
/* True once the user explicitly forced a thread count
 * (force_nthreads_int). */
static bool forced_n_threads;
  175. #endif
  176. #ifdef WANT_CPUMINE
  177. // Algo benchmark, crash-prone, system independent stage
  178. double bench_algo_stage3(
  179. enum sha256_algos algo
  180. )
  181. {
  182. // Use a random work block pulled from a pool
  183. static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK };
  184. struct work work __attribute__((aligned(128)));
  185. size_t bench_size = sizeof(work);
  186. size_t work_size = sizeof(bench_block);
  187. size_t min_size = (work_size < bench_size ? work_size : bench_size);
  188. memset(&work, 0, sizeof(work));
  189. memcpy(&work, &bench_block, min_size);
  190. struct work_restart dummy;
  191. work_restart = &dummy;
  192. struct timeval end;
  193. struct timeval start;
  194. uint32_t max_nonce = (1<<22);
  195. uint32_t last_nonce = 0;
  196. gettimeofday(&start, 0);
  197. {
  198. sha256_func func = sha256_funcs[algo];
  199. (*func)(
  200. 0,
  201. work.midstate,
  202. work.data,
  203. work.hash1,
  204. work.hash,
  205. work.target,
  206. max_nonce,
  207. &last_nonce,
  208. work.blk.nonce
  209. );
  210. }
  211. gettimeofday(&end, 0);
  212. work_restart = NULL;
  213. uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
  214. uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
  215. uint64_t usec_elapsed = usec_end - usec_start;
  216. double rate = -1.0;
  217. if (0<usec_elapsed) {
  218. rate = (1.0*(last_nonce+1))/usec_elapsed;
  219. }
  220. return rate;
  221. }
  222. #if defined(unix)
  223. // Change non-blocking status on a file descriptor
  224. static void set_non_blocking(
  225. int fd,
  226. int yes
  227. )
  228. {
  229. int flags = fcntl(fd, F_GETFL, 0);
  230. if (flags<0) {
  231. perror("fcntl(GET) failed");
  232. exit(1);
  233. }
  234. flags = yes ? (flags|O_NONBLOCK) : (flags&~O_NONBLOCK);
  235. int r = fcntl(fd, F_SETFL, flags);
  236. if (r<0) {
  237. perror("fcntl(SET) failed");
  238. exit(1);
  239. }
  240. }
  241. #endif // defined(unix)
  242. // Algo benchmark, crash-safe, system-dependent stage
  243. static double bench_algo_stage2(
  244. enum sha256_algos algo
  245. )
  246. {
  247. // Here, the gig is to safely run a piece of code that potentially
  248. // crashes. Unfortunately, the Right Way (tm) to do this is rather
  249. // heavily platform dependent :(
  250. double rate = -1.23457;
  251. #if defined(unix)
  252. // Make a pipe: [readFD, writeFD]
  253. int pfd[2];
  254. int r = pipe(pfd);
  255. if (r<0) {
  256. perror("pipe - failed to create pipe for --algo auto");
  257. exit(1);
  258. }
  259. // Make pipe non blocking
  260. set_non_blocking(pfd[0], 1);
  261. set_non_blocking(pfd[1], 1);
  262. // Don't allow a crashing child to kill the main process
  263. sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
  264. sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
  265. if (SIG_ERR==sr0 || SIG_ERR==sr1) {
  266. perror("signal - failed to edit signal mask for --algo auto");
  267. exit(1);
  268. }
  269. // Fork a child to do the actual benchmarking
  270. pid_t child_pid = fork();
  271. if (child_pid<0) {
  272. perror("fork - failed to create a child process for --algo auto");
  273. exit(1);
  274. }
  275. // Do the dangerous work in the child, knowing we might crash
  276. if (0==child_pid) {
  277. // TODO: some umask trickery to prevent coredumps
  278. // Benchmark this algorithm
  279. double r = bench_algo_stage3(algo);
  280. // We survived, send result to parent and bail
  281. int loop_count = 0;
  282. while (1) {
  283. ssize_t bytes_written = write(pfd[1], &r, sizeof(r));
  284. int try_again = (0==bytes_written || (bytes_written<0 && EAGAIN==errno));
  285. int success = (sizeof(r)==(size_t)bytes_written);
  286. if (success)
  287. break;
  288. if (!try_again) {
  289. perror("write - child failed to write benchmark result to pipe");
  290. exit(1);
  291. }
  292. if (5<loop_count) {
  293. applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count);
  294. exit(1);
  295. }
  296. ++loop_count;
  297. sleep(1);
  298. }
  299. exit(0);
  300. }
  301. // Parent waits for a result from child
  302. int loop_count = 0;
  303. while (1) {
  304. // Wait for child to die
  305. int status;
  306. int r = waitpid(child_pid, &status, WNOHANG);
  307. if ((child_pid==r) || (r<0 && ECHILD==errno)) {
  308. // Child died somehow. Grab result and bail
  309. double tmp;
  310. ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp));
  311. if (sizeof(tmp)==(size_t)bytes_read)
  312. rate = tmp;
  313. break;
  314. } else if (r<0) {
  315. perror("bench_algo: waitpid failed. giving up.");
  316. exit(1);
  317. }
  318. // Give up on child after a ~60s
  319. if (60<loop_count) {
  320. kill(child_pid, SIGKILL);
  321. waitpid(child_pid, &status, 0);
  322. break;
  323. }
  324. // Wait a bit longer
  325. ++loop_count;
  326. sleep(1);
  327. }
  328. // Close pipe
  329. r = close(pfd[0]);
  330. if (r<0) {
  331. perror("close - failed to close read end of pipe for --algo auto");
  332. exit(1);
  333. }
  334. r = close(pfd[1]);
  335. if (r<0) {
  336. perror("close - failed to close read end of pipe for --algo auto");
  337. exit(1);
  338. }
  339. #elif defined(WIN32)
  340. // Get handle to current exe
  341. HINSTANCE module = GetModuleHandle(0);
  342. if (!module) {
  343. applog(LOG_ERR, "failed to retrieve module handle");
  344. exit(1);
  345. }
  346. // Create a unique name
  347. char unique_name[32];
  348. snprintf(
  349. unique_name,
  350. sizeof(unique_name)-1,
  351. "cgminer-%p",
  352. (void*)module
  353. );
  354. // Create and init a chunked of shared memory
  355. HANDLE map_handle = CreateFileMapping(
  356. INVALID_HANDLE_VALUE, // use paging file
  357. NULL, // default security attributes
  358. PAGE_READWRITE, // read/write access
  359. 0, // size: high 32-bits
  360. 4096, // size: low 32-bits
  361. unique_name // name of map object
  362. );
  363. if (NULL==map_handle) {
  364. applog(LOG_ERR, "could not create shared memory");
  365. exit(1);
  366. }
  367. void *shared_mem = MapViewOfFile(
  368. map_handle, // object to map view of
  369. FILE_MAP_WRITE, // read/write access
  370. 0, // high offset: map from
  371. 0, // low offset: beginning
  372. 0 // default: map entire file
  373. );
  374. if (NULL==shared_mem) {
  375. applog(LOG_ERR, "could not map shared memory");
  376. exit(1);
  377. }
  378. SetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name);
  379. CopyMemory(shared_mem, &rate, sizeof(rate));
  380. // Get path to current exe
  381. char cmd_line[256 + MAX_PATH];
  382. const size_t n = sizeof(cmd_line)-200;
  383. DWORD size = GetModuleFileName(module, cmd_line, n);
  384. if (0==size) {
  385. applog(LOG_ERR, "failed to retrieve module path");
  386. exit(1);
  387. }
  388. // Construct new command line based on that
  389. char *p = strlen(cmd_line) + cmd_line;
  390. sprintf(p, " --bench-algo %d", algo);
  391. SetEnvironmentVariable("CGMINER_BENCH_ALGO", "1");
  392. // Launch a debug copy of cgminer
  393. STARTUPINFO startup_info;
  394. PROCESS_INFORMATION process_info;
  395. ZeroMemory(&startup_info, sizeof(startup_info));
  396. ZeroMemory(&process_info, sizeof(process_info));
  397. startup_info.cb = sizeof(startup_info);
  398. BOOL ok = CreateProcess(
  399. NULL, // No module name (use command line)
  400. cmd_line, // Command line
  401. NULL, // Process handle not inheritable
  402. NULL, // Thread handle not inheritable
  403. FALSE, // Set handle inheritance to FALSE
  404. DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child
  405. NULL, // Use parent's environment block
  406. NULL, // Use parent's starting directory
  407. &startup_info, // Pointer to STARTUPINFO structure
  408. &process_info // Pointer to PROCESS_INFORMATION structure
  409. );
  410. if (!ok) {
  411. applog(LOG_ERR, "CreateProcess failed with error %d\n", GetLastError() );
  412. exit(1);
  413. }
  414. // Debug the child (only clean way to catch exceptions)
  415. while (1) {
  416. // Wait for child to do something
  417. DEBUG_EVENT debug_event;
  418. ZeroMemory(&debug_event, sizeof(debug_event));
  419. BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000);
  420. if (!ok)
  421. break;
  422. // Decide if event is "normal"
  423. int go_on =
  424. CREATE_PROCESS_DEBUG_EVENT== debug_event.dwDebugEventCode ||
  425. CREATE_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  426. EXIT_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  427. EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  428. LOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode ||
  429. OUTPUT_DEBUG_STRING_EVENT == debug_event.dwDebugEventCode ||
  430. UNLOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode;
  431. if (!go_on)
  432. break;
  433. // Some exceptions are also "normal", apparently.
  434. if (EXCEPTION_DEBUG_EVENT== debug_event.dwDebugEventCode) {
  435. int go_on =
  436. EXCEPTION_BREAKPOINT== debug_event.u.Exception.ExceptionRecord.ExceptionCode;
  437. if (!go_on)
  438. break;
  439. }
  440. // If nothing unexpected happened, let child proceed
  441. ContinueDebugEvent(
  442. debug_event.dwProcessId,
  443. debug_event.dwThreadId,
  444. DBG_CONTINUE
  445. );
  446. }
  447. // Clean up child process
  448. TerminateProcess(process_info.hProcess, 1);
  449. CloseHandle(process_info.hProcess);
  450. CloseHandle(process_info.hThread);
  451. // Reap return value and cleanup
  452. CopyMemory(&rate, shared_mem, sizeof(rate));
  453. (void)UnmapViewOfFile(shared_mem);
  454. (void)CloseHandle(map_handle);
  455. #else
  456. // Not linux, not unix, not WIN32 ... do our best
  457. rate = bench_algo_stage3(algo);
  458. #endif // defined(unix)
  459. // Done
  460. return rate;
  461. }
  462. static void bench_algo(
  463. double *best_rate,
  464. enum sha256_algos *best_algo,
  465. enum sha256_algos algo
  466. )
  467. {
  468. size_t n = max_name_len - strlen(algo_names[algo]);
  469. memset(name_spaces_pad, ' ', n);
  470. name_spaces_pad[n] = 0;
  471. applog(
  472. LOG_ERR,
  473. "\"%s\"%s : benchmarking algorithm ...",
  474. algo_names[algo],
  475. name_spaces_pad
  476. );
  477. double rate = bench_algo_stage2(algo);
  478. if (rate<0.0) {
  479. applog(
  480. LOG_ERR,
  481. "\"%s\"%s : algorithm fails on this platform",
  482. algo_names[algo],
  483. name_spaces_pad
  484. );
  485. } else {
  486. applog(
  487. LOG_ERR,
  488. "\"%s\"%s : algorithm runs at %.5f MH/s",
  489. algo_names[algo],
  490. name_spaces_pad,
  491. rate
  492. );
  493. if (*best_rate<rate) {
  494. *best_rate = rate;
  495. *best_algo = algo;
  496. }
  497. }
  498. }
  499. // Figure out the longest algorithm name
  500. void init_max_name_len()
  501. {
  502. size_t i;
  503. size_t nb_names = sizeof(algo_names)/sizeof(algo_names[0]);
  504. for (i=0; i<nb_names; ++i) {
  505. const char *p = algo_names[i];
  506. size_t name_len = p ? strlen(p) : 0;
  507. if (max_name_len<name_len)
  508. max_name_len = name_len;
  509. }
  510. name_spaces_pad = (char*) malloc(max_name_len+16);
  511. if (0==name_spaces_pad) {
  512. perror("malloc failed");
  513. exit(1);
  514. }
  515. }
// Pick the fastest CPU hasher: benchmark every compiled-in algorithm
// via bench_algo() and return the one with the highest measured rate.
static enum sha256_algos pick_fastest_algo()
{
	double best_rate = -1.0;
	enum sha256_algos best_algo = 0;
	applog(LOG_ERR, "benchmarking all sha256 algorithms ...");

	// The plain C and cryptopp variants are always built; the rest are
	// conditional on compile-time feature flags.
	bench_algo(&best_rate, &best_algo, ALGO_C);

#if defined(WANT_SSE2_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_4WAY);
#endif

#if defined(WANT_VIA_PADLOCK)
	bench_algo(&best_rate, &best_algo, ALGO_VIA);
#endif

	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP);

#if defined(WANT_CRYPTOPP_ASM32)
	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
#endif

#if defined(WANT_X8632_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
#endif

#if defined(WANT_X8664_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
#endif

#if defined(WANT_X8664_SSE4)
	bench_algo(&best_rate, &best_algo, ALGO_SSE4_64);
#endif

#if defined(WANT_ALTIVEC_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_ALTIVEC_4WAY);
#endif

	// Right-pad the winner's name so the summary aligns with the
	// per-algorithm lines above
	size_t n = max_name_len - strlen(algo_names[best_algo]);
	memset(name_spaces_pad, ' ', n);
	name_spaces_pad[n] = 0;
	applog(
		LOG_ERR,
		"\"%s\"%s : is fastest algorithm at %.5f MH/s",
		algo_names[best_algo],
		name_spaces_pad,
		best_rate
	);
	return best_algo;
}
  557. /* FIXME: Use asprintf for better errors. */
  558. char *set_algo(const char *arg, enum sha256_algos *algo)
  559. {
  560. enum sha256_algos i;
  561. if (!strcmp(arg, "auto")) {
  562. *algo = pick_fastest_algo();
  563. return NULL;
  564. }
  565. for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
  566. if (algo_names[i] && !strcmp(arg, algo_names[i])) {
  567. *algo = i;
  568. return NULL;
  569. }
  570. }
  571. return "Unknown algorithm";
  572. }
  573. void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
  574. {
  575. strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
  576. }
  577. #endif
  578. #ifdef WANT_CPUMINE
/* Option parser callback for the CPU thread-count flag: record that the
 * user explicitly forced a count (so cpu_detect won't override it),
 * then range-check the value. Returns NULL on success. */
char *force_nthreads_int(const char *arg, int *i)
{
	forced_n_threads = true;
	return set_int_range(arg, i, 0, 9999);
}
  584. #endif
  585. #ifdef WANT_CPUMINE
  586. static void cpu_detect()
  587. {
  588. int i;
  589. // Reckon number of cores in the box
  590. #if defined(WIN32)
  591. {
  592. DWORD system_am;
  593. DWORD process_am;
  594. BOOL ok = GetProcessAffinityMask(
  595. GetCurrentProcess(),
  596. &system_am,
  597. &process_am
  598. );
  599. if (!ok) {
  600. applog(LOG_ERR, "couldn't figure out number of processors :(");
  601. num_processors = 1;
  602. } else {
  603. size_t n = 32;
  604. num_processors = 0;
  605. while (n--)
  606. if (process_am & (1<<n))
  607. ++num_processors;
  608. }
  609. }
  610. #else
  611. num_processors = sysconf(_SC_NPROCESSORS_ONLN);
  612. #endif /* !WIN32 */
  613. if (opt_n_threads < 0 || !forced_n_threads) {
  614. if (total_devices && !opt_usecpu)
  615. opt_n_threads = 0;
  616. else
  617. opt_n_threads = num_processors;
  618. }
  619. if (num_processors < 1)
  620. return;
  621. if (total_devices + opt_n_threads > MAX_DEVICES)
  622. opt_n_threads = MAX_DEVICES - total_devices;
  623. cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
  624. if (unlikely(!cpus))
  625. quit(1, "Failed to calloc cpus");
  626. for (i = 0; i < opt_n_threads; ++i) {
  627. struct cgpu_info *cgpu;
  628. cgpu = devices[total_devices + i] = &cpus[i];
  629. cgpu->api = &cpu_api;
  630. cgpu->enabled = true;
  631. cgpu->device_id = i;
  632. cgpu->threads = 1;
  633. }
  634. total_devices += opt_n_threads;
  635. }
/* Queue a CPU device for reinitialisation by pushing it onto the CPU
 * restart thread's work queue (thread index cpur_thr_id). */
static void reinit_cpu_device(struct cgpu_info *cpu)
{
	tq_push(thr_info[cpur_thr_id].q, cpu);
}
/* Per-thread preparation hook: nothing to set up for CPU mining beyond
 * reporting the thread in; always succeeds. */
static bool cpu_thread_prepare(struct thr_info *thr)
{
	thread_reportin(thr);
	return true;
}
/* Upper bound on nonces to scan per cpu_scanhash() call, keeping CPU
 * work units short. */
static uint64_t cpu_can_limit_work(struct thr_info *thr)
{
	return 0xfffff;
}
/* Per-thread init hook: demote the worker's priority and optionally pin
 * it to a CPU. Always returns true - priority/affinity are best-effort. */
static bool cpu_thread_init(struct thr_info *thr)
{
	const int thr_id = thr->id;

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors);
	return true;
}
/* Scan up to max_nonce nonces of `work` with the configured algorithm.
 * When a hash below target is found, submit it and resume scanning the
 * remainder of the range from the next nonce. Returns the number of
 * nonces hashed, or 0 when no progress was made (last_nonce came back
 * unchanged from the starting nonce). */
static uint64_t cpu_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
{
	const int thr_id = thr->id;

	uint32_t first_nonce = work->blk.nonce;
	uint32_t last_nonce;
	bool rc;

CPUSearch:
	last_nonce = first_nonce;
	rc = false;

	/* scan nonces for a proof-of-work hash */
	{
		sha256_func func = sha256_funcs[opt_algo];
		rc = (*func)(
			thr_id,
			work->midstate,
			work->data,
			work->hash1,
			work->hash,
			work->target,
			max_nonce,
			&last_nonce,
			work->blk.nonce
		);
	}

	/* if nonce found, submit work */
	if (unlikely(rc)) {
		applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
		if (unlikely(!submit_work_sync(thr, work))) {
			applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
		}
		/* continue scanning from just past the winning nonce */
		work->blk.nonce = last_nonce + 1;
		goto CPUSearch;
	}
	else
	if (unlikely(last_nonce == first_nonce))
		return 0;

	work->blk.nonce = last_nonce + 1;
	return last_nonce - first_nonce + 1;
}
/* Driver entry points exposing the CPU miner as a generic device. */
struct device_api cpu_api = {
	.name = "CPU",
	.api_detect = cpu_detect,
	.reinit_device = reinit_cpu_device,
	.thread_prepare = cpu_thread_prepare,
	.can_limit_work = cpu_can_limit_work,
	.thread_init = cpu_thread_init,
	.scanhash = cpu_scanhash,
};
  711. #endif