driver-cpu.c

/*
 * Copyright 2011-2012 Con Kolivas
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.  See COPYING for more details.
 */
#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <signal.h>

#include <sys/stat.h>
#include <sys/types.h>

#ifndef WIN32
#include <sys/wait.h>
#include <sys/resource.h>
#endif
#include <libgen.h>

#include "compat.h"
#include "fpgautils.h"
#include "miner.h"
#include "bench_block.h"
#include "util.h"

#include "driver-cpu.h"

#if defined(unix)
#include <errno.h>
#include <fcntl.h>
#endif
#if defined(__linux) && defined(CPU_ZERO)	/* Linux specific policy and affinity management */
#include <sched.h>
static inline void drop_policy(void)
{
	struct sched_param param;

	/* SCHED_IDLE and SCHED_BATCH require a priority of 0 */
	param.sched_priority = 0;
#ifdef SCHED_BATCH
#ifdef SCHED_IDLE
	if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
		sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}
static inline void affine_to_cpu(int id, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);
	applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int __maybe_unused id, int __maybe_unused cpu)
{
}
#endif
/* TODO: resolve externals */
extern char *set_int_range(const char *arg, int *i, int min, int max);
extern int dev_from_id(int thr_id);

/* chipset-optimized hash functions */
extern bool ScanHash_4WaySSE2(struct thr_info*, const unsigned char *pmidstate,
	unsigned char *pdata, unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool ScanHash_altivec_4way(struct thr_info*, const unsigned char *pmidstate,
	unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_via(struct thr_info*, const unsigned char *pmidstate,
	unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *target,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
extern bool scanhash_c(struct thr_info*, const unsigned char *midstate, unsigned char *data,
	unsigned char *hash1, unsigned char *hash,
	const unsigned char *target,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
extern bool scanhash_cryptopp(struct thr_info*, const unsigned char *midstate, unsigned char *data,
	unsigned char *hash1, unsigned char *hash,
	const unsigned char *target,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t n);
extern bool scanhash_asm32(struct thr_info*, const unsigned char *midstate, unsigned char *data,
	unsigned char *hash1, unsigned char *hash,
	const unsigned char *target,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_sse2_64(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce,
	uint32_t nonce);
extern bool scanhash_sse4_64(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce,
	uint32_t nonce);
extern bool scanhash_sse2_32(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce,
	uint32_t nonce);
extern bool scanhash_prime(struct thr_info *, const unsigned char *pmidstate, unsigned char *pdata,
	unsigned char *phash1, unsigned char *phash,
	const unsigned char *ptarget,
	uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_scrypt(struct thr_info *thr, int thr_id, unsigned char *pdata, unsigned char *scratchbuf,
	const unsigned char *ptarget,
	uint32_t max_nonce, unsigned long *hashes_done);
#ifdef WANT_CPUMINE

static size_t max_name_len = 0;
static char *name_spaces_pad = NULL;

const char *algo_names[] = {
	[ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = "via",
#endif
	[ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = "sse2_32",
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = "sse2_64",
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = "sse4_64",
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = "altivec_4way",
#endif
#ifdef WANT_PRIMEPOW
	[ALGO_PRIME] = "prime",
#endif
#ifdef WANT_SCRYPT
	[ALGO_SCRYPT] = "scrypt",
#endif
	[ALGO_FASTAUTO] = "fastauto",
	[ALGO_AUTO] = "auto",
};

static const sha256_func sha256_funcs[] = {
	[ALGO_C] = (sha256_func)scanhash_c,
#ifdef WANT_SSE2_4WAY
	[ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2,
#endif
#ifdef WANT_ALTIVEC_4WAY
	[ALGO_ALTIVEC_4WAY] = (sha256_func)ScanHash_altivec_4way,
#endif
#ifdef WANT_VIA_PADLOCK
	[ALGO_VIA] = (sha256_func)scanhash_via,
#endif
	[ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp,
#ifdef WANT_CRYPTOPP_ASM32
	[ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32,
#endif
#ifdef WANT_X8632_SSE2
	[ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32,
#endif
#ifdef WANT_X8664_SSE2
	[ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64,
#endif
#ifdef WANT_X8664_SSE4
	[ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64,
#endif
#ifdef WANT_PRIMEPOW
	[ALGO_PRIME] = (sha256_func)scanhash_prime,
#endif
#ifdef WANT_SCRYPT
	[ALGO_SCRYPT] = (sha256_func)scanhash_scrypt
#endif
};
#endif

#ifdef WANT_CPUMINE
enum sha256_algos opt_algo = ALGO_FASTAUTO;
bool opt_usecpu = false;
static bool forced_n_threads;
#endif
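
/* Initial contents of the 64-byte hash1 buffer used for the second SHA-256
 * of the double hash: the first eight words are overwritten with the
 * first-pass hash at runtime, followed by the padding bit (0x80000000) and
 * the 256-bit message length (0x100). */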
static const uint32_t hash1_init[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0x80000000,
	0, 0, 0, 0, 0, 0,
	0x100,
};

#ifdef WANT_CPUMINE

// Algo benchmark, crash-prone, system independent stage
double bench_algo_stage3(
	enum sha256_algos algo
)
{
	// Use a random work block pulled from a pool
	static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK };
	struct work work __attribute__((aligned(128)));
	unsigned char hash1[64];

	size_t bench_size = sizeof(work);
	size_t work_size = sizeof(bench_block);
	size_t min_size = (work_size < bench_size ? work_size : bench_size);
	memset(&work, 0, sizeof(work));
	memcpy(&work, &bench_block, min_size);

	static struct thr_info dummy;

	struct timeval end;
	struct timeval start;
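	// FASTAUTO probes only 2^8 nonces for a rough estimate; AUTO scans 2^22
	// nonces for a more accurate benchmark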
	uint32_t max_nonce = opt_algo == ALGO_FASTAUTO ? (1<<8) : (1<<22);
	uint32_t last_nonce = 0;
	memcpy(&hash1[0], &hash1_init[0], sizeof(hash1));

	gettimeofday(&start, 0);
	{
		sha256_func func = sha256_funcs[algo];
		(*func)(
			&dummy,
			work.midstate,
			work.data,
			hash1,
			work.hash,
			work.target,
			max_nonce,
			&last_nonce,
			work.blk.nonce
		);
	}
	gettimeofday(&end, 0);

	uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
	uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
	uint64_t usec_elapsed = usec_end - usec_start;
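	// (last_nonce + 1) hashes over usec_elapsed microseconds gives hashes
	// per microsecond, i.e. MH/s, matching the rates logged by bench_algo()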
	double rate = -1.0;
	if (0 < usec_elapsed) {
		rate = (1.0*(last_nonce+1))/usec_elapsed;
	}
	return rate;
}

#if defined(unix)

// Change non-blocking status on a file descriptor
static void set_non_blocking(
	int fd,
	int yes
)
{
	int flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) {
		perror("fcntl(GET) failed");
		exit(1);
	}
	flags = yes ? (flags|O_NONBLOCK) : (flags&~O_NONBLOCK);

	int r = fcntl(fd, F_SETFL, flags);
	if (r < 0) {
		perror("fcntl(SET) failed");
		exit(1);
	}
}

#endif // defined(unix)

// Algo benchmark, crash-safe, system-dependent stage
static double bench_algo_stage2(
	enum sha256_algos algo
)
{
	// Here, the gig is to safely run a piece of code that potentially
	// crashes. Unfortunately, the Right Way (tm) to do this is rather
	// heavily platform dependent :(

	double rate = -1.23457;

#if defined(unix)

	// Make a pipe: [readFD, writeFD]
	int pfd[2];
	int r = pipe(pfd);
	if (r < 0) {
		perror("pipe - failed to create pipe for --algo auto");
		exit(1);
	}

	// Make pipe non blocking
	set_non_blocking(pfd[0], 1);
	set_non_blocking(pfd[1], 1);

	// Don't allow a crashing child to kill the main process
	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
	if (SIG_ERR == sr0 || SIG_ERR == sr1) {
		perror("signal - failed to edit signal mask for --algo auto");
		exit(1);
	}

	// Fork a child to do the actual benchmarking
	pid_t child_pid = fork();
	if (child_pid < 0) {
		perror("fork - failed to create a child process for --algo auto");
		exit(1);
	}

	// Do the dangerous work in the child, knowing we might crash
	if (0 == child_pid) {
		// TODO: some umask trickery to prevent coredumps

		// Benchmark this algorithm
		double r = bench_algo_stage3(algo);

		// We survived, send result to parent and bail
		int loop_count = 0;
		while (1) {
			ssize_t bytes_written = write(pfd[1], &r, sizeof(r));
			int try_again = (0 == bytes_written || (bytes_written < 0 && EAGAIN == errno));
			int success = (sizeof(r) == (size_t)bytes_written);

			if (success)
				break;

			if (!try_again) {
				perror("write - child failed to write benchmark result to pipe");
				exit(1);
			}

			if (5 < loop_count) {
				applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count);
				exit(1);
			}
			++loop_count;
			sleep(1);
		}
		exit(0);
	}

	// Parent waits for a result from child
	int loop_count = 0;
	while (1) {
		// Wait for child to die
		int status;
		int r = waitpid(child_pid, &status, WNOHANG);
		if ((child_pid == r) || (r < 0 && ECHILD == errno)) {
			// Child died somehow. Grab result and bail
			double tmp;
			ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp));
			if (sizeof(tmp) == (size_t)bytes_read)
				rate = tmp;
			break;
		} else if (r < 0) {
			perror("bench_algo: waitpid failed. giving up.");
			exit(1);
		}
		// Give up on the child after about 60 seconds
		if (60 < loop_count) {
			kill(child_pid, SIGKILL);
			waitpid(child_pid, &status, 0);
			break;
		}

		// Wait a bit longer
		++loop_count;
		sleep(1);
	}

	// Close pipe
	r = close(pfd[0]);
	if (r < 0) {
		perror("close - failed to close read end of pipe for --algo auto");
		exit(1);
	}
	r = close(pfd[1]);
	if (r < 0) {
		perror("close - failed to close write end of pipe for --algo auto");
		exit(1);
	}

#elif defined(WIN32)

	// Get handle to current exe
	HINSTANCE module = GetModuleHandle(0);
	if (!module) {
		applog(LOG_ERR, "failed to retrieve module handle");
		exit(1);
	}

	// Create a unique name
	char unique_name[33];
	snprintf(
		unique_name,
		sizeof(unique_name)-1,
		"bfgminer-%p",
		(void*)module
	);
	// Create and init a chunk of shared memory
	HANDLE map_handle = CreateFileMapping(
		INVALID_HANDLE_VALUE,   // use paging file
		NULL,                   // default security attributes
		PAGE_READWRITE,         // read/write access
		0,                      // size: high 32-bits
		4096,                   // size: low 32-bits
		unique_name             // name of map object
	);
	if (NULL == map_handle) {
		applog(LOG_ERR, "could not create shared memory");
		exit(1);
	}

	void *shared_mem = MapViewOfFile(
		map_handle,     // object to map view of
		FILE_MAP_WRITE, // read/write access
		0,              // high offset: map from
		0,              // low offset: beginning
		0               // default: map entire file
	);
	if (NULL == shared_mem) {
		applog(LOG_ERR, "could not map shared memory");
		exit(1);
	}
	SetEnvironmentVariable("BFGMINER_SHARED_MEM", unique_name);
	CopyMemory(shared_mem, &rate, sizeof(rate));

	// Get path to current exe
	char cmd_line[256 + MAX_PATH];
	const size_t n = sizeof(cmd_line)-200;
	DWORD size = GetModuleFileName(module, cmd_line, n);
	if (0 == size) {
		applog(LOG_ERR, "failed to retrieve module path");
		exit(1);
	}

	// Construct new command line based on that
	char *p = strlen(cmd_line) + cmd_line;
	sprintf(p, " --bench-algo %d", algo);
	SetEnvironmentVariable("BFGMINER_BENCH_ALGO", "1");

	// Launch a debug copy of BFGMiner
	STARTUPINFO startup_info;
	PROCESS_INFORMATION process_info;
	ZeroMemory(&startup_info, sizeof(startup_info));
	ZeroMemory(&process_info, sizeof(process_info));
	startup_info.cb = sizeof(startup_info);

	BOOL ok = CreateProcess(
		NULL,                   // No module name (use command line)
		cmd_line,               // Command line
		NULL,                   // Process handle not inheritable
		NULL,                   // Thread handle not inheritable
		FALSE,                  // Set handle inheritance to FALSE
		DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child
		NULL,                   // Use parent's environment block
		NULL,                   // Use parent's starting directory
		&startup_info,          // Pointer to STARTUPINFO structure
		&process_info           // Pointer to PROCESS_INFORMATION structure
	);
	if (!ok) {
		applog(LOG_ERR, "CreateProcess failed with error %ld\n", (long)GetLastError());
		exit(1);
	}

	// Debug the child (only clean way to catch exceptions)
	while (1) {
		// Wait for child to do something
		DEBUG_EVENT debug_event;
		ZeroMemory(&debug_event, sizeof(debug_event));

		BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000);
		if (!ok)
			break;

		// Decide if event is "normal"
		int go_on =
			CREATE_PROCESS_DEBUG_EVENT == debug_event.dwDebugEventCode ||
			CREATE_THREAD_DEBUG_EVENT  == debug_event.dwDebugEventCode ||
			EXIT_THREAD_DEBUG_EVENT    == debug_event.dwDebugEventCode ||
			EXCEPTION_DEBUG_EVENT      == debug_event.dwDebugEventCode ||
			LOAD_DLL_DEBUG_EVENT       == debug_event.dwDebugEventCode ||
			OUTPUT_DEBUG_STRING_EVENT  == debug_event.dwDebugEventCode ||
			UNLOAD_DLL_DEBUG_EVENT     == debug_event.dwDebugEventCode;
		if (!go_on)
			break;

		// Some exceptions are also "normal", apparently.
		if (EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode) {
			int go_on =
				EXCEPTION_BREAKPOINT == debug_event.u.Exception.ExceptionRecord.ExceptionCode;
			if (!go_on)
				break;
		}

		// If nothing unexpected happened, let child proceed
		ContinueDebugEvent(
			debug_event.dwProcessId,
			debug_event.dwThreadId,
			DBG_CONTINUE
		);
	}

	// Clean up child process
	TerminateProcess(process_info.hProcess, 1);
	CloseHandle(process_info.hProcess);
	CloseHandle(process_info.hThread);

	// Reap return value and cleanup
	CopyMemory(&rate, shared_mem, sizeof(rate));
	(void)UnmapViewOfFile(shared_mem);
	(void)CloseHandle(map_handle);

#else

	// Not linux, not unix, not WIN32 ... do our best
	rate = bench_algo_stage3(algo);

#endif // defined(unix)

	// Done
	return rate;
}
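
// Benchmark a single algorithm and keep track of the fastest one seen so far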
static void bench_algo(
	double *best_rate,
	enum sha256_algos *best_algo,
	enum sha256_algos algo
)
{
	size_t n = max_name_len - strlen(algo_names[algo]);
	memset(name_spaces_pad, ' ', n);
	name_spaces_pad[n] = 0;

	applog(
		LOG_ERR,
		"\"%s\"%s : benchmarking algorithm ...",
		algo_names[algo],
		name_spaces_pad
	);

	double rate = bench_algo_stage2(algo);
	if (rate < 0.0) {
		applog(
			LOG_ERR,
			"\"%s\"%s : algorithm fails on this platform",
			algo_names[algo],
			name_spaces_pad
		);
	} else {
		applog(
			LOG_ERR,
			"\"%s\"%s : algorithm runs at %.5f MH/s",
			algo_names[algo],
			name_spaces_pad,
			rate
		);
		if (*best_rate < rate) {
			*best_rate = rate;
			*best_algo = algo;
		}
	}
}

// Figure out the longest algorithm name
void init_max_name_len()
{
	size_t i;
	size_t nb_names = sizeof(algo_names)/sizeof(algo_names[0]);

	for (i = 0; i < nb_names; ++i) {
		const char *p = algo_names[i];
		size_t name_len = p ? strlen(p) : 0;
		if (max_name_len < name_len)
			max_name_len = name_len;
	}

	name_spaces_pad = (char*)malloc(max_name_len + 16);
	if (0 == name_spaces_pad) {
		perror("malloc failed");
		exit(1);
	}
}

// Pick the fastest CPU hasher
static enum sha256_algos pick_fastest_algo()
{
	double best_rate = -1.0;
	enum sha256_algos best_algo = 0;
	applog(LOG_ERR, "benchmarking all sha256 algorithms ...");

	bench_algo(&best_rate, &best_algo, ALGO_C);

#if defined(WANT_SSE2_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_4WAY);
#endif

#if defined(WANT_VIA_PADLOCK)
	bench_algo(&best_rate, &best_algo, ALGO_VIA);
#endif

	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP);

#if defined(WANT_CRYPTOPP_ASM32)
	bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
#endif

#if defined(WANT_X8632_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
#endif

#if defined(WANT_X8664_SSE2)
	bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
#endif

#if defined(WANT_X8664_SSE4)
	bench_algo(&best_rate, &best_algo, ALGO_SSE4_64);
#endif

#if defined(WANT_ALTIVEC_4WAY)
	bench_algo(&best_rate, &best_algo, ALGO_ALTIVEC_4WAY);
#endif

	size_t n = max_name_len - strlen(algo_names[best_algo]);
	memset(name_spaces_pad, ' ', n);
	name_spaces_pad[n] = 0;
	applog(
		LOG_ERR,
		"\"%s\"%s : is fastest algorithm at %.5f MH/s",
		algo_names[best_algo],
		name_spaces_pad,
		best_rate
	);
	return best_algo;
}

/* FIXME: Use asprintf for better errors. */
char *set_algo(const char *arg, enum sha256_algos *algo)
{
	enum sha256_algos i;

	if (opt_scrypt)
		return "Can only use scrypt algorithm";

	for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
		if (algo_names[i] && !strcmp(arg, algo_names[i])) {
			*algo = i;
			return NULL;
		}
	}
	return "Unknown algorithm";
}

#ifdef WANT_SCRYPT
void set_scrypt_algo(enum sha256_algos *algo)
{
	*algo = ALGO_SCRYPT;
}
#endif

void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
{
	strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
}
#endif

#ifdef WANT_CPUMINE
char *force_nthreads_int(const char *arg, int *i)
{
	forced_n_threads = true;
	return set_int_range(arg, i, 0, 9999);
}
#endif

#ifdef WANT_CPUMINE
static int cpu_autodetect()
{
	int i;

	// Reckon number of cores in the box
#if defined(WIN32)
	{
		DWORD_PTR system_am;
		DWORD_PTR process_am;
		BOOL ok = GetProcessAffinityMask(
			GetCurrentProcess(),
			&system_am,
			&process_am
		);
		if (!ok) {
			applog(LOG_ERR, "couldn't figure out number of processors :(");
			num_processors = 1;
		} else {
			size_t n = 32;
			num_processors = 0;
			while (n--)
				if (process_am & (1<<n))
					++num_processors;
		}
	}
#elif defined(_SC_NPROCESSORS_ONLN)
	num_processors = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(HW_NCPU)
	int req[] = { CTL_HW, HW_NCPU };
	size_t len = sizeof(num_processors);
	sysctl(req, 2, &num_processors, &len, NULL, 0);
#else
	num_processors = 1;
#endif /* !WIN32 */

	if (opt_n_threads < 0 || !forced_n_threads) {
		opt_n_threads = num_processors;
	}
	if (num_processors < 1)
		return 0;

	cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
	if (unlikely(!cpus))
		quit(1, "Failed to calloc cpus");
	for (i = 0; i < opt_n_threads; ++i) {
		struct cgpu_info *cgpu;

		cgpu = &cpus[i];
		cgpu->drv = &cpu_drv;
		cgpu->devtype = "CPU";
		cgpu->deven = DEV_ENABLED;
		cgpu->threads = 1;
		cgpu->kname = algo_names[opt_algo];
		add_cgpu(cgpu);
	}
	return opt_n_threads;
}

static void cpu_detect()
{
	RUNONCE();

	if ((opt_n_threads < 0 || !forced_n_threads)
	 && ((total_devices || total_devices_new) && !opt_usecpu))
		// If there are any other devices, only act if the user has explicitly enabled it
		noserial_detect_manual(&cpu_drv, cpu_autodetect);
	else
		noserial_detect(&cpu_drv, cpu_autodetect);
}
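
/* Protects the one-time automatic selection of opt_algo performed in
 * cpu_thread_init() when --algo auto/fastauto is in effect. */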
static pthread_mutex_t cpualgo_lock;

static bool cpu_thread_prepare(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;

	if (!(cgpu->device_id || thr->device_thread || cgpu->proc_id))
		mutex_init(&cpualgo_lock);

	thread_reportin(thr);

	return true;
}
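
/* Limit the nonce range handed to each cpu_scanhash() call to 0xffff so
 * individual scan passes stay short. */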
static uint64_t cpu_can_limit_work(struct thr_info __maybe_unused *thr)
{
	return 0xffff;
}

static bool cpu_thread_init(struct thr_info *thr)
{
	const int thr_id = thr->id;
	struct cgpu_info *cgpu = thr->cgpu;

	mutex_lock(&cpualgo_lock);
	switch (opt_algo) {
		case ALGO_AUTO:
		case ALGO_FASTAUTO:
			opt_algo = pick_fastest_algo();
		default:
			break;
	}
	mutex_unlock(&cpualgo_lock);
	cgpu->kname = algo_names[opt_algo];

	/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
	 * and if that fails, then SCHED_BATCH. No need for this to be an
	 * error if it fails */
	setpriority(PRIO_PROCESS, 0, 19);
	drop_policy();

	/* Cpu affinity only makes sense if the number of threads is a multiple
	 * of the number of CPUs */
	if (!(opt_n_threads % num_processors))
		affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors);

	return true;
}
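
/* Scan the work's nonce range with the selected sha256_func, submit any
 * nonce found, and return the number of nonces actually scanned so the
 * core can account hashes. */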
static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
{
	unsigned char hash1[64];

	uint32_t first_nonce = work->blk.nonce;
	uint32_t last_nonce;
	bool rc;

	memcpy(&hash1[0], &hash1_init[0], sizeof(hash1));

CPUSearch:
	last_nonce = first_nonce;
	rc = false;

	/* scan nonces for a proof-of-work hash */
	{
		sha256_func func = sha256_funcs[opt_algo];
		rc = (*func)(
			thr,
			work->midstate,
			work->data,
			hash1,
			work->hash,
			work->target,
			max_nonce,
			&last_nonce,
			work->blk.nonce
		);
	}

	/* if nonce found, submit work */
	if (unlikely(rc)) {
		applog(LOG_DEBUG, "%"PRIpreprv" found something?", thr->cgpu->proc_repr);
		submit_nonce(thr, work, le32toh(*(uint32_t*)&work->data[76]));
		work->blk.nonce = last_nonce + 1;
		goto CPUSearch;
	}
	else
	if (unlikely(last_nonce == first_nonce))
		return 0;

	work->blk.nonce = last_nonce + 1;
	return last_nonce - first_nonce + 1;
}

struct device_drv cpu_drv = {
	.dname = "cpu",
	.name = "CPU",
	.drv_detect = cpu_detect,
	.thread_prepare = cpu_thread_prepare,
	.can_limit_work = cpu_can_limit_work,
	.thread_init = cpu_thread_init,
	.scanhash = cpu_scanhash,
};
#endif