driver-cpu.c

/*
 * Copyright 2011-2013 Con Kolivas
 * Copyright 2011-2014 Luke Dashjr
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef WIN32
#include <sys/wait.h>
#include <sys/resource.h>
#endif
#include <libgen.h>

#include "compat.h"
#include "deviceapi.h"
#include "miner.h"
#include "logging.h"
#include "util.h"
#include "driver-cpu.h"

#if defined(unix)
#include <errno.h>
#include <fcntl.h>
#endif

BFG_REGISTER_DRIVER(cpu_drv)

struct cgpu_info *cpus;

#if defined(__linux) && defined(CPU_ZERO) /* Linux specific policy and affinity management */
#include <sched.h>
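
/* On Linux, push mining threads into the lowest scheduling classes available:
 * SCHED_IDLE when the kernel supports it, falling back to SCHED_BATCH. */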
static inline void drop_policy(void)
{
    struct sched_param param;
    param.sched_priority = 0;

#ifdef SCHED_BATCH
#ifdef SCHED_IDLE
    if (unlikely(sched_setscheduler(0, SCHED_IDLE, &param) == -1))
#endif
        sched_setscheduler(0, SCHED_BATCH, &param);
#endif
}

static inline void affine_to_cpu(int id, int cpu)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(cpu, &set);
    sched_setaffinity(0, sizeof(set), &set);
    applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu);
}
#else
static inline void drop_policy(void)
{
}

static inline void affine_to_cpu(int __maybe_unused id, int __maybe_unused cpu)
{
}
#endif

/* TODO: resolve externals */
extern char *set_int_range(const char *arg, int *i, int min, int max);
extern int dev_from_id(int thr_id);

/* chipset-optimized hash functions */
typedef bool (*sha256_func)(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);

extern bool ScanHash_4WaySSE2(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool ScanHash_altivec_4way(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_via(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_c(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_cryptopp(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_asm32(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_sse2_64(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_sse4_64(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_sse2_32(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);
extern bool scanhash_scrypt(struct thr_info *, struct work *, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce);

#ifdef USE_SHA256D
static size_t max_name_len = 0;
static char *name_spaces_pad = NULL;
#endif

const char *algo_names[] = {
#ifdef USE_SHA256D
    [ALGO_C] = "c",
#ifdef WANT_SSE2_4WAY
    [ALGO_4WAY] = "4way",
#endif
#ifdef WANT_VIA_PADLOCK
    [ALGO_VIA] = "via",
#endif
    [ALGO_CRYPTOPP] = "cryptopp",
#ifdef WANT_CRYPTOPP_ASM32
    [ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32",
#endif
#ifdef WANT_X8632_SSE2
    [ALGO_SSE2_32] = "sse2_32",
#endif
#ifdef WANT_X8664_SSE2
    [ALGO_SSE2_64] = "sse2_64",
#endif
#ifdef WANT_X8664_SSE4
    [ALGO_SSE4_64] = "sse4_64",
#endif
#ifdef WANT_ALTIVEC_4WAY
    [ALGO_ALTIVEC_4WAY] = "altivec_4way",
#endif
#endif
#ifdef WANT_SCRYPT
    [ALGO_SCRYPT] = "scrypt",
#endif
#ifdef USE_SHA256D
    [ALGO_FASTAUTO] = "fastauto",
    [ALGO_AUTO] = "auto",
#endif
};

#ifdef USE_SHA256D
static const sha256_func sha256_funcs[] = {
    [ALGO_C] = (sha256_func)scanhash_c,
#ifdef WANT_SSE2_4WAY
    [ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2,
#endif
#ifdef WANT_ALTIVEC_4WAY
    [ALGO_ALTIVEC_4WAY] = (sha256_func)ScanHash_altivec_4way,
#endif
#ifdef WANT_VIA_PADLOCK
    [ALGO_VIA] = (sha256_func)scanhash_via,
#endif
    [ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp,
#ifdef WANT_CRYPTOPP_ASM32
    [ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32,
#endif
#ifdef WANT_X8632_SSE2
    [ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32,
#endif
#ifdef WANT_X8664_SSE2
    [ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64,
#endif
#ifdef WANT_X8664_SSE4
    [ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64,
#endif
};
#endif

#ifdef USE_SHA256D
enum sha256_algos opt_algo = ALGO_FASTAUTO;
#endif

static bool forced_n_threads;

#ifdef USE_SHA256D
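/* hash1_init is the constant second SHA-256 message block used for the outer
 * hash of double-SHA256: words 0-7 receive the first-stage digest, word 8 is
 * the padding bit (0x80000000), and word 15 is the message length in bits
 * (0x100 = 256). */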
const uint32_t hash1_init[] = {
    0,0,0,0,0,0,0,0,
    0x80000000,
    0,0,0,0,0,0,
    0x100,
};

// Algo benchmark, crash-prone, system independent stage
double bench_algo_stage3(
    enum sha256_algos algo
)
{
    struct work work __attribute__((aligned(128)));
    get_benchmark_work(&work, false);

    static struct thr_info dummy;

    struct timeval end;
    struct timeval start;
    uint32_t max_nonce = opt_algo == ALGO_FASTAUTO ? (1<<8) : (1<<22);
    uint32_t last_nonce = 0;

    timer_set_now(&start);
    {
        sha256_func func = sha256_funcs[algo];
        (*func)(
            &dummy,
            &work,
            max_nonce,
            &last_nonce,
            0
        );
    }
    timer_set_now(&end);

    uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
    uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
    uint64_t usec_elapsed = usec_end - usec_start;
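
    // last_nonce+1 hashes were attempted in usec_elapsed microseconds;
    // one hash per microsecond is 1 MH/s, so the ratio below is already in MH/s.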
    double rate = -1.0;
    if (0<usec_elapsed) {
        rate = (1.0*(last_nonce+1))/usec_elapsed;
    }
    return rate;
}

#if defined(unix)

// Change non-blocking status on a file descriptor
static void set_non_blocking(
    int fd,
    int yes
)
{
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags<0) {
        perror("fcntl(GET) failed");
        exit(1);
    }
    flags = yes ? (flags|O_NONBLOCK) : (flags&~O_NONBLOCK);
    int r = fcntl(fd, F_SETFL, flags);
    if (r<0) {
        perror("fcntl(SET) failed");
        exit(1);
    }
}

#endif // defined(unix)

// Algo benchmark, crash-safe, system-dependent stage
static double bench_algo_stage2(
    enum sha256_algos algo
)
{
    // Here, the gig is to safely run a piece of code that potentially
    // crashes. Unfortunately, the Right Way (tm) to do this is rather
    // heavily platform dependent :(

    double rate = -1.23457;

#if defined(unix)

    // Make a pipe: [readFD, writeFD]
    int pfd[2];
    int r = pipe(pfd);
    if (r<0) {
        perror("pipe - failed to create pipe for --algo auto");
        exit(1);
    }

    // Make pipe non blocking
    set_non_blocking(pfd[0], 1);
    set_non_blocking(pfd[1], 1);

    // Don't allow a crashing child to kill the main process
    sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
    sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
    if (SIG_ERR==sr0 || SIG_ERR==sr1) {
        perror("signal - failed to edit signal mask for --algo auto");
        exit(1);
    }

    // Fork a child to do the actual benchmarking
    pid_t child_pid = fork();
    if (child_pid<0) {
        perror("fork - failed to create a child process for --algo auto");
        exit(1);
    }

    // Do the dangerous work in the child, knowing we might crash
    if (0==child_pid) {

        // TODO: some umask trickery to prevent coredumps

        // Benchmark this algorithm
        double r = bench_algo_stage3(algo);

        // We survived, send result to parent and bail
        int loop_count = 0;
        while (1) {
            ssize_t bytes_written = write(pfd[1], &r, sizeof(r));
            int try_again = (0==bytes_written || (bytes_written<0 && EAGAIN==errno));
            int success = (sizeof(r)==(size_t)bytes_written);

            if (success)
                break;

            if (!try_again) {
                perror("write - child failed to write benchmark result to pipe");
                exit(1);
            }

            if (5<loop_count) {
                applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count);
                exit(1);
            }

            ++loop_count;
            sleep(1);
        }
        exit(0);
    }

    // Parent waits for a result from child
    int loop_count = 0;
    while (1) {

        // Wait for child to die
        int status;
        int r = waitpid(child_pid, &status, WNOHANG);
        if ((child_pid==r) || (r<0 && ECHILD==errno)) {

            // Child died somehow. Grab result and bail
            double tmp;
            ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp));
            if (sizeof(tmp)==(size_t)bytes_read)
                rate = tmp;
            break;

        } else if (r<0) {
            perror("bench_algo: waitpid failed. giving up.");
            exit(1);
        }

        // Give up on the child after ~60s
        if (60<loop_count) {
            kill(child_pid, SIGKILL);
            waitpid(child_pid, &status, 0);
            break;
        }

        // Wait a bit longer
        ++loop_count;
        sleep(1);
    }

    // Close pipe
    r = close(pfd[0]);
    if (r<0) {
        perror("close - failed to close read end of pipe for --algo auto");
        exit(1);
    }
    r = close(pfd[1]);
    if (r<0) {
        perror("close - failed to close write end of pipe for --algo auto");
        exit(1);
    }

#elif defined(WIN32)
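
    // On Windows there is no fork(), so the benchmark re-launches this same
    // executable as a child process, passes the algorithm through the
    // BFGMINER_BENCH_ALGO environment variable, and shares the result via a
    // named file mapping (BFGMINER_SHARED_MEM). The child is started with
    // DEBUG_ONLY_THIS_PROCESS so that a crash surfaces as a debug event
    // instead of taking the parent down.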

    // Get handle to current exe
    HINSTANCE module = GetModuleHandle(0);
    if (!module) {
        applog(LOG_ERR, "failed to retrieve module handle");
        exit(1);
    }

    // Create a unique name
    char unique_name[33];
    snprintf(
        unique_name,
        sizeof(unique_name)-1,
        "bfgminer-%p",
        (void*)module
    );

    // Create and init a chunk of shared memory
    HANDLE map_handle = CreateFileMapping(
        INVALID_HANDLE_VALUE,   // use paging file
        NULL,                   // default security attributes
        PAGE_READWRITE,         // read/write access
        0,                      // size: high 32-bits
        4096,                   // size: low 32-bits
        unique_name             // name of map object
    );
    if (NULL==map_handle) {
        applog(LOG_ERR, "could not create shared memory");
        exit(1);
    }

    void *shared_mem = MapViewOfFile(
        map_handle,     // object to map view of
        FILE_MAP_WRITE, // read/write access
        0,              // high offset: map from
        0,              // low offset: beginning
        0               // default: map entire file
    );
    if (NULL==shared_mem) {
        applog(LOG_ERR, "could not map shared memory");
        exit(1);
    }
    SetEnvironmentVariable("BFGMINER_SHARED_MEM", unique_name);
    CopyMemory(shared_mem, &rate, sizeof(rate));

    // Get path to current exe
    char cmd_line[256 + MAX_PATH];
    const size_t n = sizeof(cmd_line)-200;
    DWORD size = GetModuleFileName(module, cmd_line, n);
    if (0==size) {
        applog(LOG_ERR, "failed to retrieve module path");
        exit(1);
    }

    // Construct new command line based on that
    char buf[0x20];
    snprintf(buf, sizeof(buf), "%d", algo);
    SetEnvironmentVariable("BFGMINER_BENCH_ALGO", buf);

    // Launch a debug copy of BFGMiner
    STARTUPINFO startup_info;
    PROCESS_INFORMATION process_info;
    ZeroMemory(&startup_info, sizeof(startup_info));
    ZeroMemory(&process_info, sizeof(process_info));
    startup_info.cb = sizeof(startup_info);

    BOOL ok = CreateProcess(
        NULL,                   // No module name (use command line)
        cmd_line,               // Command line
        NULL,                   // Process handle not inheritable
        NULL,                   // Thread handle not inheritable
        FALSE,                  // Set handle inheritance to FALSE
        DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child
        NULL,                   // Use parent's environment block
        NULL,                   // Use parent's starting directory
        &startup_info,          // Pointer to STARTUPINFO structure
        &process_info           // Pointer to PROCESS_INFORMATION structure
    );
    if (!ok) {
        applog(LOG_ERR, "CreateProcess failed with error %ld\n", (long)GetLastError());
        exit(1);
    }

    // Debug the child (only clean way to catch exceptions)
    while (1) {

        // Wait for child to do something
        DEBUG_EVENT debug_event;
        ZeroMemory(&debug_event, sizeof(debug_event));
        BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000);
        if (!ok)
            break;

        // Decide if event is "normal"
        int go_on =
            CREATE_PROCESS_DEBUG_EVENT == debug_event.dwDebugEventCode ||
            CREATE_THREAD_DEBUG_EVENT  == debug_event.dwDebugEventCode ||
            EXIT_THREAD_DEBUG_EVENT    == debug_event.dwDebugEventCode ||
            EXCEPTION_DEBUG_EVENT      == debug_event.dwDebugEventCode ||
            LOAD_DLL_DEBUG_EVENT       == debug_event.dwDebugEventCode ||
            OUTPUT_DEBUG_STRING_EVENT  == debug_event.dwDebugEventCode ||
            UNLOAD_DLL_DEBUG_EVENT     == debug_event.dwDebugEventCode;
        if (!go_on)
            break;

        // Some exceptions are also "normal", apparently.
        if (EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode) {
            int go_on =
                EXCEPTION_BREAKPOINT == debug_event.u.Exception.ExceptionRecord.ExceptionCode;
            if (!go_on)
                break;
        }

        // If nothing unexpected happened, let child proceed
        ContinueDebugEvent(
            debug_event.dwProcessId,
            debug_event.dwThreadId,
            DBG_CONTINUE
        );
    }

    // Clean up child process
    TerminateProcess(process_info.hProcess, 1);
    CloseHandle(process_info.hProcess);
    CloseHandle(process_info.hThread);

    // Reap return value and clean up
    CopyMemory(&rate, shared_mem, sizeof(rate));
    (void)UnmapViewOfFile(shared_mem);
    (void)CloseHandle(map_handle);

#else

    // Not Linux, not unix, not WIN32 ... do our best
    rate = bench_algo_stage3(algo);

#endif // defined(unix)

    // Done
    return rate;
}

static void bench_algo(
    double *best_rate,
    enum sha256_algos *best_algo,
    enum sha256_algos algo
)
{
    size_t n = max_name_len - strlen(algo_names[algo]);
    memset(name_spaces_pad, ' ', n);
    name_spaces_pad[n] = 0;

    applog(
        LOG_ERR,
        "\"%s\"%s : benchmarking algorithm ...",
        algo_names[algo],
        name_spaces_pad
    );

    double rate = bench_algo_stage2(algo);
    if (rate<0.0) {
        applog(
            LOG_ERR,
            "\"%s\"%s : algorithm fails on this platform",
            algo_names[algo],
            name_spaces_pad
        );
    } else {
        applog(
            LOG_ERR,
            "\"%s\"%s : algorithm runs at %.5f MH/s",
            algo_names[algo],
            name_spaces_pad,
            rate
        );
        if (*best_rate<rate) {
            *best_rate = rate;
            *best_algo = algo;
        }
    }
}

// Figure out the longest algorithm name
void init_max_name_len()
{
    size_t i;
    size_t nb_names = sizeof(algo_names)/sizeof(algo_names[0]);

    for (i=0; i<nb_names; ++i) {
        const char *p = algo_names[i];
        size_t name_len = p ? strlen(p) : 0;
        if (max_name_len<name_len)
            max_name_len = name_len;
    }

    name_spaces_pad = (char*) malloc(max_name_len+16);
    if (0==name_spaces_pad) {
        perror("malloc failed");
        exit(1);
    }
}

// Pick the fastest CPU hasher
static enum sha256_algos pick_fastest_algo()
{
    double best_rate = -1.0;
    enum sha256_algos best_algo = 0;
    applog(LOG_ERR, "benchmarking all sha256 algorithms ...");

    bench_algo(&best_rate, &best_algo, ALGO_C);

#if defined(WANT_SSE2_4WAY)
    bench_algo(&best_rate, &best_algo, ALGO_4WAY);
#endif

#if defined(WANT_VIA_PADLOCK)
    bench_algo(&best_rate, &best_algo, ALGO_VIA);
#endif

    bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP);

#if defined(WANT_CRYPTOPP_ASM32)
    bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
#endif

#if defined(WANT_X8632_SSE2)
    bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
#endif

#if defined(WANT_X8664_SSE2)
    bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
#endif

#if defined(WANT_X8664_SSE4)
    bench_algo(&best_rate, &best_algo, ALGO_SSE4_64);
#endif

#if defined(WANT_ALTIVEC_4WAY)
    bench_algo(&best_rate, &best_algo, ALGO_ALTIVEC_4WAY);
#endif

    size_t n = max_name_len - strlen(algo_names[best_algo]);
    memset(name_spaces_pad, ' ', n);
    name_spaces_pad[n] = 0;
    applog(
        LOG_ERR,
        "\"%s\"%s : is fastest algorithm at %.5f MH/s",
        algo_names[best_algo],
        name_spaces_pad,
        best_rate
    );
    return best_algo;
}

char *set_algo(const char *arg, enum sha256_algos *algo)
{
    enum sha256_algos i;

    for (i = 0; i < ARRAY_SIZE(algo_names); i++) {
        if (algo_names[i] && !strcmp(arg, algo_names[i])) {
            *algo = i;
            return NULL;
        }
    }
    return "Unknown algorithm";
}

void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo)
{
    strncpy(buf, algo_names[*algo], OPT_SHOW_LEN);
}
#endif /* USE_SHA256D */

char *force_nthreads_int(const char *arg, int *i)
{
    forced_n_threads = true;
    return set_int_range(arg, i, 0, 9999);
}

static int cpu_autodetect()
{
    RUNONCE(0);

    int i;

    // Reckon number of cores in the box
#if defined(WIN32)
    {
        DWORD_PTR system_am;
        DWORD_PTR process_am;
        BOOL ok = GetProcessAffinityMask(
            GetCurrentProcess(),
            &system_am,
            &process_am
        );
        if (!ok) {
            applog(LOG_ERR, "couldn't figure out number of processors :(");
            num_processors = 1;
        } else {
            size_t n = 32;
            num_processors = 0;
            while (n--)
                if (process_am & (1<<n))
                    ++num_processors;
        }
    }
#elif defined(_SC_NPROCESSORS_CONF)
    num_processors = sysconf(_SC_NPROCESSORS_CONF);
#elif defined(CTL_HW) && defined(HW_NCPU)
    int req[] = { CTL_HW, HW_NCPU };
    size_t len = sizeof(num_processors);
    sysctl(req, 2, &num_processors, &len, NULL, 0);
#else
    num_processors = 1;
#endif /* !WIN32 */

    if (opt_n_threads < 0 || !forced_n_threads) {
        opt_n_threads = num_processors;
    }

    if (num_processors < 1)
        return 0;

    cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
    if (unlikely(!cpus))
        quit(1, "Failed to calloc cpus");
    for (i = 0; i < opt_n_threads; ++i) {
        struct cgpu_info *cgpu;

        cgpu = &cpus[i];
        cgpu->drv = &cpu_drv;
        cgpu->deven = DEV_ENABLED;
        cgpu->threads = 1;
#ifdef USE_SHA256D
        cgpu->kname = algo_names[opt_algo];
#endif
        add_cgpu(cgpu);
    }
    return opt_n_threads;
}

static void cpu_detect()
{
    noserial_detect_manual(&cpu_drv, cpu_autodetect);
}

static pthread_mutex_t cpualgo_lock;
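
/* cpualgo_lock serialises the one-time algorithm auto-selection in
 * cpu_thread_init; only the very first CPU thread (device 0, processor 0,
 * device thread 0) initialises it in cpu_thread_prepare. */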
static bool cpu_thread_prepare(struct thr_info *thr)
{
    struct cgpu_info *cgpu = thr->cgpu;

    if (!(cgpu->device_id || thr->device_thread || cgpu->proc_id))
        mutex_init(&cpualgo_lock);

    thread_reportin(thr);

    return true;
}

static uint64_t cpu_can_limit_work(struct thr_info __maybe_unused *thr)
{
    return 0xffff;
}

static bool cpu_thread_init(struct thr_info *thr)
{
    const int thr_id = thr->id;

#ifdef USE_SHA256D
    struct cgpu_info *cgpu = thr->cgpu;

    mutex_lock(&cpualgo_lock);
    switch (opt_algo)
    {
        case ALGO_AUTO:
        case ALGO_FASTAUTO:
            opt_algo = pick_fastest_algo();
        default:
            break;
    }
    mutex_unlock(&cpualgo_lock);
    cgpu->kname = algo_names[opt_algo];
#endif

    /* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
     * and if that fails, then SCHED_BATCH. No need for this to be an
     * error if it fails */
    setpriority(PRIO_PROCESS, 0, 19);
    drop_policy();

    /* CPU affinity only makes sense if the number of threads is a multiple
     * of the number of CPUs */
    if (num_processors > 1 && opt_n_threads % num_processors == 0)
        affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors);

    return true;
}

static
float cpu_min_nonce_diff(struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
    return minimum_pdiff;
}
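
/* Generic scanhash: iterates the nonce in place at byte offset 0x4c (76) of
 * the 80-byte block header, hashes with the algorithm's hash_data_f, and does
 * a cheap first-pass check by comparing only the most significant 32 bits of
 * the hash (word 7) against the same word of the target. */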
static
bool scanhash_generic(struct thr_info * const thr, struct work * const work, const uint32_t max_nonce, uint32_t * const last_nonce, uint32_t n)
{
    struct mining_algorithm * const malgo = work_mining_algorithm(work);
    void (* const hash_data_f)(void *, const void *) = malgo->hash_data_f;
    uint8_t * const hash = work->hash;
    uint8_t *data = work->data;
    const uint8_t * const target = work->target;
    uint32_t * const out_nonce = (uint32_t *)&data[0x4c];
    bool ret = false;

    const uint32_t hash7_targ = le32toh(((const uint32_t *)target)[7]);
    uint32_t * const hash7_tmp = &((uint32_t *)hash)[7];

    while (true)
    {
        *out_nonce = n;

        hash_data_f(hash, data);

        if (unlikely(le32toh(*hash7_tmp) <= hash7_targ))
        {
            ret = true;
            break;
        }

        if ((n >= max_nonce) || thr->work_restart)
            break;

        n++;
    }

    *last_nonce = n;
    return ret;
}
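
/* Scan nonces until a candidate share is found, the nonce range is exhausted,
 * or a restart is requested. A found candidate is submitted and the scan
 * resumes from the next nonce; the return value is the number of nonces
 * covered. */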
static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
{
    uint32_t first_nonce = work->blk.nonce;
    uint32_t last_nonce;
    bool rc;

CPUSearch:
    last_nonce = first_nonce;
    rc = false;

    /* scan nonces for a proof-of-work hash */
    {
        sha256_func func = scanhash_generic;
        switch (work_mining_algorithm(work)->algo)
        {
#ifdef USE_SCRYPT
            case POW_SCRYPT:
                func = scanhash_scrypt;
                break;
#endif
#ifdef USE_SHA256D
            case POW_SHA256D:
                if (work->nonce_diff >= 1.)
                    func = sha256_funcs[opt_algo];
                break;
#endif
            default:
                break;
        }
        if (unlikely(!func))
            applogr(0, LOG_ERR, "%"PRIpreprv": Unknown mining algorithm", thr->cgpu->proc_repr);
        rc = (*func)(
            thr,
            work,
            max_nonce,
            &last_nonce,
            work->blk.nonce
        );
    }

    /* if nonce found, submit work */
    if (unlikely(rc)) {
        applog(LOG_DEBUG, "%"PRIpreprv" found something?", thr->cgpu->proc_repr);
        submit_nonce(thr, work, le32toh(*(uint32_t *)&work->data[76]));
        work->blk.nonce = last_nonce + 1;
        goto CPUSearch;
    }
    else
    if (unlikely(last_nonce == first_nonce))
        return 0;

    work->blk.nonce = last_nonce + 1;
    return last_nonce - first_nonce + 1;
}

struct device_drv cpu_drv = {
    .dname = "cpu",
    .name = "CPU",
    .probe_priority = 120,
    .drv_min_nonce_diff = cpu_min_nonce_diff,
    .drv_detect = cpu_detect,
    .thread_prepare = cpu_thread_prepare,
    .can_limit_work = cpu_can_limit_work,
    .thread_init = cpu_thread_init,
    .scanhash = cpu_scanhash,
};