driver-rockminer.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. /*
  2. * Copyright 2014 Luke Dashjr
  3. * Copyright 2014 Nate Woolls
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the Free
  7. * Software Foundation; either version 3 of the License, or (at your option)
  8. * any later version. See COPYING for more details.
  9. */
  10. #include "config.h"
  11. #include <stdbool.h>
  12. #include <stdint.h>
  13. #include <string.h>
  14. #include <unistd.h>
  15. #include "deviceapi.h"
  16. #include "lowlevel.h"
  17. #include "lowl-vcom.h"
  18. #include "miner.h"
// Supported chip clock range in MHz. The device encodes frequency as a
// single byte: (MHz / 10) - 1 (see rockminer_job_buf_set_freq).
#define ROCKMINER_MIN_FREQ_MHZ 200
#define ROCKMINER_DEF_FREQ_MHZ 270
// Above this, rockminer_set_clock requires an explicit "unsafe:" prefix
#define ROCKMINER_MAX_SAFE_FREQ_MHZ 290
#define ROCKMINER_MAX_FREQ_MHZ 640

// Poll/retry intervals, in microseconds
#define ROCKMINER_POLL_US 0
#define ROCKMINER_RETRY_US 5000000
#define ROCKMINER_MIDTASK_TIMEOUT_US 500000
#define ROCKMINER_MIDTASK_RETRY_US 1000000
#define ROCKMINER_TASK_TIMEOUT_US 5273438

// Serial link parameters
#define ROCKMINER_IO_SPEED 115200
#define ROCKMINER_READ_TIMEOUT 1 //deciseconds

// Max number of ROCKMINER_REPLY_SIZE replies read per poll pass
#define ROCKMINER_READ_REPLIES 0x40
#define ROCKMINER_MAX_CHIPS 64
// Work requests are 0x40 bytes; replies are fixed 8-byte frames
#define ROCKMINER_WORK_REQ_SIZE 0x40
#define ROCKMINER_REPLY_SIZE 8
// Message type carried in the low 4 bits of reply byte 4
enum rockminer_replies {
	ROCKMINER_REPLY_NONCE_FOUND = 0,
	ROCKMINER_REPLY_TASK_COMPLETE = 1,
	ROCKMINER_REPLY_GET_TASK = 2,
};

BFG_REGISTER_DRIVER(rockminer_drv)
static const struct bfg_set_device_definition rockminer_set_device_funcs[];
// Per-chip (per-processor) driver state, stored in thr->cgpu_data
struct rockminer_chip_data {
	// Pre-built 0x40-byte work request (framing + frequency already set)
	uint8_t next_work_req[ROCKMINER_WORK_REQ_SIZE];
	// The two most recently sent works, indexed by the device's 1-bit task id
	struct work *works[2];
	uint8_t last_taskid;
	// Deadline after which the chip is assumed to have lost our last task
	struct timeval tv_midtask_timeout;
	// Number of tasks the chip has asked for but not yet been given
	int requested_work;
	// Only used on first chip's struct
	uint8_t incomplete_reply[ROCKMINER_REPLY_SIZE];
	size_t incomplete_reply_sz;
};
  51. static
  52. int rockminer_open(const char *devpath)
  53. {
  54. return serial_open(devpath, ROCKMINER_IO_SPEED, ROCKMINER_READ_TIMEOUT, true);
  55. }
  56. static
  57. void rockminer_log_protocol(int fd, const void *buf, size_t bufLen, const char *prefix)
  58. {
  59. char hex[(bufLen * 2) + 1];
  60. bin2hex(hex, buf, bufLen);
  61. applog(LOG_DEBUG, "%s fd=%d: DEVPROTO: %s %s", rockminer_drv.dname, fd, prefix, hex);
  62. }
  63. static
  64. int rockminer_read(int fd, void *buf, size_t bufLen)
  65. {
  66. int result = read(fd, buf, bufLen);
  67. if (result < 0)
  68. applog(LOG_ERR, "%s: %s fd %d", rockminer_drv.dname, "Failed to read", fd);
  69. else if ((result > 0) && opt_dev_protocol && opt_debug)
  70. rockminer_log_protocol(fd, buf, bufLen, "RECV");
  71. return result;
  72. }
  73. static
  74. int rockminer_write(int fd, const void *buf, size_t bufLen)
  75. {
  76. if (opt_dev_protocol && opt_debug)
  77. rockminer_log_protocol(fd, buf, bufLen, "SEND");
  78. return write(fd, buf, bufLen);
  79. }
  80. static
  81. void rockminer_job_buf_init(uint8_t * const buf, const uint8_t chipid)
  82. {
  83. memset(&buf[0x20], 0, 0x10);
  84. buf[0x30] = 0xaa;
  85. // 0x31 is frequency, filled in elsewhere
  86. buf[0x32] = chipid;
  87. buf[0x33] = 0x55;
  88. }
  89. static
  90. void rockminer_job_buf_set_freq(uint8_t * const buf, const unsigned short freq)
  91. {
  92. buf[0x31] = (freq / 10) - 1;
  93. }
  94. static
  95. bool rockminer_lowl_match(const struct lowlevel_device_info * const info)
  96. {
  97. return lowlevel_match_product(info, "R-BOX miner") || lowlevel_match_product(info, "RX-BOX miner");
  98. }
// Known-answer test vector: a work unit (midstate + last 12 header bytes)
// whose reply is expected to begin with golden_result. Used during
// detection to confirm a live chip 0 before probing further.
static const uint8_t golden_midstate[] = {
	0x4a, 0x54, 0x8f, 0xe4, 0x71, 0xfa, 0x3a, 0x9a,
	0x13, 0x71, 0x14, 0x45, 0x56, 0xc3, 0xf6, 0x4d,
	0x25, 0x00, 0xb4, 0x82, 0x60, 0x08, 0xfe, 0x4b,
	0xbf, 0x76, 0x98, 0xc9, 0x4e, 0xba, 0x79, 0x46,
};
static const uint8_t golden_datatail[] = {
	0xce, 0x22, 0xa7, 0x2f,
	0x4f, 0x67, 0x26, 0x14, 0x1a, 0x0b, 0x32, 0x87,
};
// Expected first 4 reply bytes (the nonce) for the golden work
static const uint8_t golden_result[] = {
	0x00, 0x01, 0x87, 0xa2,
};
  112. int8_t rockminer_bisect_chips(const int fd, uint8_t * const buf)
  113. {
  114. static const int max_concurrent_tests = 4;
  115. int concurrent_tests = max_concurrent_tests;
  116. uint8_t tests[max_concurrent_tests];
  117. uint8_t reply[ROCKMINER_REPLY_SIZE];
  118. uint8_t minvalid = 0, maxvalid = ROCKMINER_MAX_CHIPS - 1;
  119. uint8_t pertest;
  120. char msg[0x10];
  121. ssize_t rsz;
  122. do {
  123. pertest = (maxvalid + 1 - minvalid) / concurrent_tests;
  124. if (!pertest)
  125. pertest = 1;
  126. msg[0] = '\0';
  127. for (int i = 0; i < concurrent_tests; ++i)
  128. {
  129. uint8_t chipid = (minvalid + pertest * (i + 1)) - 1;
  130. if (chipid > maxvalid)
  131. {
  132. concurrent_tests = i;
  133. break;
  134. }
  135. tests[i] = chipid;
  136. buf[0x32] = chipid;
  137. if (rockminer_write(fd, buf, ROCKMINER_WORK_REQ_SIZE) != ROCKMINER_WORK_REQ_SIZE)
  138. applogr(-1, LOG_DEBUG, "%s(%d): Error sending request for chip %d", __func__, fd, chipid);
  139. tailsprintf(msg, sizeof(msg), "%d ", chipid);
  140. }
  141. msg[strlen(msg)-1] = '\0';
  142. applog(LOG_DEBUG, "%s(%d): Testing chips %s (within range %d-%d)", __func__, fd, msg, minvalid, maxvalid);
  143. while ( (rsz = rockminer_read(fd, reply, sizeof(reply))) == sizeof(reply))
  144. {
  145. const uint8_t chipid = reply[5] & 0x3f;
  146. if (chipid > minvalid)
  147. {
  148. applog(LOG_DEBUG, "%s(%d): Saw chip %d", __func__, fd, chipid);
  149. minvalid = chipid;
  150. if (minvalid >= tests[concurrent_tests-1])
  151. break;
  152. }
  153. }
  154. for (int i = concurrent_tests; i--; )
  155. {
  156. if (tests[i] > minvalid)
  157. {
  158. applog(LOG_DEBUG, "%s(%d): Didn't see chip %d", __func__, fd, tests[i]);
  159. maxvalid = tests[i] - 1;
  160. }
  161. else
  162. break;
  163. }
  164. } while (minvalid != maxvalid);
  165. return maxvalid + 1;
  166. }
// Probe devpath for an R-BOX device: send the golden work to chip 0, wait
// for the known-answer nonce, count the chips, then register a cgpu with
// one processor per chip. Returns true if a device was added.
static
bool rockminer_detect_one(const char * const devpath)
{
	int fd, chips;
	uint8_t buf[ROCKMINER_WORK_REQ_SIZE], reply[ROCKMINER_REPLY_SIZE];
	ssize_t rsz;
	fd = rockminer_open(devpath);
	if (fd < 0)
		return_via_applog(err, , LOG_DEBUG, "%s: %s %s", rockminer_drv.dname, "Failed to open", devpath);
	applog(LOG_DEBUG, "%s: %s %s", rockminer_drv.dname, "Successfully opened", devpath);
	// Build the golden work request at the minimum (safest) frequency
	rockminer_job_buf_init(buf, 0);
	rockminer_job_buf_set_freq(buf, ROCKMINER_MIN_FREQ_MHZ);
	memcpy(&buf[ 0], golden_midstate, 0x20);
	memcpy(&buf[0x34], golden_datatail, 0xc);
	if (rockminer_write(fd, buf, sizeof(buf)) != sizeof(buf))
		return_via_applog(err, , LOG_DEBUG, "%s: %s %s", rockminer_drv.dname, "Error sending request to ", devpath);
	// Discard replies until the expected golden nonce arrives; a short
	// read (timeout) means no chip answered and detection fails
	while (true)
	{
		rsz = rockminer_read(fd, reply, sizeof(reply));
		if (rsz != sizeof(reply))
			return_via_applog(err, , LOG_DEBUG, "%s: Short read from %s (%d)", rockminer_drv.dname, devpath, (int)rsz);
		if ((!memcmp(reply, golden_result, sizeof(golden_result))) && (reply[4] & 0xf) == ROCKMINER_REPLY_NONCE_FOUND)
			break;
	}
	applog(LOG_DEBUG, "%s: Found chip 0 on %s, probing for total chip count", rockminer_drv.dname, devpath);
	chips = rockminer_bisect_chips(fd, buf);
	applog(LOG_DEBUG, "%s: Identified %d chips on %s", rockminer_drv.dname, chips, devpath);
	if (serial_claim_v(devpath, &rockminer_drv))
		goto err;
	// Port is reopened later by rockminer_poll; close the probe handle now
	serial_close(fd);
	// NOTE(review): malloc result is used unchecked here — verify against
	// project policy for OOM handling
	struct cgpu_info * const cgpu = malloc(sizeof(*cgpu));
	*cgpu = (struct cgpu_info){
		.drv = &rockminer_drv,
		.set_device_funcs = rockminer_set_device_funcs,
		.device_path = strdup(devpath),
		.deven = DEV_ENABLED,
		.procs = chips,
		.threads = 1,
	};
	// NOTE: Xcode's clang has a bug where it cannot find fields inside anonymous unions (more details in fpgautils)
	cgpu->device_fd = -1;
	return add_cgpu(cgpu);

err:
	if (fd >= 0)
		serial_close(fd);
	return false;
}
// Thin adapter: run the common VCOM probe, which invokes
// rockminer_detect_one for each matching serial port
static
bool rockminer_lowl_probe(const struct lowlevel_device_info * const info)
{
	return vcom_lowl_probe_wrapper(info, rockminer_detect_one);
}
  219. static
  220. bool rockminer_init(struct thr_info * const master_thr)
  221. {
  222. struct cgpu_info * const dev = master_thr->cgpu;
  223. for_each_managed_proc(proc, dev)
  224. {
  225. struct thr_info * const thr = proc->thr[0];
  226. struct rockminer_chip_data * const chip = malloc(sizeof(*chip));
  227. thr->cgpu_data = chip;
  228. *chip = (struct rockminer_chip_data){
  229. .last_taskid = 0,
  230. };
  231. rockminer_job_buf_init(chip->next_work_req, proc->proc_id);
  232. rockminer_job_buf_set_freq(chip->next_work_req, ROCKMINER_DEF_FREQ_MHZ);
  233. }
  234. timer_set_now(&master_thr->tv_poll);
  235. return true;
  236. }
  237. static
  238. void rockminer_dead(struct cgpu_info * const dev)
  239. {
  240. serial_close(dev->device_fd);
  241. dev->device_fd = -1;
  242. for_each_managed_proc(proc, dev)
  243. {
  244. struct thr_info * const thr = proc->thr[0];
  245. thr->queue_full = true;
  246. }
  247. }
  248. static
  249. bool rockminer_send_work(struct thr_info * const thr)
  250. {
  251. struct cgpu_info * const proc = thr->cgpu;
  252. struct cgpu_info * const dev = proc->device;
  253. struct rockminer_chip_data * const chip = thr->cgpu_data;
  254. const int fd = dev->device_fd;
  255. return (rockminer_write(fd, chip->next_work_req, sizeof(chip->next_work_req)) == sizeof(chip->next_work_req));
  256. }
// Accept one work item from the scheduler: copy midstate + data tail into
// the prepared request and send it immediately. Returns false (work not
// consumed) if the device is closed, has not requested work, or the send
// fails.
static
bool rockminer_queue_append(struct thr_info * const thr, struct work * const work)
{
	struct cgpu_info * const proc = thr->cgpu;
	struct cgpu_info * const dev = proc->device;
	struct rockminer_chip_data * const chip = thr->cgpu_data;
	const int fd = dev->device_fd;
	if (fd < 0 || !chip->requested_work)
	{
		thr->queue_full = true;
		return false;
	}
	memcpy(&chip->next_work_req[ 0], work->midstate, 0x20);
	memcpy(&chip->next_work_req[0x34], &work->data[0x40], 0xc);
	if (!rockminer_send_work(thr))
	{
		rockminer_dead(dev);
		inc_hw_errors_only(thr);
		applogr(false, LOG_ERR, "%"PRIpreprv": Failed to send work", proc->proc_repr);
	}
	// The device alternates a 1-bit task id; track the work under the id
	// it will be reported with, freeing whatever that slot held before
	chip->last_taskid = chip->last_taskid ? 0 : 1;
	if (chip->works[chip->last_taskid])
		free_work(chip->works[chip->last_taskid]);
	chip->works[chip->last_taskid] = work;
	// Arm the resend timer in case the chip never acknowledges this task
	timer_set_delay_from_now(&chip->tv_midtask_timeout, ROCKMINER_MIDTASK_RETRY_US);
	applog(LOG_DEBUG, "%"PRIpreprv": Work %d queued as task %d", proc->proc_repr, work->id, chip->last_taskid);
	if (!--chip->requested_work)
		thr->queue_full = true;
	return true;
}
static
void rockminer_queue_flush(__maybe_unused struct thr_info * const thr)
{
	// Intentionally a no-op: work already sent to the device cannot be
	// recalled; stale work is instead detected in rockminer_poll via
	// stale_work() before any resend.
}
// Master poll loop: (re)open the device if needed, drain and dispatch all
// pending 8-byte replies (nonces, task completions, work requests), then
// resend any task whose chip went silent past its timeout.
static
void rockminer_poll(struct thr_info * const master_thr)
{
	struct cgpu_info * const dev = master_thr->cgpu;
	struct rockminer_chip_data * const master_chip = master_thr->cgpu_data;
	int fd = dev->device_fd;
	uint8_t buf[ROCKMINER_REPLY_SIZE * ROCKMINER_READ_REPLIES], *reply;
	ssize_t rsz;
	if (fd < 0)
	{
		// Device is closed (initial state or after rockminer_dead): reopen
		fd = rockminer_open(dev->device_path);
		if (fd < 0)
		{
			// Reopen failed: charge each processor a HW error and retry later
			timer_set_delay_from_now(&master_thr->tv_poll, ROCKMINER_RETRY_US);
			for_each_managed_proc(proc, dev)
			{
				struct thr_info * const thr = proc->thr[0];
				inc_hw_errors_only(thr);
			}
			applogr(, LOG_ERR, "%s: Failed to open %s", dev->dev_repr, dev->device_path);
		}
		dev->device_fd = fd;
		// Fresh connection: allow one task per chip and arm timeouts
		struct timeval tv_timeout;
		timer_set_delay_from_now(&tv_timeout, ROCKMINER_TASK_TIMEOUT_US);
		for_each_managed_proc(proc, dev)
		{
			struct thr_info * const thr = proc->thr[0];
			struct rockminer_chip_data * const chip = thr->cgpu_data;
			chip->requested_work = 1;
			thr->queue_full = false;
			chip->tv_midtask_timeout = tv_timeout;
		}
	}
	bool maybe_more_to_read = true;
	while (maybe_more_to_read)
	{
		// Reserve room at the front of buf for a previously buffered
		// partial reply, which gets prepended below
		size_t buf_read_sz = sizeof(buf) - (master_chip->incomplete_reply_sz ? ROCKMINER_REPLY_SIZE : 0);
		rsz = rockminer_read(fd, &buf[master_chip->incomplete_reply_sz], buf_read_sz);
		if (rsz <= 0)
			break;
		// A full buffer suggests more data may be queued in the OS
		maybe_more_to_read = (rsz == buf_read_sz);
		if (master_chip->incomplete_reply_sz)
		{
			memcpy(buf, master_chip->incomplete_reply, master_chip->incomplete_reply_sz);
			rsz += master_chip->incomplete_reply_sz;
		}
		// Dispatch each complete 8-byte reply frame
		for (reply = buf; rsz >= ROCKMINER_REPLY_SIZE; (rsz -= ROCKMINER_REPLY_SIZE), (reply += ROCKMINER_REPLY_SIZE))
		{
			// const uint8_t status = reply[4] >> 4;
			const enum rockminer_replies cmd = reply[4] & 0xf;
			// const uint8_t prodid = reply[5] >> 6;
			const uint8_t chipid = reply[5] & 0x3f;
			const uint8_t taskid = reply[6] & 1;
			const uint8_t temp = reply[7];
			struct cgpu_info * const proc = device_proc_by_id(dev, chipid);
			if (unlikely(!proc))
			{
				// Reply claims a chip we never detected: count it against all
				for_each_managed_proc(proc, dev)
				{
					struct thr_info * const thr = proc->thr[0];
					inc_hw_errors_only(thr);
				}
				applog(LOG_ERR, "%s: Chip id %d out of range", dev->dev_repr, chipid);
				continue;
			}
			struct thr_info * const thr = proc->thr[0];
			struct rockminer_chip_data * const chip = thr->cgpu_data;
			// 128 appears to be a "no reading" sentinel — TODO confirm
			if (temp != 128)
				proc->temp = temp;
			switch (cmd) {
				case ROCKMINER_REPLY_NONCE_FOUND:
				{
					const uint32_t nonce = upk_u32be(reply, 0);
					struct work *work;
					if (chip->works[taskid] && test_nonce(chip->works[taskid], nonce, false))
						{}
					else
					if (chip->works[taskid ? 0 : 1] && test_nonce(chip->works[taskid ? 0 : 1], nonce, false))
					{
						// Nonce matches the other slot: our id mapping is
						// flipped relative to the device's; swap to realign
						applog(LOG_DEBUG, "%"PRIpreprv": We have task ids inverted; fixing", proc->proc_repr);
						work = chip->works[0];
						chip->works[0] = chip->works[1];
						chip->works[1] = work;
						chip->last_taskid = chip->last_taskid ? 0 : 1;
					}
					work = chip->works[taskid];
					submit_nonce(thr, work, nonce);
					break;
				}
				case ROCKMINER_REPLY_TASK_COMPLETE:
					applog(LOG_DEBUG, "%"PRIpreprv": Task %d completed", proc->proc_repr, taskid);
					// A full 2^32 nonce range was scanned
					hashes_done2(thr, 0x100000000, NULL);
					if (proc->deven == DEV_ENABLED)
						timer_set_delay_from_now(&chip->tv_midtask_timeout, ROCKMINER_MIDTASK_TIMEOUT_US);
					break;
				case ROCKMINER_REPLY_GET_TASK:
					applog(LOG_DEBUG, "%"PRIpreprv": Task %d requested", proc->proc_repr, taskid);
					// Chip wants more work: reopen the queue for it
					thr->queue_full = false;
					++chip->requested_work;
					if (proc->deven == DEV_ENABLED)
						timer_set_delay_from_now(&chip->tv_midtask_timeout, ROCKMINER_TASK_TIMEOUT_US);
					break;
			}
		}
		// Stash any trailing partial frame for the next pass
		master_chip->incomplete_reply_sz = rsz;
		if (rsz)
			memcpy(master_chip->incomplete_reply, reply, rsz);
	}
	if (rsz < 0)
		rockminer_dead(dev);
	// Timeout sweep: resend (or requeue) tasks for chips that went quiet
	struct timeval tv_now;
	timer_set_now(&tv_now);
	for_each_managed_proc(proc, dev)
	{
		struct thr_info * const thr = proc->thr[0];
		struct rockminer_chip_data * const chip = thr->cgpu_data;
		if (timer_passed(&chip->tv_midtask_timeout, &tv_now))
		{
			if (proc->deven != DEV_ENABLED)
			{
				timer_unset(&chip->tv_midtask_timeout);
				continue;
			}
			// A task completed, but no request followed
			// This means it missed our last task send, so we need to resend it
			applog(LOG_WARNING, "%"PRIpreprv": No task request? Probably lost, resending task %d", proc->proc_repr, chip->last_taskid);
			inc_hw_errors_only(thr);
			timer_set_delay(&chip->tv_midtask_timeout, &tv_now, ROCKMINER_MIDTASK_RETRY_US);
			struct work *work;
			if ((!(work = chip->works[chip->last_taskid])) || stale_work(work, false))
			{
				// Either no work was queued, or it was stale
				// Instead of resending, just queue a new one
				if (!chip->requested_work)
					chip->requested_work = 1;
				thr->queue_full = false;
			}
			else
			if (!rockminer_send_work(thr))
			{
				rockminer_dead(dev);
				timer_set_delay_from_now(&master_thr->tv_poll, ROCKMINER_RETRY_US);
				inc_hw_errors_only(thr);
				applogr(, LOG_ERR, "%"PRIpreprv": Failed to resend work", proc->proc_repr);
			}
		}
	}
	timer_set_delay_from_now(&master_thr->tv_poll, ROCKMINER_POLL_US);
}
  440. static
  441. const char *rockminer_set_clock(struct cgpu_info * const proc, const char * const optname, const char *newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
  442. {
  443. struct thr_info * const thr = proc->thr[0];
  444. struct rockminer_chip_data * const chip = thr->cgpu_data;
  445. bool unsafe = false;
  446. if (!strncasecmp(newvalue, "unsafe:", 7))
  447. {
  448. newvalue += 7;
  449. unsafe = true;
  450. }
  451. const int val = atoi(newvalue);
  452. if (val < ROCKMINER_MIN_FREQ_MHZ || val > ROCKMINER_MAX_FREQ_MHZ)
  453. return "Invalid clock speed";
  454. else
  455. if (val > ROCKMINER_MAX_SAFE_FREQ_MHZ && !unsafe)
  456. return "Dangerous clock speed (use \"unsafe:N\" to force)";
  457. applog(LOG_DEBUG, "%"PRIpreprv": Changing clock frequency for future jobs to %d MHz", proc->proc_repr, val);
  458. rockminer_job_buf_set_freq(chip->next_work_req, val);
  459. return NULL;
  460. }
// Options accepted via --set-device (and the TUI)
static const struct bfg_set_device_definition rockminer_set_device_funcs[] = {
	{"clock", rockminer_set_clock, "clock frequency"},
	{NULL}
};
  465. static
  466. int rockminer_get_clock(struct cgpu_info * const proc)
  467. {
  468. struct thr_info * const thr = proc->thr[0];
  469. struct rockminer_chip_data * const chip = thr->cgpu_data;
  470. return ((int)chip->next_work_req[0x31] + 1) * 10;
  471. }
  472. static
  473. struct api_data *rockminer_get_extra_device_status(struct cgpu_info * const proc)
  474. {
  475. struct api_data *root = NULL;
  476. double d = rockminer_get_clock(proc);
  477. root = api_add_freq(root, "Frequency", &d, true);
  478. return root;
  479. }
  480. #ifdef HAVE_CURSES
// TUI menu: the only per-device management option is the clock speed
static
void rockminer_tui_wlogprint_choices(struct cgpu_info * const proc)
{
	wlogprint("[C]lock speed ");
}
  486. static
  487. const char *rockminer_tui_handle_choice(struct cgpu_info * const proc, const int input)
  488. {
  489. static char buf[0x100]; // Static for replies
  490. switch (input)
  491. {
  492. case 'c': case 'C':
  493. {
  494. sprintf(buf, "Set clock speed (range %d-%d, multiple of 10)", ROCKMINER_MIN_FREQ_MHZ, ROCKMINER_MAX_FREQ_MHZ);
  495. char * const val = curses_input(buf);
  496. const char * const msg = rockminer_set_clock(proc, "clock", val ?: "", NULL, NULL);
  497. free(val);
  498. if (msg)
  499. {
  500. snprintf(buf, sizeof(buf), "%s\n", msg);
  501. return buf;
  502. }
  503. return "Clock speed changed\n";
  504. }
  505. }
  506. return NULL;
  507. }
// TUI status line: show the currently configured clock speed
static
void rockminer_wlogprint_status(struct cgpu_info * const proc)
{
	wlogprint("Clock speed: %d\n", rockminer_get_clock(proc));
}
  513. #endif
// Driver descriptor: queue-based minerloop driven by rockminer_poll
struct device_drv rockminer_drv = {
	.dname = "rockminer",
	.name = "RKM",
	.lowl_match = rockminer_lowl_match,
	.lowl_probe = rockminer_lowl_probe,
	.thread_init = rockminer_init,
	.minerloop = minerloop_queue,
	.queue_append = rockminer_queue_append,
	.queue_flush = rockminer_queue_flush,
	.poll = rockminer_poll,
	.get_api_extra_device_status = rockminer_get_extra_device_status,
#ifdef HAVE_CURSES
	.proc_wlogprint_status = rockminer_wlogprint_status,
	.proc_tui_wlogprint_choices = rockminer_tui_wlogprint_choices,
	.proc_tui_handle_choice = rockminer_tui_handle_choice,
#endif
};