/* driver-avalonmm.c */
  1. /*
  2. * Copyright 2014 Luke Dashjr
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License as published by the Free
  6. * Software Foundation; either version 3 of the License, or (at your option)
  7. * any later version. See COPYING for more details.
  8. */
  9. #include "config.h"
  10. #include <stdbool.h>
  11. #include <stdint.h>
  12. #include <stdlib.h>
  13. #include <string.h>
  14. #include <unistd.h>
  15. #include <utlist.h>
  16. #include "deviceapi.h"
  17. #include "logging.h"
  18. #include "lowlevel.h"
  19. #include "lowl-vcom.h"
  20. #include "miner.h"
  21. #include "util.h"
  22. #include "work2d.h"
  23. #define AVALONMM_MAX_MODULES 4
  24. #define AVALONMM_MAX_COINBASE_SIZE (6 * 1024)
  25. #define AVALONMM_MAX_MERKLES 20
  26. #define AVALONMM_MAX_NONCE_DIFF 0x20
  27. // Must be a power of two
  28. #define AVALONMM_CACHED_JOBS 2
  29. #define AVALONMM_NONCE_OFFSET 0x180
  30. BFG_REGISTER_DRIVER(avalonmm_drv)
  31. #define AVALONMM_PKT_DATA_SIZE 0x20
  32. #define AVALONMM_PKT_SIZE (AVALONMM_PKT_DATA_SIZE + 7)
// Host-to-device command opcodes (byte 2 of each packet).
enum avalonmm_cmd {
	AMC_DETECT = 0x0a,  // probe for a module; id in last 4 payload bytes
	AMC_NEW_JOB = 0x0b,  // job geometry: coinbase len, xnonce2 offset, merkle count
	AMC_JOB_ID = 0x0c,  // 32-bit job id echoed back in nonce replies
	AMC_COINBASE = 0x0d,  // raw coinbase transaction (with extranonces spliced in)
	AMC_MERKLES = 0x0e,  // merkle branch hashes
	AMC_BLKHDR = 0x0f,  // 0x80-byte padded block header template
	AMC_POLL = 0x10,  // request status/nonce replies from one module
	AMC_TARGET = 0x11,  // 0x20-byte share target
	AMC_START = 0x13,  // fan/voltage/freq config + xnonce2 start/range; begins hashing
};
// Device-to-host reply opcodes (byte 2 of each packet).
enum avalonmm_reply {
	AMR_NONCE = 0x17,  // found share: xnonce2, nonce, job id, module id
	AMR_STATUS = 0x18,  // periodic status: temperatures (and fan/freq/voltage)
	AMR_DETECT_ACK = 0x19,  // acknowledgement of AMC_DETECT
};
// Send one command to the MM controller, fragmenting the payload into
// AVALONMM_PKT_DATA_SIZE-byte packets framed as:
//   "AV" <cmd> <index, 1-based> <total packets> <0x20-byte payload> <crc16>
// A zero-length payload still produces a single zero-filled packet.
// Returns false as soon as any write comes up short.
static
bool avalonmm_write_cmd(const int fd, const enum avalonmm_cmd cmd, const void *data, size_t datasz)
{
	// Packets needed to carry datasz bytes; GCC ?: extension yields 1 for empty payloads
	uint8_t packets = ((datasz + AVALONMM_PKT_DATA_SIZE - 1) / AVALONMM_PKT_DATA_SIZE) ?: 1;
	uint8_t pkt[AVALONMM_PKT_SIZE] = {'A', 'V', cmd, 1, packets};
	uint16_t crc;
	ssize_t r;
	while (true)
	{
		size_t copysz = AVALONMM_PKT_DATA_SIZE;
		if (datasz < copysz)
		{
			// Final (short) packet: zero-pad the unused tail of the payload
			copysz = datasz;
			memset(&pkt[5 + copysz], '\0', AVALONMM_PKT_DATA_SIZE - copysz);
		}
		if (copysz)
			memcpy(&pkt[5], data, copysz);
		// CRC covers the 0x20-byte payload only, not the 5-byte header
		crc = crc16xmodem(&pkt[5], AVALONMM_PKT_DATA_SIZE);
		pk_u16be(pkt, 5 + AVALONMM_PKT_DATA_SIZE, crc);
		r = write(fd, pkt, sizeof(pkt));
		if (opt_dev_protocol)
		{
			char hex[(sizeof(pkt) * 2) + 1];
			bin2hex(hex, pkt, sizeof(pkt));
			applog(LOG_DEBUG, "DEVPROTO fd=%d SEND: %s => %d", fd, hex, (int)r);
		}
		if (sizeof(pkt) != r)
			return false;
		datasz -= copysz;
		if (!datasz)
			break;
		data += copysz;  // void* arithmetic is a GCC extension (byte-sized)
		++pkt[3];  // advance the 1-based packet index
	}
	return true;
}
// Read one (possibly multi-packet) reply group from the controller into
// bufp.  All packets of a group must carry the same total count and arrive
// with consecutive 1-based indices; the opcode of the last packet read is
// stored in *out_reply.  Returns the total payload size received
// (packets * 0x20 - possibly more than bufsz, in which case the excess is
// discarded), or -1 on any short read, framing, or CRC error.
static
ssize_t avalonmm_read(const int fd, const int logprio, enum avalonmm_reply *out_reply, void * const bufp, size_t bufsz)
{
	uint8_t *buf = bufp;
	uint8_t pkt[AVALONMM_PKT_SIZE];
	uint8_t packets = 0, got = 0;
	uint16_t good_crc, actual_crc;
	ssize_t r;
	while (true)
	{
		r = serial_read(fd, pkt, sizeof(pkt));
		if (opt_dev_protocol)
		{
			if (r >= 0)
			{
				char hex[(r * 2) + 1];
				bin2hex(hex, pkt, r);
				applog(LOG_DEBUG, "DEVPROTO fd=%d RECV: %s", fd, hex);
			}
			else
				applog(LOG_DEBUG, "DEVPROTO fd=%d RECV (%d)", fd, (int)r);
		}
		if (r != sizeof(pkt))
			return -1;
		if (memcmp(pkt, "AV", 2))
			applogr(-1, logprio, "%s: bad header", __func__);
		// CRC covers the 0x20-byte payload only.
		// NOTE(review): replies are unpacked little-endian here while the send
		// path packs the CRC big-endian - presumably the firmware differs by
		// direction; confirm against the MM protocol spec.
		good_crc = crc16xmodem(&pkt[5], AVALONMM_PKT_DATA_SIZE);
		actual_crc = upk_u16le(pkt, 5 + AVALONMM_PKT_DATA_SIZE);
		if (good_crc != actual_crc)
			applogr(-1, logprio, "%s: bad CRC (good=%04x actual=%04x)", __func__, good_crc, actual_crc);
		*out_reply = pkt[2];
		if (!got)
		{
			// First packet fixes the expected total for the whole group
			if (pkt[3] != 1)
				applogr(-1, logprio, "%s: first packet is not index 1", __func__);
			++got;
			packets = pkt[4];
		}
		else
		{
			if (pkt[3] != ++got)
				applogr(-1, logprio, "%s: packet %d is not index %d", __func__, got, got);
			if (pkt[4] != packets)
				applogr(-1, logprio, "%s: packet %d total packet count is %d rather than original value of %d", __func__, got, pkt[4], packets);
		}
		if (bufsz)
		{
			if (likely(bufsz > AVALONMM_PKT_DATA_SIZE))
			{
				memcpy(buf, &pkt[5], AVALONMM_PKT_DATA_SIZE);
				bufsz -= AVALONMM_PKT_DATA_SIZE;
				buf += AVALONMM_PKT_DATA_SIZE;
			}
			else
			{
				// Caller's buffer is (nearly) full: take what fits, drop the rest
				memcpy(buf, &pkt[5], bufsz);
				bufsz = 0;
			}
		}
		if (got == packets)
			break;
	}
	return (((ssize_t)got) * AVALONMM_PKT_DATA_SIZE);
}
  149. static
  150. bool avalonmm_detect_one(const char * const devpath)
  151. {
  152. uint8_t buf[AVALONMM_PKT_DATA_SIZE] = {0};
  153. enum avalonmm_reply reply;
  154. const int fd = serial_open(devpath, 0, 1, true);
  155. struct cgpu_info *prev_cgpu = NULL;
  156. if (fd == -1)
  157. applogr(false, LOG_DEBUG, "%s: Failed to open %s", __func__, devpath);
  158. for (int i = 0; i < AVALONMM_MAX_MODULES; ++i)
  159. {
  160. pk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4, i);
  161. avalonmm_write_cmd(fd, AMC_DETECT, buf, AVALONMM_PKT_DATA_SIZE);
  162. }
  163. while (avalonmm_read(fd, LOG_DEBUG, &reply, NULL, 0) > 0)
  164. {
  165. if (reply != AMR_DETECT_ACK)
  166. continue;
  167. int moduleno = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
  168. struct cgpu_info * const cgpu = malloc(sizeof(*cgpu));
  169. *cgpu = (struct cgpu_info){
  170. .drv = &avalonmm_drv,
  171. .device_path = prev_cgpu ? prev_cgpu->device_path : strdup(devpath),
  172. .device_data = (void*)(intptr_t)moduleno,
  173. .deven = DEV_ENABLED,
  174. .procs = 1,
  175. .threads = prev_cgpu ? 0 : 1,
  176. };
  177. add_cgpu_slave(cgpu, prev_cgpu);
  178. prev_cgpu = cgpu;
  179. }
  180. serial_close(fd);
  181. return prev_cgpu;
  182. }
// Lowlevel probe hook: route VCOM (serial) device candidates through the
// common wrapper to avalonmm_detect_one.
static
bool avalonmm_lowl_probe(const struct lowlevel_device_info * const info)
{
	return vcom_lowl_probe_wrapper(info, avalonmm_detect_one);
}
// A job that has been uploaded to the device, retained so that returned
// nonces can be matched back to their stratum work for submission.
struct avalonmm_job {
	struct stratum_work swork;  // private copy of the pool's stratum work
	uint32_t jobid;  // id sent via AMC_JOB_ID, echoed back in AMR_NONCE replies
	struct timeval tv_prepared;  // when the job was built
	double nonce_diff;  // pdiff the device was told to filter shares at
};
// State shared by every module on one serial chain (stored in each
// processor's device_data by avalonmm_init).
struct avalonmm_chain_state {
	uint32_t xnonce1;  // 2D-work extranonce1 reserved for this chain
	struct avalonmm_job *jobs[AVALONMM_CACHED_JOBS];  // ring of recent jobs, slot = jobid % AVALONMM_CACHED_JOBS
	uint32_t next_jobid;  // id to assign to the next uploaded job
};
// Per-module state (stored in the module's thread cgpu_data).
struct avalonmm_module_state {
	unsigned module_id;  // id assigned during detection; used to address AMC_POLL
	uint16_t temp[2];  // last-reported temperature sensor readings
};
// thread_init for the master processor: opens the shared serial port at
// 115200 baud, reserves a 2D-work xnonce1 for the whole chain, then gives
// every processor a pointer to the shared chain state and every module its
// own module state.  Returns false (after cleanup) on open/reserve failure.
static
bool avalonmm_init(struct thr_info * const master_thr)
{
	struct cgpu_info * const master_dev = master_thr->cgpu, *dev = NULL;
	const char * const devpath = master_dev->device_path;
	const int fd = serial_open(devpath, 115200, 1, true);
	master_dev->device_fd = fd;
	if (unlikely(fd == -1))
		applogr(false, LOG_ERR, "%s: Failed to initialise", master_dev->dev_repr);
	struct avalonmm_chain_state * const chain = malloc(sizeof(*chain));
	*chain = (struct avalonmm_chain_state){
		.xnonce1 = 0,
	};
	work2d_init();
	if (!reserve_work2d_(&chain->xnonce1))
	{
		applog(LOG_ERR, "%s: Failed to reserve 2D work", master_dev->dev_repr);
		free(chain);
		serial_close(fd);
		return false;
	}
	// One module state per device (the first proc of each); all procs share
	// the single chain state
	for_each_managed_proc(proc, master_dev)
	{
		if (dev == proc->device)
			continue;
		dev = proc->device;
		struct thr_info * const thr = proc->thr[0];
		struct avalonmm_module_state * const module = malloc(sizeof(*module));
		*module = (struct avalonmm_module_state){
			.module_id = (intptr_t)dev->device_data,  // stashed there by detection
		};
		proc->device_data = chain;
		thr->cgpu_data = module;
	}
	for_each_managed_proc(proc, master_dev)
	{
		proc->status = LIFE_INIT2;
	}
	return true;
}
  243. static
  244. bool avalonmm_send_swork(const int fd, struct avalonmm_chain_state * const chain, const struct stratum_work * const swork, uint32_t jobid, double *out_nonce_diff)
  245. {
  246. uint8_t buf[AVALONMM_PKT_DATA_SIZE];
  247. bytes_t coinbase = BYTES_INIT;
  248. int coinbase_len = bytes_len(&swork->coinbase);
  249. if (coinbase_len > AVALONMM_MAX_COINBASE_SIZE)
  250. return false;
  251. if (swork->merkles > AVALONMM_MAX_MERKLES)
  252. return false;
  253. pk_u32be(buf, 0, coinbase_len);
  254. const size_t xnonce2_offset = swork->nonce2_offset + work2d_pad_xnonce_size(swork) + work2d_xnonce1sz;
  255. pk_u32be(buf, 4, xnonce2_offset);
  256. pk_u32be(buf, 8, 4); // extranonce2 size, but only 4 is supported - smaller sizes are handled by limiting the range
  257. pk_u32be(buf, 0x0c, 0x24); // merkle_offset, always 0x24 for Bitcoin
  258. pk_u32be(buf, 0x10, swork->merkles);
  259. pk_u32be(buf, 0x14, 1); // diff? poorly defined
  260. pk_u32be(buf, 0x18, 0); // pool number - none of its business
  261. if (!avalonmm_write_cmd(fd, AMC_NEW_JOB, buf, 0x1c))
  262. return false;
  263. double nonce_diff = target_diff(swork->target);
  264. if (nonce_diff >= AVALONMM_MAX_NONCE_DIFF)
  265. set_target_to_pdiff(buf, nonce_diff = AVALONMM_MAX_NONCE_DIFF);
  266. else
  267. memcpy(buf, swork->target, 0x20);
  268. *out_nonce_diff = nonce_diff;
  269. if (!avalonmm_write_cmd(fd, AMC_TARGET, buf, 0x20))
  270. return false;
  271. pk_u32be(buf, 0, jobid);
  272. if (!avalonmm_write_cmd(fd, AMC_JOB_ID, buf, 4))
  273. return false;
  274. // Need to add extranonce padding and extranonce2
  275. bytes_cpy(&coinbase, &swork->coinbase);
  276. uint8_t *cbp = bytes_buf(&coinbase);
  277. cbp += swork->nonce2_offset;
  278. work2d_pad_xnonce(cbp, swork, false);
  279. cbp += work2d_pad_xnonce_size(swork);
  280. memcpy(cbp, &chain->xnonce1, work2d_xnonce1sz);
  281. cbp += work2d_xnonce1sz;
  282. if (!avalonmm_write_cmd(fd, AMC_COINBASE, bytes_buf(&coinbase), bytes_len(&coinbase)))
  283. return false;
  284. if (!avalonmm_write_cmd(fd, AMC_MERKLES, bytes_buf(&swork->merkle_bin), bytes_len(&swork->merkle_bin)))
  285. return false;
  286. uint8_t header_bin[0x80];
  287. memcpy(&header_bin[ 0], swork->header1, 0x24);
  288. memset(&header_bin[0x24], '\0', 0x20); // merkle root
  289. pk_u32be(header_bin, 0x44, swork->ntime);
  290. memcpy(&header_bin[0x48], swork->diffbits, 4);
  291. memset(&header_bin[0x4c], '\0', 4); // nonce
  292. memcpy(&header_bin[0x50], bfg_workpadding_bin, 0x30);
  293. if (!avalonmm_write_cmd(fd, AMC_BLKHDR, header_bin, sizeof(header_bin)))
  294. return false;
  295. // Avalon MM cannot handle xnonce2_size other than 4, and works in big endian, so we use a range to ensure the following bytes match
  296. const int fixed_mm_xnonce2_bytes = (work2d_xnonce2sz >= 4) ? 0 : (4 - work2d_xnonce2sz);
  297. uint8_t mm_xnonce2_start[4];
  298. uint32_t xnonce2_range;
  299. memset(mm_xnonce2_start, '\0', 4);
  300. cbp += work2d_xnonce2sz;
  301. for (int i = 1; i <= fixed_mm_xnonce2_bytes; ++i)
  302. mm_xnonce2_start[fixed_mm_xnonce2_bytes - i] = cbp++[0];
  303. if (fixed_mm_xnonce2_bytes > 0)
  304. xnonce2_range = (1 << (8 * work2d_xnonce2sz)) - 1;
  305. else
  306. xnonce2_range = 0xffffffff;
  307. pk_u32be(buf, 0, 80); // fan speed %
  308. uint16_t voltcfg = ((uint16_t)bitflip8((0x78 - /*deci-milli-volts*/6625 / 125) << 1 | 1)) << 8;
  309. pk_u32be(buf, 4, voltcfg);
  310. pk_u32be(buf, 8, 450/*freq*/);
  311. memcpy(&buf[0xc], mm_xnonce2_start, 4);
  312. pk_u32be(buf, 0x10, xnonce2_range);
  313. if (!avalonmm_write_cmd(fd, AMC_START, buf, 0x14))
  314. return false;
  315. return true;
  316. }
// Release a cached job: deep-clean the copied stratum work, then free the
// container itself.
static
void avalonmm_free_job(struct avalonmm_job * const mmjob)
{
	stratum_work_clean(&mmjob->swork);
	free(mmjob);
}
// Snapshot the pool's stratum work (under its data lock), upload it to the
// device with a fresh job id, and cache it in the chain's job ring so later
// nonce replies can be matched and submitted.  Returns false (freeing the
// snapshot) if the upload fails.
static
bool avalonmm_update_swork_from_pool(struct cgpu_info * const master_dev, struct pool * const pool)
{
	struct avalonmm_chain_state * const chain = master_dev->device_data;
	const int fd = master_dev->device_fd;
	struct avalonmm_job *mmjob = malloc(sizeof(*mmjob));
	*mmjob = (struct avalonmm_job){
		.jobid = chain->next_jobid,
	};
	// Copy under the pool's lock; our copy is private, so detach the lock ptr
	cg_rlock(&pool->data_lock);
	stratum_work_cpy(&mmjob->swork, &pool->swork);
	cg_runlock(&pool->data_lock);
	timer_set_now(&mmjob->tv_prepared);
	mmjob->swork.data_lock_p = NULL;
	if (!avalonmm_send_swork(fd, chain, &mmjob->swork, mmjob->jobid, &mmjob->nonce_diff))
	{
		avalonmm_free_job(mmjob);
		return false;
	}
	applog(LOG_DEBUG, "%s: Upload of job id %08lx complete", master_dev->dev_repr, (unsigned long)mmjob->jobid);
	++chain->next_jobid;
	// Evict whatever job previously occupied this ring slot
	struct avalonmm_job **jobentry = &chain->jobs[mmjob->jobid % AVALONMM_CACHED_JOBS];
	if (*jobentry)
		avalonmm_free_job(*jobentry);
	*jobentry = mmjob;
	return true;
}
  350. static
  351. struct cgpu_info *avalonmm_dev_for_module_id(struct cgpu_info * const master_dev, const uint32_t module_id)
  352. {
  353. struct cgpu_info *dev = NULL;
  354. for_each_managed_proc(proc, master_dev)
  355. {
  356. if (dev == proc->device)
  357. continue;
  358. dev = proc->device;
  359. struct thr_info * const thr = dev->thr[0];
  360. struct avalonmm_module_state * const module = thr->cgpu_data;
  361. if (module->module_id == module_id)
  362. return dev;
  363. }
  364. return NULL;
  365. }
// Read and dispatch one reply from the controller: AMR_STATUS updates the
// module's temperature readings; AMR_NONCE reconstructs the extranonce2 and
// submits the share against the cached job.  Returns false only when the
// read itself fails; unknown modules and bad job ids are counted as HW
// errors but still return true.
static
bool avalonmm_poll_once(struct cgpu_info * const master_dev)
{
	struct avalonmm_chain_state * const chain = master_dev->device_data;
	const int fd = master_dev->device_fd;
	uint8_t buf[AVALONMM_PKT_DATA_SIZE];
	enum avalonmm_reply reply;
	if (avalonmm_read(fd, LOG_ERR, &reply, buf, sizeof(buf)) < 0)
		return false;
	switch (reply)
	{
		case AMR_STATUS:
		{
			// Module id is always in the last 4 payload bytes
			const uint32_t module_id = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
			struct cgpu_info * const dev = avalonmm_dev_for_module_id(master_dev, module_id);
			if (unlikely(!dev))
			{
				struct thr_info * const master_thr = master_dev->thr[0];
				applog(LOG_ERR, "%s: %s for unknown module id %lu", master_dev->dev_repr, "Status", (unsigned long)module_id);
				inc_hw_errors_only(master_thr);
				break;
			}
			struct thr_info * const thr = dev->thr[0];
			struct avalonmm_module_state * const module = thr->cgpu_data;
			module->temp[0] = upk_u16be(buf, 0);
			module->temp[1] = upk_u16be(buf, 2);
#if 0
			module->fan [0] = upk_u16be(buf, 4);
			module->fan [1] = upk_u16be(buf, 6);
			module->freq = upk_u32be(buf, 8);
			module->voltage = upk_u32be(buf, 0x0c);
#endif
			// Report the hotter of the two sensors
			dev->temp = max(module->temp[0], module->temp[1]);
			break;
		}
		case AMR_NONCE:
		{
			const int fixed_mm_xnonce2_bytes = (work2d_xnonce2sz >= 4) ? 0 : (4 - work2d_xnonce2sz);
			const uint8_t * const backward_xnonce2 = &buf[8 + fixed_mm_xnonce2_bytes];
			// NOTE(review): nonces arrive offset by AVALONMM_NONCE_OFFSET -
			// presumably the chip's pipeline depth; subtracted before submit
			const uint32_t nonce = upk_u32be(buf, 0x10) - AVALONMM_NONCE_OFFSET;
			const uint32_t jobid = upk_u32be(buf, 0x14);
			const uint32_t module_id = upk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4);
			struct cgpu_info * const dev = avalonmm_dev_for_module_id(master_dev, module_id);
			if (unlikely(!dev))
			{
				struct thr_info * const master_thr = master_dev->thr[0];
				applog(LOG_ERR, "%s: %s for unknown module id %lu", master_dev->dev_repr, "Nonce", (unsigned long)module_id);
				inc_hw_errors_only(master_thr);
				break;
			}
			struct thr_info * const thr = dev->thr[0];
			// Accept only ids still present in the cache window, allowing for
			// wraparound of the 32-bit id counter
			bool invalid_jobid = false;
			if (unlikely((uint32_t)(chain->next_jobid - AVALONMM_CACHED_JOBS) > chain->next_jobid))
				// Jobs wrap around
				invalid_jobid = (jobid < chain->next_jobid - AVALONMM_CACHED_JOBS && jobid >= chain->next_jobid);
			else
				invalid_jobid = (jobid < chain->next_jobid - AVALONMM_CACHED_JOBS || jobid >= chain->next_jobid);
			if (unlikely(invalid_jobid))
			{
				applog(LOG_ERR, "%s: Bad job id %08lx", dev->dev_repr, (unsigned long)jobid);
				inc_hw_errors_only(thr);
				break;
			}
			struct avalonmm_job * const mmjob = chain->jobs[jobid % AVALONMM_CACHED_JOBS];
			// Device echoes xnonce2 as 4 big-endian bytes; rebuild our
			// work2d_xnonce2sz-byte value in reversed byte order
			uint8_t xnonce2[work2d_xnonce2sz];
			for (int i = 0; i < work2d_xnonce2sz; ++i)
				xnonce2[i] = backward_xnonce2[(work2d_xnonce2sz - 1) - i];
			work2d_submit_nonce(thr, &mmjob->swork, &mmjob->tv_prepared, xnonce2, chain->xnonce1, nonce, mmjob->swork.ntime, NULL, mmjob->nonce_diff);
			break;
		}
	}
	return true;
}
  439. static
  440. void avalonmm_poll(struct cgpu_info * const master_dev, int n)
  441. {
  442. while (n > 0)
  443. {
  444. if (avalonmm_poll_once(master_dev))
  445. --n;
  446. }
  447. }
// Master thread main loop: upload the current pool's stratum job to the
// chain, then poll every module at ~10ms intervals until a work restart, or
// a pool switch with usable work, requires a fresh upload.
static
void avalonmm_minerloop(struct thr_info * const master_thr)
{
	struct cgpu_info * const master_dev = master_thr->cgpu;
	const int fd = master_dev->device_fd;
	struct pool *nextpool = current_pool(), *pool = NULL;
	uint8_t buf[AVALONMM_PKT_DATA_SIZE] = {0};
	while (likely(!master_dev->shutdown))
	{
		master_thr->work_restart = false;
		if (!pool_has_usable_swork(nextpool))
			; // FIXME
		else
		if (avalonmm_update_swork_from_pool(master_dev, nextpool))
			pool = nextpool;
		while (likely(!(master_thr->work_restart || ((nextpool = current_pool()) != pool && pool_has_usable_swork(nextpool)))))
		{
			cgsleep_ms(10);
			struct cgpu_info *dev = NULL;
			int n = 0;
			// Send one AMC_POLL per module, then drain that many replies
			for_each_managed_proc(proc, master_dev)
			{
				if (dev == proc->device)
					continue;
				dev = proc->device;
				struct thr_info * const thr = dev->thr[0];
				struct avalonmm_module_state * const module = thr->cgpu_data;
				pk_u32be(buf, AVALONMM_PKT_DATA_SIZE - 4, module->module_id);
				avalonmm_write_cmd(fd, AMC_POLL, buf, AVALONMM_PKT_DATA_SIZE);
				++n;
			}
			avalonmm_poll(master_dev, n);
		}
	}
}
// Driver entry points; detection is routed through the VCOM lowlevel scan.
struct device_drv avalonmm_drv = {
	.dname = "avalonmm",
	.name = "AVM",
	.lowl_probe = avalonmm_lowl_probe,
	.thread_init = avalonmm_init,
	.minerloop = avalonmm_minerloop,
};