@@ -2681,6 +2681,39 @@ static void calc_midstate(struct work *work)
 	swap32tole(work->midstate, work->midstate, 8);
 }
 
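+/* A bfg_tmpl_ref pairs a GBT block template with a mutex-guarded reference
+ * count, so that many struct work instances (clones, copies, and the pool's
+ * stratum_work) can share one blktemplate_t.  Hypothetical usage sketch:
+ *
+ *   struct bfg_tmpl_ref *tr = tmpl_makeref(tmpl);  // refcount == 1
+ *   tmpl_incref(tr);                               // share with a clone
+ *   tmpl_decref(tr);                               // clone released
+ *   tmpl_decref(tr);                               // frees tmpl and tr
+ */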
+static
+struct bfg_tmpl_ref *tmpl_makeref(blktemplate_t * const tmpl)
+{
+	struct bfg_tmpl_ref * const tr = malloc(sizeof(*tr));
+	*tr = (struct bfg_tmpl_ref){
+		.tmpl = tmpl,
+		.refcount = 1,
+	};
+	mutex_init(&tr->mutex);
+	return tr;
+}
+
+static
+void tmpl_incref(struct bfg_tmpl_ref * const tr)
+{
+	mutex_lock(&tr->mutex);
+	++tr->refcount;
+	mutex_unlock(&tr->mutex);
+}
+
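+/* Drops one reference; the last holder frees both the template and the
+ * wrapper, so a bfg_tmpl_ref pointer must not be used after decref. */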
+void tmpl_decref(struct bfg_tmpl_ref * const tr)
+{
+	mutex_lock(&tr->mutex);
+	bool free_tmpl = !--tr->refcount;
+	mutex_unlock(&tr->mutex);
+	if (free_tmpl)
+	{
+		blktmpl_free(tr->tmpl);
+		mutex_destroy(&tr->mutex);
+		free(tr);
+	}
+}
+
 static struct work *make_work(void)
 {
 	struct work *work = calloc(1, sizeof(struct work));
@@ -2705,16 +2738,8 @@ void clean_work(struct work *work)
 	if (work->device_data_free_func)
 		work->device_data_free_func(work);
 
-	if (work->tmpl) {
-		struct pool *pool = work->pool;
-		mutex_lock(&pool->pool_lock);
-		bool free_tmpl = !--*work->tmpl_refcount;
-		mutex_unlock(&pool->pool_lock);
-		if (free_tmpl) {
-			blktmpl_free(work->tmpl);
-			free(work->tmpl_refcount);
-		}
-	}
+	if (work->tr)
+		tmpl_decref(work->tr);
 
 	memset(work, 0, sizeof(struct work));
 }
@@ -2793,11 +2818,36 @@ bool pool_may_redirect_to(struct pool * const pool, const char * const uri)
 	return match_domains(pool->rpc_url, strlen(pool->rpc_url), uri, strlen(uri));
 }
 
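+/* ntime roll limits bound how far a driver may adjust the header timestamp:
+ * .min/.max are absolute ntime values, while .minoff/.maxoff are offsets
+ * relative to the ntime the work was generated with.  For example, a base
+ * of T with ntime_roll 60 allows any ntime in [T, T+60]. */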
+void set_simple_ntime_roll_limit(struct ntime_roll_limits * const nrl, const uint32_t ntime_base, const int ntime_roll)
+{
+	*nrl = (struct ntime_roll_limits){
+		.min = ntime_base,
+		.max = ntime_base + ntime_roll,
+		.minoff = -ntime_roll,
+		.maxoff = ntime_roll,
+	};
+}
+
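+/* Offset 0x44 (68) into work->data is the big-endian ntime field of the
+ * 80-byte block header. */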
+void work_set_simple_ntime_roll_limit(struct work * const work, const int ntime_roll)
+{
+	set_simple_ntime_roll_limit(&work->ntime_roll_limits, upk_u32be(work->data, 0x44), ntime_roll);
+}
+
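+/* GBT work carries a 4-byte extranonce appended to the coinbase.  With
+ * newer libblkmaker (BLKMAKER_VERSION > 4), every coinbase append must
+ * reserve room for it, hence the wrapper macro below. */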
+static double target_diff(const unsigned char *);
+
+#define GBT_XNONCESZ (sizeof(uint32_t))
+
+#if BLKMAKER_VERSION > 4
+#define blkmk_append_coinbase_safe(tmpl, append, appendsz) \
+	blkmk_append_coinbase_safe2(tmpl, append, appendsz, GBT_XNONCESZ, false)
+#endif
+
 static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 {
 	json_t *res_val = json_object_get(val, "result");
 	json_t *tmp_val;
 	bool ret = false;
+	struct timeval tv_now;
 
 	if (unlikely(detect_algo == 1)) {
 		json_t *tmp = json_object_get(res_val, "algorithm");
@@ -2806,28 +2856,30 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 		detect_algo = 2;
 	}
 
-	if (work->tmpl) {
-		struct timeval tv_now;
-		cgtime(&tv_now);
-		const char *err = blktmpl_add_jansson(work->tmpl, res_val, tv_now.tv_sec);
+	timer_set_now(&tv_now);
+
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
+		const char *err = blktmpl_add_jansson(tmpl, res_val, tv_now.tv_sec);
 		if (err) {
 			applog(LOG_ERR, "blktmpl error: %s", err);
 			return false;
 		}
-		work->rolltime = blkmk_time_left(work->tmpl, tv_now.tv_sec);
+		work->rolltime = blkmk_time_left(tmpl, tv_now.tv_sec);
 #if BLKMAKER_VERSION > 1
 		if (opt_coinbase_script.sz)
 		{
 			bool newcb;
 #if BLKMAKER_VERSION > 2
-			blkmk_init_generation2(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb);
+			blkmk_init_generation2(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb);
 #else
-			newcb = !work->tmpl->cbtxn;
-			blkmk_init_generation(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz);
+			newcb = !tmpl->cbtxn;
+			blkmk_init_generation(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz);
 #endif
 			if (newcb)
 			{
-				ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, &template_nonce, sizeof(template_nonce));
+				ssize_t ae = blkmk_append_coinbase_safe(tmpl, &template_nonce, sizeof(template_nonce));
 				if (ae < (ssize_t)sizeof(template_nonce))
 					applog(LOG_WARNING, "Cannot append template-nonce to coinbase on pool %u (%"PRId64") - you might be wasting hashing!", work->pool->pool_no, (int64_t)ae);
 				++template_nonce;
@@ -2836,7 +2888,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 #endif
 #if BLKMAKER_VERSION > 0
 		{
-			ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, opt_coinbase_sig, 101);
+			ssize_t ae = blkmk_append_coinbase_safe(tmpl, opt_coinbase_sig, 101);
 			static bool appenderr = false;
 			if (ae <= 0) {
 				if (opt_coinbase_sig) {
@@ -2872,7 +2924,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 					free(tmp);
 					truncatewarning = true;
 				}
-				ae = blkmk_append_coinbase_safe(work->tmpl, cbappend, ae);
+				ae = blkmk_append_coinbase_safe(tmpl, cbappend, ae);
 				if (ae <= 0) {
 					applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Error appending coinbase signature (%"PRId64")", (int64_t)ae);
 					appenderr = true;
@@ -2881,13 +2933,20 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 			}
 		}
 #endif
-		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
+		if (blkmk_get_data(tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			return false;
 		swap32yes(work->data, work->data, 80 / 4);
 		memcpy(&work->data[80], workpadding_bin, 48);
+
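+		/* GBT templates carry explicit timestamp bounds; record them so
+		 * drivers only roll ntime within what the template permits. */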
+		work->ntime_roll_limits = (struct ntime_roll_limits){
+			.min = tmpl->mintime,
+			.max = tmpl->maxtime,
+			.minoff = tmpl->mintimeoff,
+			.maxoff = tmpl->maxtimeoff,
+		};
 
 		const struct blktmpl_longpoll_req *lp;
-		if ((lp = blktmpl_get_longpoll(work->tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
+		if ((lp = blktmpl_get_longpoll(tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
 			free(pool->lp_id);
 			pool->lp_id = strdup(lp->id);
 
@@ -2906,6 +2965,8 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 		applog(LOG_ERR, "JSON inval data");
 		return false;
 	}
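+	// Plain getwork data: grant no ntime rolling allowance by default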
+	else
+		work_set_simple_ntime_roll_limit(work, 0);
 
 	if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) {
 		// Calculate it ourselves
@@ -2917,7 +2978,8 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 		applog(LOG_ERR, "JSON inval target");
 		return false;
 	}
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		for (size_t i = 0; i < sizeof(work->target) / 2; ++i)
 		{
 			int p = (sizeof(work->target) - 1) - i;
@@ -2935,9 +2997,49 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 
 	memset(work->hash, 0, sizeof(work->hash));
 
-	cgtime(&work->tv_staged);
+	work->tv_staged = tv_now;
 
-	pool_set_opaque(pool, !work->tmpl);
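+	/* With newer libblkmaker, also publish the GBT template as stratum-style
+	 * job data in pool->swork, so the shared stratum work generator can mine
+	 * from it using a local 4-byte extranonce. */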
+#if BLKMAKER_VERSION > 4
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
+		uint8_t buf[80];
+		int16_t expire;
+		uint8_t *cbtxn;
+		size_t cbtxnsz;
+		size_t cbextranonceoffset;
+		int branchcount;
+		libblkmaker_hash_t *branches;
+
+		if (blkmk_get_mdata(tmpl, buf, sizeof(buf), tv_now.tv_sec, &expire, &cbtxn, &cbtxnsz, &cbextranonceoffset, &branchcount, &branches, GBT_XNONCESZ, false))
+		{
+			struct stratum_work * const swork = &pool->swork;
+			const size_t branchdatasz = branchcount * 0x20;
+
+			cg_wlock(&pool->data_lock);
+			// Take a reference for the pool's swork, releasing any previous one;
+			// stratum_work_clean() will decref it later
+			if (swork->tr)
+				tmpl_decref(swork->tr);
+			tmpl_incref(work->tr);
+			swork->tr = work->tr;
+			bytes_assimilate_raw(&swork->coinbase, cbtxn, cbtxnsz, cbtxnsz);
+			swork->nonce2_offset = cbextranonceoffset;
+			bytes_assimilate_raw(&swork->merkle_bin, branches, branchdatasz, branchdatasz);
+			swork->merkles = branchcount;
+			memcpy(swork->header1, &buf[0], 36);
+			swork->ntime = le32toh(*(uint32_t *)(&buf[68]));
+			swork->tv_received = tv_now;
+			memcpy(swork->diffbits, &buf[72], 4);
+			memcpy(swork->target, work->target, sizeof(swork->target));
+			free(swork->job_id);
+			swork->job_id = NULL;
+			swork->clean = true;
+			// FIXME: Do something with expire
+			pool->nonce2sz = pool->n2size = GBT_XNONCESZ;
+			pool->nonce2 = 0;
+			cg_wunlock(&pool->data_lock);
+		}
+		else
+			applog(LOG_DEBUG, "blkmk_get_mdata failed for pool %u", pool->pool_no);
+	}
+#endif // BLKMAKER_VERSION > 4
+	pool_set_opaque(pool, !work->tr);
 
 	ret = true;
 
@@ -4238,7 +4340,7 @@ static
 void maybe_local_submit(const struct work *work)
 {
 #if BLKMAKER_VERSION > 3
-	if (unlikely(work->block && work->tmpl))
+	if (unlikely(work->block && work->tr))
 	{
 		// This is a block with a full template (GBT)
 		// Regardless of the result, submit to local bitcoind(s) as well
@@ -4404,17 +4506,19 @@ static char *submit_upstream_work_request(struct work *work)
 	char *s, *sd;
 	struct pool *pool = work->pool;
 
-	if (work->tmpl) {
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
 		json_t *req;
 		unsigned char data[80];
 
 		swap32yes(data, work->data, 80 / 4);
 #if BLKMAKER_VERSION > 3
 		if (work->do_foreign_submit)
-			req = blkmk_submit_foreign_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
+			req = blkmk_submit_foreign_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
 		else
 #endif
-			req = blkmk_submit_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
+			req = blkmk_submit_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
 		s = json_dumps(req, 0);
 		json_decref(req);
 		sd = malloc(161);
@@ -4436,7 +4540,7 @@ static char *submit_upstream_work_request(struct work *work)
 	}
 
 	applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
-	if (work->tmpl)
+	if (work->tr)
 		free(sd);
 	else
 		s = realloc_strcat(s, "\n");
@@ -4771,13 +4875,14 @@ void get_benchmark_work(struct work *work)
 	copy_time(&work->tv_staged, &work->tv_getwork);
 	work->getwork_mode = GETWORK_MODE_BENCHMARK;
 	calc_diff(work, 0);
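+	/* Nominally allow a driver to ntime roll 60 seconds */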
+	work_set_simple_ntime_roll_limit(work, 60);
 }
 
 static void wake_gws(void);
 
 static void update_last_work(struct work *work)
 {
-	if (!work->tmpl)
+	if (!work->tr)
 		// Only save GBT jobs, since rollntime isn't coordinated well yet
 		return;
 
@@ -4844,14 +4949,11 @@ static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const
 			return strdup(getwork_req);
 		case PLP_GETBLOCKTEMPLATE:
 			work->getwork_mode = GETWORK_MODE_GBT;
-			work->tmpl_refcount = malloc(sizeof(*work->tmpl_refcount));
-			if (!work->tmpl_refcount)
-				return NULL;
-			work->tmpl = blktmpl_create();
-			if (!work->tmpl)
+			blktemplate_t * const tmpl = blktmpl_create();
+			if (!tmpl)
 				goto gbtfail2;
-			*work->tmpl_refcount = 1;
-			gbt_capabilities_t caps = blktmpl_addcaps(work->tmpl);
+			work->tr = tmpl_makeref(tmpl);
+			gbt_capabilities_t caps = blktmpl_addcaps(tmpl);
 			if (!caps)
 				goto gbtfail;
 			caps |= GBT_LONGPOLL;
@@ -4877,11 +4979,9 @@ static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const
 	return NULL;
 
 gbtfail:
-	blktmpl_free(work->tmpl);
-	work->tmpl = NULL;
+	tmpl_decref(work->tr);
+	work->tr = NULL;
 gbtfail2:
-	free(work->tmpl_refcount);
-	work->tmpl_refcount = NULL;
 	return NULL;
 }
 
@@ -5248,10 +5348,11 @@ static inline bool can_roll(struct work *work)
 		return false;
 	if (!(work->pool && !work->clone))
 		return false;
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		if (stale_work(work, false))
 			return false;
-		return blkmk_work_left(work->tmpl);
+		return blkmk_work_left(work->tr->tmpl);
 	}
 	return (work->rolltime &&
 		work->rolls < 7000 && !stale_work(work, false));
@@ -5259,10 +5360,11 @@
 
 static void roll_work(struct work *work)
 {
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		struct timeval tv_now;
 		cgtime(&tv_now);
-		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
+		if (blkmk_get_data(work->tr->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
 		swap32yes(work->data, work->data, 80 / 4);
 		calc_midstate(work);
@@ -5276,6 +5378,7 @@ static void roll_work(struct work *work)
 		ntime = be32toh(*work_ntime);
 		ntime++;
 		*work_ntime = htobe32(ntime);
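+		// Re-base the limits on the rolled ntime; no extra driver allowance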
+		work_set_simple_ntime_roll_limit(work, 0);
 
 		applog(LOG_DEBUG, "Successfully rolled time header in work");
 	}
@@ -5306,12 +5409,8 @@ static void _copy_work(struct work *work, const struct work *base_work, int noff
 		work->nonce1 = strdup(base_work->nonce1);
 	bytes_cpy(&work->nonce2, &base_work->nonce2);
 
-	if (base_work->tmpl) {
-		struct pool *pool = work->pool;
-		mutex_lock(&pool->pool_lock);
-		++*work->tmpl_refcount;
-		mutex_unlock(&pool->pool_lock);
-	}
+	if (base_work->tr)
+		tmpl_incref(base_work->tr);
 
 	if (noffset)
 	{
@@ -5417,7 +5516,7 @@ bool stale_work(struct work *work, bool share)
 	/* Technically the rolltime should be correct but some pools
 	 * advertise a broken expire= that is lower than a meaningful
 	 * scantime */
-	if (work->rolltime >= opt_scantime || work->tmpl)
+	if (work->rolltime >= opt_scantime || work->tr)
 		work_expiry = work->rolltime;
 	else
 		work_expiry = opt_expiry;
@@ -8614,7 +8713,7 @@ badwork:
 	/* Decipher the longpoll URL, if any, and store it in ->lp_url */
 
 	const struct blktmpl_longpoll_req *lp;
-	if (work->tmpl && (lp = blktmpl_get_longpoll(work->tmpl))) {
+	if (work->tr && (lp = blktmpl_get_longpoll(work->tr->tmpl))) {
 		// NOTE: work_decode takes care of lp id
 		pool->lp_url = lp->uri ? absolute_uri(lp->uri, pool->rpc_url) : pool->rpc_url;
 		if (!pool->lp_url)
@@ -8892,18 +8991,34 @@ void test_target()
 void stratum_work_cpy(struct stratum_work * const dst, const struct stratum_work * const src)
 {
 	*dst = *src;
-	dst->job_id = strdup(src->job_id);
+	if (dst->tr)
+		tmpl_incref(dst->tr);
+	dst->job_id = maybe_strdup(src->job_id);
 	bytes_cpy(&dst->coinbase, &src->coinbase);
 	bytes_cpy(&dst->merkle_bin, &src->merkle_bin);
 }
 
 void stratum_work_clean(struct stratum_work * const swork)
 {
+	if (swork->tr)
+		tmpl_decref(swork->tr);
 	free(swork->job_id);
 	bytes_free(&swork->coinbase);
 	bytes_free(&swork->merkle_bin);
 }
 
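+/* A pool's swork is usable either because a GBT-backed template still has
+ * time left, or because a stratum pool has sent us mining.notify. */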
+bool pool_has_usable_swork(const struct pool * const pool)
+{
+	if (pool->swork.tr)
+	{
+		// GBT
+		struct timeval tv_now;
+		timer_set_now(&tv_now);
+		return blkmk_time_left(pool->swork.tr->tmpl, tv_now.tv_sec);
+	}
+	return pool->stratum_notify;
+}
+
 /* Generates stratum based work based on the most recent notify information
  * from the pool. This will keep generating work while a pool is down so we use
  * other means to detect when the pool has died in stratum_thread */
@@ -8968,11 +9083,13 @@ void gen_stratum_work2(struct work *work, struct stratum_work *swork, const char
 	memcpy(&work->data[72], swork->diffbits, 4);
 	memset(&work->data[76], 0, 4); // nonce
 	memcpy(&work->data[80], workpadding_bin, 48);
+
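+	/* Roll limits now come from the job itself, superseding the fixed
+	 * 60-second drv_rolllimit formerly set near the end of this function. */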
+	work->ntime_roll_limits = swork->ntime_roll_limits;
 
 	/* Copy parameters required for share submission */
 	memcpy(work->target, swork->target, sizeof(work->target));
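+	// swork->job_id (and possibly nonce1) may be NULL for GBT-backed jobs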
-	work->job_id = strdup(swork->job_id);
-	work->nonce1 = strdup(nonce1);
+	work->job_id = maybe_strdup(swork->job_id);
+	work->nonce1 = maybe_strdup(nonce1);
 	if (swork->data_lock_p)
 		cg_runlock(swork->data_lock_p);
 
@@ -8994,8 +9111,6 @@
 	work->id = total_work++;
 	work->longpoll = false;
 	work->getwork_mode = GETWORK_MODE_STRATUM;
-	/* Nominally allow a driver to ntime roll 60 seconds */
-	work->drv_rolllimit = 60;
 	calc_diff(work, 0);
 }
 
@@ -12199,10 +12314,10 @@ retry:
 			work = make_clone(pool->last_work_copy);
 			mutex_unlock(&pool->last_work_lock);
 			roll_work(work);
-			applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, tv_now.tv_sec));
+			applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tr->tmpl, tv_now.tv_sec));
 			stage_work(work);
 			continue;
-		} else if (last_work->tmpl && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tmpl) > (unsigned long)mining_threads) {
+		} else if (last_work->tr && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tr->tmpl) > (unsigned long)mining_threads) {
 			// Don't free last_work_copy, since it is used to detect upstream provides plenty of work per template
 		} else {
 			free_work(last_work);