
Merge branch 'ssm_from_gbt' into bfgminer

Conflicts:
	miner.c
Luke Dashjr, 11 years ago
commit 4ad4c1a19b
10 changed files with 444 additions and 87 deletions
  1. Makefile.am (+4 -0)
  2. autogen.sh (+4 -0)
  3. driver-stratum.c (+11 -20)
  4. libblkmaker (+1 -1)
  5. miner.c (+177 -62)
  6. miner.h (+24 -3)
  7. util.c (+8 -0)
  8. util.h (+154 -1)
  9. work2d.c (+47 -0)
  10. work2d.h (+14 -0)

+ 4 - 0
Makefile.am

@@ -202,6 +202,10 @@ if NEED_DYNCLOCK
 bfgminer_SOURCES += dynclock.c dynclock.h
 endif
 
+if USE_LIBEVENT
+bfgminer_SOURCES  += work2d.c work2d.h
+endif
+
 if HAS_FPGA
 dist_doc_DATA += README.FPGA
 endif

+ 4 - 0
autogen.sh

@@ -6,6 +6,10 @@ bs_dir="$(dirname "$0")"
 
 if test -z "$NOSUBMODULES" ; then
 	echo 'Getting submodules...'
+	
+	# Older versions had INSTALL in git; remove it so git can update cleanly
+	rm -f libblkmaker/INSTALL
+	
 	(
 		cd "${bs_dir}"
 		git submodule update --init

+ 11 - 20
driver-stratum.c

@@ -30,12 +30,10 @@
 #include "driver-proxy.h"
 #include "miner.h"
 #include "util.h"
+#include "work2d.h"
 
-#define MAX_CLIENTS 255
-
-static bool _ssm_xnonce1s[MAX_CLIENTS + 1] = { true };
-static uint8_t _ssm_client_octets;
-static uint8_t _ssm_client_xnonce2sz;
+#define _ssm_client_octets     work2d_xnonce1sz
+#define _ssm_client_xnonce2sz  work2d_xnonce2sz
 static char *_ssm_notify;
 static int _ssm_notify_sz;
 static struct event *ev_notify;
@@ -152,7 +150,7 @@ bool stratumsrv_update_notify_str(struct pool * const pool, bool clean)
 		.pool = pool,
 		.work_restart_id = pool->work_restart_id,
 		.n2size = n2size,
-		.nonce1 = strdup(pool->nonce1),
+		.nonce1 = maybe_strdup(pool->nonce1),
 	};
 	timer_set_now(&ssj->tv_prepared);
 	stratum_work_cpy(&ssj->swork, swork);
@@ -277,11 +275,11 @@ void _stratumsrv_update_notify(evutil_socket_t fd, short what, __maybe_unused vo
 	else
 		stratumsrv_job_pruner();
 	
-	if (!pool->stratum_notify)
+	if (!pool_has_usable_swork(pool))
 	{
-		applog(LOG_WARNING, "SSM: Not using a stratum server upstream!");
+		applog(LOG_WARNING, "SSM: No usable 2D work upstream!");
 		if (clean)
-			stratumsrv_boot_all_subscribed("Current upstream pool does not have active stratum");
+			stratumsrv_boot_all_subscribed("Current upstream pool does not have usable 2D work");
 		goto out;
 	}
 	
@@ -368,12 +366,8 @@ void stratumsrv_mining_subscribe(struct bufferevent *bev, json_t *params, const
 	
 	if (!*xnonce1_p)
 	{
-		uint32_t xnonce1;
-		for (xnonce1 = MAX_CLIENTS; _ssm_xnonce1s[xnonce1]; --xnonce1)
-			if (!xnonce1)
-				return_stratumsrv_failure(20, "Maximum clients already connected");
-		_ssm_xnonce1s[xnonce1] = true;
-		*xnonce1_p = htole32(xnonce1);
+		if (!reserve_work2d_(xnonce1_p))
+			return_stratumsrv_failure(20, "Maximum clients already connected");
 	}
 	
 	bin2hex(xnonce1x, xnonce1_p, _ssm_client_octets);
@@ -544,12 +538,11 @@ static
 void stratumsrv_client_close(struct stratumsrv_conn * const conn)
 {
 	struct bufferevent * const bev = conn->bev;
-	uint32_t xnonce1 = le32toh(conn->xnonce1_le);
 	
 	bufferevent_free(bev);
 	LL_DELETE(_ssm_connections, conn);
+	release_work2d_(conn->xnonce1_le);
 	free(conn);
-	_ssm_xnonce1s[xnonce1] = false;
 }
 
 static
@@ -630,9 +623,7 @@ void *stratumsrv_thread(__maybe_unused void *p)
 	pthread_detach(pthread_self());
 	RenameThread("stratumsrv");
 	
-	for (uint64_t n = MAX_CLIENTS; n; n >>= 8)
-		++_ssm_client_octets;
-	_ssm_client_xnonce2sz = 2;
+	work2d_init();
 	
 	struct event_base *evbase = event_base_new();
 	_smm_evbase = evbase;

+ 1 - 1
libblkmaker

@@ -1 +1 @@
-Subproject commit bca8f6f5e56c547e9bbc808fb644152e44f3344d
+Subproject commit 385f2ddd7c11532d73175009d864dc646596c554

+ 177 - 62
miner.c

@@ -2681,6 +2681,39 @@ static void calc_midstate(struct work *work)
 	swap32tole(work->midstate, work->midstate, 8);
 }
 
+static
+struct bfg_tmpl_ref *tmpl_makeref(blktemplate_t * const tmpl)
+{
+	struct bfg_tmpl_ref * const tr = malloc(sizeof(*tr));
+	*tr = (struct bfg_tmpl_ref){
+		.tmpl = tmpl,
+		.refcount = 1,
+	};
+	mutex_init(&tr->mutex);
+	return tr;
+}
+
+static
+void tmpl_incref(struct bfg_tmpl_ref * const tr)
+{
+	mutex_lock(&tr->mutex);
+	++tr->refcount;
+	mutex_unlock(&tr->mutex);
+}
+
+void tmpl_decref(struct bfg_tmpl_ref * const tr)
+{
+	mutex_lock(&tr->mutex);
+	bool free_tmpl = !--tr->refcount;
+	mutex_unlock(&tr->mutex);
+	if (free_tmpl)
+	{
+		blktmpl_free(tr->tmpl);
+		mutex_destroy(&tr->mutex);
+		free(tr);
+	}
+}
+
 static struct work *make_work(void)
 {
 	struct work *work = calloc(1, sizeof(struct work));
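
(Illustrative note, not part of the commit: the bfg_tmpl_ref helpers added above replace the old malloc'd tmpl_refcount counter with a mutex-protected wrapper. A minimal sketch of the intended lifecycle, assuming two hypothetical struct work pointers that share one libblkmaker template:)

// Sketch only: hypothetical callers inside miner.c, using the helpers above.
static void example_template_sharing(struct work *work, struct work *clone)
{
	blktemplate_t * const tmpl = blktmpl_create();  // libblkmaker template
	work->tr = tmpl_makeref(tmpl);                  // refcount == 1
	
	tmpl_incref(work->tr);                          // a clone shares the same template
	clone->tr = work->tr;                           // refcount == 2
	
	tmpl_decref(clone->tr);                         // refcount == 1
	tmpl_decref(work->tr);                          // refcount == 0: blktmpl_free() + free(tr)
}
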
@@ -2705,16 +2738,8 @@ void clean_work(struct work *work)
 	if (work->device_data_free_func)
 		work->device_data_free_func(work);
 
-	if (work->tmpl) {
-		struct pool *pool = work->pool;
-		mutex_lock(&pool->pool_lock);
-		bool free_tmpl = !--*work->tmpl_refcount;
-		mutex_unlock(&pool->pool_lock);
-		if (free_tmpl) {
-			blktmpl_free(work->tmpl);
-			free(work->tmpl_refcount);
-		}
-	}
+	if (work->tr)
+		tmpl_decref(work->tr);
 
 	memset(work, 0, sizeof(struct work));
 }
@@ -2793,11 +2818,36 @@ bool pool_may_redirect_to(struct pool * const pool, const char * const uri)
 	return match_domains(pool->rpc_url, strlen(pool->rpc_url), uri, strlen(uri));
 }
 
+void set_simple_ntime_roll_limit(struct ntime_roll_limits * const nrl, const uint32_t ntime_base, const int ntime_roll)
+{
+	*nrl = (struct ntime_roll_limits){
+		.min = ntime_base,
+		.max = ntime_base + ntime_roll,
+		.minoff = -ntime_roll,
+		.maxoff = ntime_roll,
+	};
+}
+
+void work_set_simple_ntime_roll_limit(struct work * const work, const int ntime_roll)
+{
+	set_simple_ntime_roll_limit(&work->ntime_roll_limits, upk_u32be(work->data, 0x44), ntime_roll);
+}
+
+static double target_diff(const unsigned char *);
+
+#define GBT_XNONCESZ (sizeof(uint32_t))
+
+#if BLKMAKER_VERSION > 4
+#define blkmk_append_coinbase_safe(tmpl, append, appendsz)  \
+       blkmk_append_coinbase_safe2(tmpl, append, appendsz, GBT_XNONCESZ, false)
+#endif
+
 static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 {
 	json_t *res_val = json_object_get(val, "result");
 	json_t *tmp_val;
 	bool ret = false;
+	struct timeval tv_now;
 
 	if (unlikely(detect_algo == 1)) {
 		json_t *tmp = json_object_get(res_val, "algorithm");
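
(Illustrative note, not from the diff: set_simple_ntime_roll_limit() above just records an absolute nTime window plus the matching offsets around a base value. With a hypothetical base of 1400000000, the 60-second allowance used elsewhere in this commit, and assuming signed minoff/maxoff fields, it produces:)

// Hypothetical values, only to show the arithmetic of set_simple_ntime_roll_limit().
static void example_ntime_window(void)
{
	struct ntime_roll_limits nrl;
	set_simple_ntime_roll_limit(&nrl, 1400000000, 60);
	// nrl.min    == 1400000000   (ntime_base)
	// nrl.max    == 1400000060   (ntime_base + ntime_roll)
	// nrl.minoff == -60          (-ntime_roll)
	// nrl.maxoff == 60           (+ntime_roll)
}
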
@@ -2806,28 +2856,30 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 			detect_algo = 2;
 	}
 	
-	if (work->tmpl) {
-		struct timeval tv_now;
-		cgtime(&tv_now);
-		const char *err = blktmpl_add_jansson(work->tmpl, res_val, tv_now.tv_sec);
+	timer_set_now(&tv_now);
+	
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
+		const char *err = blktmpl_add_jansson(tmpl, res_val, tv_now.tv_sec);
 		if (err) {
 			applog(LOG_ERR, "blktmpl error: %s", err);
 			return false;
 		}
-		work->rolltime = blkmk_time_left(work->tmpl, tv_now.tv_sec);
+		work->rolltime = blkmk_time_left(tmpl, tv_now.tv_sec);
 #if BLKMAKER_VERSION > 1
 		if (opt_coinbase_script.sz)
 		{
 			bool newcb;
 #if BLKMAKER_VERSION > 2
-			blkmk_init_generation2(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb);
+			blkmk_init_generation2(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb);
 #else
-			newcb = !work->tmpl->cbtxn;
-			blkmk_init_generation(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz);
+			newcb = !tmpl->cbtxn;
+			blkmk_init_generation(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz);
 #endif
 			if (newcb)
 			{
-				ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, &template_nonce, sizeof(template_nonce));
+				ssize_t ae = blkmk_append_coinbase_safe(tmpl, &template_nonce, sizeof(template_nonce));
 				if (ae < (ssize_t)sizeof(template_nonce))
 					applog(LOG_WARNING, "Cannot append template-nonce to coinbase on pool %u (%"PRId64") - you might be wasting hashing!", work->pool->pool_no, (int64_t)ae);
 				++template_nonce;
@@ -2836,7 +2888,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 #endif
 #if BLKMAKER_VERSION > 0
 		{
-			ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, opt_coinbase_sig, 101);
+			ssize_t ae = blkmk_append_coinbase_safe(tmpl, opt_coinbase_sig, 101);
 			static bool appenderr = false;
 			if (ae <= 0) {
 				if (opt_coinbase_sig) {
@@ -2872,7 +2924,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 					free(tmp);
 					truncatewarning = true;
 				}
-				ae = blkmk_append_coinbase_safe(work->tmpl, cbappend, ae);
+				ae = blkmk_append_coinbase_safe(tmpl, cbappend, ae);
 				if (ae <= 0) {
 					applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Error appending coinbase signature (%"PRId64")", (int64_t)ae);
 					appenderr = true;
@@ -2881,13 +2933,20 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 			}
 		}
 #endif
-		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
+		if (blkmk_get_data(tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			return false;
 		swap32yes(work->data, work->data, 80 / 4);
 		memcpy(&work->data[80], workpadding_bin, 48);
+		
+		work->ntime_roll_limits = (struct ntime_roll_limits){
+			.min = tmpl->mintime,
+			.max = tmpl->maxtime,
+			.minoff = tmpl->mintimeoff,
+			.maxoff = tmpl->maxtimeoff,
+		};
 
 		const struct blktmpl_longpoll_req *lp;
-		if ((lp = blktmpl_get_longpoll(work->tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
+		if ((lp = blktmpl_get_longpoll(tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
 			free(pool->lp_id);
 			pool->lp_id = strdup(lp->id);
 
@@ -2906,6 +2965,8 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 		applog(LOG_ERR, "JSON inval data");
 		return false;
 	}
+	else
+		work_set_simple_ntime_roll_limit(work, 0);
 
 	if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) {
 		// Calculate it ourselves
@@ -2917,7 +2978,8 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 		applog(LOG_ERR, "JSON inval target");
 		return false;
 	}
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		for (size_t i = 0; i < sizeof(work->target) / 2; ++i)
 		{
 			int p = (sizeof(work->target) - 1) - i;
@@ -2935,9 +2997,49 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 
 	memset(work->hash, 0, sizeof(work->hash));
 
-	cgtime(&work->tv_staged);
+	work->tv_staged = tv_now;
 	
-	pool_set_opaque(pool, !work->tmpl);
+#if BLKMAKER_VERSION > 4
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
+		uint8_t buf[80];
+		int16_t expire;
+		uint8_t *cbtxn;
+		size_t cbtxnsz;
+		size_t cbextranonceoffset;
+		int branchcount;
+		libblkmaker_hash_t *branches;
+		
+		if (blkmk_get_mdata(tmpl, buf, sizeof(buf), tv_now.tv_sec, &expire, &cbtxn, &cbtxnsz, &cbextranonceoffset, &branchcount, &branches, GBT_XNONCESZ, false))
+		{
+			struct stratum_work * const swork = &pool->swork;
+			const size_t branchdatasz = branchcount * 0x20;
+			
+			cg_wlock(&pool->data_lock);
+			swork->tr = work->tr;
+			bytes_assimilate_raw(&swork->coinbase, cbtxn, cbtxnsz, cbtxnsz);
+			swork->nonce2_offset = cbextranonceoffset;
+			bytes_assimilate_raw(&swork->merkle_bin, branches, branchdatasz, branchdatasz);
+			swork->merkles = branchcount;
+			memcpy(swork->header1, &buf[0], 36);
+			swork->ntime = le32toh(*(uint32_t *)(&buf[68]));
+			swork->tv_received = tv_now;
+			memcpy(swork->diffbits, &buf[72], 4);
+			memcpy(swork->target, work->target, sizeof(swork->target));
+			free(swork->job_id);
+			swork->job_id = NULL;
+			swork->clean = true;
+			// FIXME: Do something with expire
+			pool->nonce2sz = pool->n2size = GBT_XNONCESZ;
+			pool->nonce2 = 0;
+			cg_wunlock(&pool->data_lock);
+		}
+		else
+			applog(LOG_DEBUG, "blkmk_get_mdata failed for pool %u", pool->pool_no);
+	}
+#endif  // BLKMAKER_VERSION > 4
+	pool_set_opaque(pool, !work->tr);
 
 	ret = true;
 
@@ -4238,7 +4340,7 @@ static
 void maybe_local_submit(const struct work *work)
 {
 #if BLKMAKER_VERSION > 3
-	if (unlikely(work->block && work->tmpl))
+	if (unlikely(work->block && work->tr))
 	{
 		// This is a block with a full template (GBT)
 		// Regardless of the result, submit to local bitcoind(s) as well
@@ -4404,17 +4506,19 @@ static char *submit_upstream_work_request(struct work *work)
 	char *s, *sd;
 	struct pool *pool = work->pool;
 
-	if (work->tmpl) {
+	if (work->tr)
+	{
+		blktemplate_t * const tmpl = work->tr->tmpl;
 		json_t *req;
 		unsigned char data[80];
 		
 		swap32yes(data, work->data, 80 / 4);
 #if BLKMAKER_VERSION > 3
 		if (work->do_foreign_submit)
-			req = blkmk_submit_foreign_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
+			req = blkmk_submit_foreign_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
 		else
 #endif
-			req = blkmk_submit_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
+			req = blkmk_submit_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
 		s = json_dumps(req, 0);
 		json_decref(req);
 		sd = malloc(161);
@@ -4436,7 +4540,7 @@ static char *submit_upstream_work_request(struct work *work)
 	}
 
 	applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
-	if (work->tmpl)
+	if (work->tr)
 		free(sd);
 	else
 		s = realloc_strcat(s, "\n");
@@ -4771,13 +4875,14 @@ void get_benchmark_work(struct work *work)
 	copy_time(&work->tv_staged, &work->tv_getwork);
 	work->getwork_mode = GETWORK_MODE_BENCHMARK;
 	calc_diff(work, 0);
+	work_set_simple_ntime_roll_limit(work, 60);
 }
 
 static void wake_gws(void);
 
 static void update_last_work(struct work *work)
 {
-	if (!work->tmpl)
+	if (!work->tr)
 		// Only save GBT jobs, since rollntime isn't coordinated well yet
 		return;
 
@@ -4844,14 +4949,11 @@ static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const
 			return strdup(getwork_req);
 		case PLP_GETBLOCKTEMPLATE:
 			work->getwork_mode = GETWORK_MODE_GBT;
-			work->tmpl_refcount = malloc(sizeof(*work->tmpl_refcount));
-			if (!work->tmpl_refcount)
-				return NULL;
-			work->tmpl = blktmpl_create();
-			if (!work->tmpl)
+			blktemplate_t * const tmpl = blktmpl_create();
+			if (!tmpl)
 				goto gbtfail2;
-			*work->tmpl_refcount = 1;
-			gbt_capabilities_t caps = blktmpl_addcaps(work->tmpl);
+			work->tr = tmpl_makeref(tmpl);
+			gbt_capabilities_t caps = blktmpl_addcaps(tmpl);
 			if (!caps)
 				goto gbtfail;
 			caps |= GBT_LONGPOLL;
@@ -4877,11 +4979,9 @@ static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const
 	return NULL;
 
 gbtfail:
-	blktmpl_free(work->tmpl);
-	work->tmpl = NULL;
+	tmpl_decref(work->tr);
+	work->tr = NULL;
 gbtfail2:
-	free(work->tmpl_refcount);
-	work->tmpl_refcount = NULL;
 	return NULL;
 }
 
@@ -5248,10 +5348,11 @@ static inline bool can_roll(struct work *work)
 		return false;
 	if (!(work->pool && !work->clone))
 		return false;
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		if (stale_work(work, false))
 			return false;
-		return blkmk_work_left(work->tmpl);
+		return blkmk_work_left(work->tr->tmpl);
 	}
 	return (work->rolltime &&
 		work->rolls < 7000 && !stale_work(work, false));
@@ -5259,10 +5360,11 @@
 
 static void roll_work(struct work *work)
 {
-	if (work->tmpl) {
+	if (work->tr)
+	{
 		struct timeval tv_now;
 		cgtime(&tv_now);
-		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
+		if (blkmk_get_data(work->tr->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
 		swap32yes(work->data, work->data, 80 / 4);
 		calc_midstate(work);
@@ -5276,6 +5378,7 @@ static void roll_work(struct work *work)
 	ntime = be32toh(*work_ntime);
 	ntime++;
 	*work_ntime = htobe32(ntime);
+		work_set_simple_ntime_roll_limit(work, 0);
 
 		applog(LOG_DEBUG, "Successfully rolled time header in work");
 	}
@@ -5306,12 +5409,8 @@ static void _copy_work(struct work *work, const struct work *base_work, int noff
 		work->nonce1 = strdup(base_work->nonce1);
 	bytes_cpy(&work->nonce2, &base_work->nonce2);
 
-	if (base_work->tmpl) {
-		struct pool *pool = work->pool;
-		mutex_lock(&pool->pool_lock);
-		++*work->tmpl_refcount;
-		mutex_unlock(&pool->pool_lock);
-	}
+	if (base_work->tr)
+		tmpl_incref(base_work->tr);
 	
 	if (noffset)
 	{
@@ -5417,7 +5516,7 @@ bool stale_work(struct work *work, bool share)
 	/* Technically the rolltime should be correct but some pools
 	 * advertise a broken expire= that is lower than a meaningful
 	 * scantime */
-	if (work->rolltime >= opt_scantime || work->tmpl)
+	if (work->rolltime >= opt_scantime || work->tr)
 		work_expiry = work->rolltime;
 	else
 		work_expiry = opt_expiry;
@@ -8614,7 +8713,7 @@ badwork:
 		/* Decipher the longpoll URL, if any, and store it in ->lp_url */
 
 		const struct blktmpl_longpoll_req *lp;
-		if (work->tmpl && (lp = blktmpl_get_longpoll(work->tmpl))) {
+		if (work->tr && (lp = blktmpl_get_longpoll(work->tr->tmpl))) {
 			// NOTE: work_decode takes care of lp id
 			pool->lp_url = lp->uri ? absolute_uri(lp->uri, pool->rpc_url) : pool->rpc_url;
 			if (!pool->lp_url)
@@ -8892,18 +8991,34 @@ void test_target()
 void stratum_work_cpy(struct stratum_work * const dst, const struct stratum_work * const src)
 {
 	*dst = *src;
-	dst->job_id = strdup(src->job_id);
+	if (dst->tr)
+		tmpl_incref(dst->tr);
+	dst->job_id = maybe_strdup(src->job_id);
 	bytes_cpy(&dst->coinbase, &src->coinbase);
 	bytes_cpy(&dst->merkle_bin, &src->merkle_bin);
 }
 
 void stratum_work_clean(struct stratum_work * const swork)
 {
+	if (swork->tr)
+		tmpl_decref(swork->tr);
 	free(swork->job_id);
 	bytes_free(&swork->coinbase);
 	bytes_free(&swork->merkle_bin);
 }
 
+bool pool_has_usable_swork(const struct pool * const pool)
+{
+	if (pool->swork.tr)
+	{
+		// GBT
+		struct timeval tv_now;
+		timer_set_now(&tv_now);
+		return blkmk_time_left(pool->swork.tr->tmpl, tv_now.tv_sec);
+	}
+	return pool->stratum_notify;
+}
+
 /* Generates stratum based work based on the most recent notify information
  * from the pool. This will keep generating work while a pool is down so we use
  * other means to detect when the pool has died in stratum_thread */
@@ -8968,11 +9083,13 @@ void gen_stratum_work2(struct work *work, struct stratum_work *swork, const char
 	memcpy(&work->data[72], swork->diffbits, 4);
 	memset(&work->data[76], 0, 4);  // nonce
 	memcpy(&work->data[80], workpadding_bin, 48);
+	
+	work->ntime_roll_limits = swork->ntime_roll_limits;
 
 	/* Copy parameters required for share submission */
 	memcpy(work->target, swork->target, sizeof(work->target));
-	work->job_id = strdup(swork->job_id);
-	work->nonce1 = strdup(nonce1);
+	work->job_id = maybe_strdup(swork->job_id);
+	work->nonce1 = maybe_strdup(nonce1);
 	if (swork->data_lock_p)
 		cg_runlock(swork->data_lock_p);
 
@@ -8994,8 +9111,6 @@ void gen_stratum_work2(struct work *work, struct stratum_work *swork, const char
 	work->id = total_work++;
 	work->longpoll = false;
 	work->getwork_mode = GETWORK_MODE_STRATUM;
-	/* Nominally allow a driver to ntime roll 60 seconds */
-	work->drv_rolllimit = 60;
 	calc_diff(work, 0);
 }
 
@@ -12199,10 +12314,10 @@ retry:
 				work = make_clone(pool->last_work_copy);
 				mutex_unlock(&pool->last_work_lock);
 				roll_work(work);
-				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, tv_now.tv_sec));
+				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tr->tmpl, tv_now.tv_sec));
 				stage_work(work);
 				continue;
-			} else if (last_work->tmpl && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tmpl) > (unsigned long)mining_threads) {
+			} else if (last_work->tr && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tr->tmpl) > (unsigned long)mining_threads) {
 				// Don't free last_work_copy, since it is used to detect upstream provides plenty of work per template
 			} else {
 				free_work(last_work);

+ 24 - 3
miner.h

@@ -1132,7 +1132,21 @@ enum pool_protocol {
 	PLP_GETBLOCKTEMPLATE,
 };
 
+struct bfg_tmpl_ref {
+	blktemplate_t *tmpl;
+	int refcount;
+	pthread_mutex_t mutex;
+};
+
+struct ntime_roll_limits {
+	uint32_t min;
+	uint32_t max;
+	int16_t minoff;
+	int16_t maxoff;
+};
+
 struct stratum_work {
+	struct bfg_tmpl_ref *tr;
 	char *job_id;
 	bool clean;
 	
@@ -1144,8 +1158,12 @@ struct stratum_work {
 	
 	uint8_t header1[36];
 	uint8_t diffbits[4];
+	
 	uint32_t ntime;
 	struct timeval tv_received;
+	struct ntime_roll_limits ntime_roll_limits;
+	
+	struct timeval tv_expire;
 
 	uint8_t target[32];
 
@@ -1284,7 +1302,7 @@ struct work {
 	double share_diff;
 
 	int		rolls;
-	int		drv_rolllimit; /* How much the driver can roll ntime */
+	struct ntime_roll_limits ntime_roll_limits;
 
 	struct {
 		uint32_t nonce;
@@ -1329,8 +1347,7 @@ struct work {
 	// Allow devices to timestamp work for their own purposes
 	struct timeval	tv_stamp;
 
-	blktemplate_t	*tmpl;
-	int		*tmpl_refcount;
+	struct bfg_tmpl_ref *tr;
 	unsigned int	dataid;
 	bool		do_foreign_submit;
 
@@ -1352,6 +1369,7 @@ extern void get_datestamp(char *, size_t, time_t);
 extern void get_benchmark_work(struct work *);
 extern void stratum_work_cpy(struct stratum_work *dst, const struct stratum_work *src);
 extern void stratum_work_clean(struct stratum_work *);
+extern bool pool_has_usable_swork(const struct pool *);
 extern void gen_stratum_work2(struct work *, struct stratum_work *, const char *nonce1);
 extern void inc_hw_errors3(struct thr_info *thr, const struct work *work, const uint32_t *bad_nonce_p, float nonce_diff);
 static inline
@@ -1421,10 +1439,13 @@ extern void tq_freeze(struct thread_q *tq);
 extern void tq_thaw(struct thread_q *tq);
 extern bool successful_connect;
 extern void adl(void);
+extern void tmpl_decref(struct bfg_tmpl_ref *);
 extern void clean_work(struct work *work);
 extern void free_work(struct work *work);
 extern void __copy_work(struct work *work, const struct work *base_work);
 extern struct work *copy_work(const struct work *base_work);
+extern void set_simple_ntime_roll_limit(struct ntime_roll_limits *, uint32_t ntime_base, int ntime_roll);
+extern void work_set_simple_ntime_roll_limit(struct work *, int ntime_roll);
 extern char *devpath_to_devid(const char *);
 extern struct thr_info *get_thread(int thr_id);
 extern struct cgpu_info *get_devices(int id);

+ 8 - 0
util.c

@@ -2109,6 +2109,11 @@ static bool parse_notify(struct pool *pool, json_t *val)
 	cgtime(&pool->swork.tv_received);
 	free(pool->swork.job_id);
 	pool->swork.job_id = job_id;
+	if (pool->swork.tr)
+	{
+		tmpl_decref(pool->swork.tr);
+		pool->swork.tr = NULL;
+	}
 	pool->submit_old = !clean;
 	pool->swork.clean = true;
 	
@@ -2118,6 +2123,9 @@ static bool parse_notify(struct pool *pool, json_t *val)
 	pool->swork.ntime = be32toh(pool->swork.ntime);
 	hex2bin(&pool->swork.diffbits[0], nbit, 4);
 	
+	/* Nominally allow a driver to ntime roll 60 seconds */
+	set_simple_ntime_roll_limit(&pool->swork.ntime_roll_limits, pool->swork.ntime, 60);
+	
 	cb1_len = strlen(coinbase1) / 2;
 	pool->swork.nonce2_offset = cb1_len + pool->n1_len;
 	cb2_len = strlen(coinbase2) / 2;

+ 154 - 1
util.h

@@ -1,5 +1,5 @@
 /*
- * Copyright 2013 Luke Dashjr
+ * Copyright 2013-2014 Luke Dashjr
  * Copyright 2012-2013 Con Kolivas
  * Copyright 2011 Andrew Smith
  * Copyright 2011 Jeff Garzik
@@ -224,6 +224,150 @@ static inline void align_len(size_t *len)
 }
 
 
+static inline
+uint8_t upk_u8(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return buf[offset];
+}
+
+static inline
+uint16_t upk_u16be(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint16_t)buf[offset+0]) <<    8)
+	     | (((uint16_t)buf[offset+1]) <<    0);
+}
+
+static inline
+uint32_t upk_u32be(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint32_t)buf[offset+0]) << 0x18)
+	     | (((uint32_t)buf[offset+1]) << 0x10)
+	     | (((uint32_t)buf[offset+2]) <<    8)
+	     | (((uint32_t)buf[offset+3]) <<    0);
+}
+
+static inline
+uint64_t upk_u64be(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint64_t)buf[offset+0]) << 0x38)
+	     | (((uint64_t)buf[offset+1]) << 0x30)
+	     | (((uint64_t)buf[offset+2]) << 0x28)
+	     | (((uint64_t)buf[offset+3]) << 0x20)
+	     | (((uint64_t)buf[offset+4]) << 0x18)
+	     | (((uint64_t)buf[offset+5]) << 0x10)
+	     | (((uint64_t)buf[offset+6]) <<    8)
+	     | (((uint64_t)buf[offset+7]) <<    0);
+}
+
+static inline
+uint16_t upk_u16le(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint16_t)buf[offset+0]) <<    0)
+	     | (((uint16_t)buf[offset+1]) <<    8);
+}
+
+static inline
+uint32_t upk_u32le(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint32_t)buf[offset+0]) <<    0)
+	     | (((uint32_t)buf[offset+1]) <<    8)
+	     | (((uint32_t)buf[offset+2]) << 0x10)
+	     | (((uint32_t)buf[offset+3]) << 0x18);
+}
+
+static inline
+uint64_t upk_u64le(const void * const bufp, const int offset)
+{
+	const uint8_t * const buf = bufp;
+	return (((uint64_t)buf[offset+0]) <<    0)
+	     | (((uint64_t)buf[offset+1]) <<    8)
+	     | (((uint64_t)buf[offset+2]) << 0x10)
+	     | (((uint64_t)buf[offset+3]) << 0x18)
+	     | (((uint64_t)buf[offset+4]) << 0x20)
+	     | (((uint64_t)buf[offset+5]) << 0x28)
+	     | (((uint64_t)buf[offset+6]) << 0x30)
+	     | (((uint64_t)buf[offset+7]) << 0x38);
+}
+
+
+static inline
+void pk_u8(void * const bufp, const int offset, const uint8_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset] = nv;
+}
+
+static inline
+void pk_u16be(void * const bufp, const int offset, const uint16_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >>    8) & 0xff;
+	buf[offset+1] = (nv >>    0) & 0xff;
+}
+
+static inline
+void pk_u32be(void * const bufp, const int offset, const uint32_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >> 0x18) & 0xff;
+	buf[offset+1] = (nv >> 0x10) & 0xff;
+	buf[offset+2] = (nv >>    8) & 0xff;
+	buf[offset+3] = (nv >>    0) & 0xff;
+}
+
+static inline
+void pk_u64be(void * const bufp, const int offset, const uint64_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >> 0x38) & 0xff;
+	buf[offset+1] = (nv >> 0x30) & 0xff;
+	buf[offset+2] = (nv >> 0x28) & 0xff;
+	buf[offset+3] = (nv >> 0x20) & 0xff;
+	buf[offset+4] = (nv >> 0x18) & 0xff;
+	buf[offset+5] = (nv >> 0x10) & 0xff;
+	buf[offset+6] = (nv >>    8) & 0xff;
+	buf[offset+7] = (nv >>    0) & 0xff;
+}
+
+static inline
+void pk_u16le(void * const bufp, const int offset, const uint16_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >>    0) & 0xff;
+	buf[offset+1] = (nv >>    8) & 0xff;
+}
+
+static inline
+void pk_u32le(void * const bufp, const int offset, const uint32_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >>    0) & 0xff;
+	buf[offset+1] = (nv >>    8) & 0xff;
+	buf[offset+2] = (nv >> 0x10) & 0xff;
+	buf[offset+3] = (nv >> 0x18) & 0xff;
+}
+
+static inline
+void pk_u64le(void * const bufp, const int offset, const uint64_t nv)
+{
+	uint8_t * const buf = bufp;
+	buf[offset+0] = (nv >>    0) & 0xff;
+	buf[offset+1] = (nv >>    8) & 0xff;
+	buf[offset+2] = (nv >> 0x10) & 0xff;
+	buf[offset+3] = (nv >> 0x18) & 0xff;
+	buf[offset+4] = (nv >> 0x20) & 0xff;
+	buf[offset+5] = (nv >> 0x28) & 0xff;
+	buf[offset+6] = (nv >> 0x30) & 0xff;
+	buf[offset+7] = (nv >> 0x38) & 0xff;
+}
+
+
 typedef struct bytes_t {
 	uint8_t *buf;
 	size_t sz;
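
(Illustrative note, not from the diff: a quick round-trip through the new pack/unpack helpers, assuming util.h is included. pk_u32be() writes the bytes big-endian and the upk_* readers reinterpret the same buffer.)

#include <assert.h>

// Sketch only: exercises the helpers added in this hunk.
static void example_pack_unpack(void)
{
	uint8_t buf[8] = {0};
	pk_u32be(buf, 0, 0x11223344);             // buf = 11 22 33 44 ...
	assert(upk_u8(buf, 0) == 0x11);
	assert(upk_u16be(buf, 0) == 0x1122);
	assert(upk_u32be(buf, 0) == 0x11223344);
	assert(upk_u32le(buf, 0) == 0x44332211);  // same bytes read little-endian
}
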
@@ -329,6 +473,15 @@ void bytes_cpy(bytes_t *dst, const bytes_t *src)
 	memcpy(dst->buf, src->buf, dst->sz);
 }
 
+static inline
+void bytes_assimilate_raw(bytes_t * const b, void * const buf, const size_t bufsz, const size_t buflen)
+{
+	free(b->buf);
+	b->buf = buf;
+	b->allocsz = bufsz;
+	b->sz = buflen;
+}
+
 static inline
 void bytes_shift(bytes_t *b, size_t shift)
 {
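
(Illustrative note, not from the diff: bytes_assimilate_raw() lets a bytes_t adopt an already-malloc'd buffer without copying, which is how miner.c above hands the coinbase and merkle-branch buffers returned by blkmk_get_mdata() to pool->swork. A hypothetical sketch, assuming <stdlib.h> and <string.h>:)

// Sketch only: transfer ownership of a heap buffer to a bytes_t.
static void example_assimilate(bytes_t * const b)
{
	const size_t len = 42;
	uint8_t * const raw = malloc(len);       // buffer produced elsewhere (e.g. by libblkmaker)
	memset(raw, 0xaa, len);
	
	bytes_assimilate_raw(b, raw, len, len);  // b now owns raw; no copy is made
	// do not free(raw) here: bytes_free(b) releases it later
}
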

+ 47 - 0
work2d.c

@@ -0,0 +1,47 @@
+/*
+ * Copyright 2013-2014 Luke Dashjr
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option)
+ * any later version.  See COPYING for more details.
+ */
+
+#include "config.h"
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "miner.h"
+
+#define MAX_DIVISIONS 255
+
+static bool work2d_reserved[MAX_DIVISIONS + 1] = { true };
+int work2d_xnonce1sz;
+int work2d_xnonce2sz;
+
+void work2d_init()
+{
+	RUNONCE();
+	
+	for (uint64_t n = MAX_DIVISIONS; n; n >>= 8)
+		++work2d_xnonce1sz;
+	work2d_xnonce2sz = 2;
+}
+
+bool reserve_work2d_(uint32_t * const xnonce1_p)
+{
+	uint32_t xnonce1;
+	for (xnonce1 = MAX_DIVISIONS; work2d_reserved[xnonce1]; --xnonce1)
+		if (!xnonce1)
+			return false;
+	work2d_reserved[xnonce1] = true;
+	*xnonce1_p = htole32(xnonce1);
+	return true;
+}
+
+void release_work2d_(uint32_t xnonce1)
+{
+	xnonce1 = le32toh(xnonce1);
+	work2d_reserved[xnonce1] = false;
+}
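
(Illustrative note, not from the diff: the reservation API this new file provides. work2d_init() sizes the shared extranonce fields, reserve_work2d_() hands out one little-endian extranonce1 per stratum client, and release_work2d_() returns the slot. A hypothetical caller:)

// Sketch only: one client session using the work2d helpers above.
static void example_client_session(void)
{
	uint32_t xnonce1_le;
	
	work2d_init();  // work2d_xnonce1sz becomes 1 (255 fits in one byte), work2d_xnonce2sz becomes 2
	
	if (!reserve_work2d_(&xnonce1_le))
		return;  // all MAX_DIVISIONS slots taken; refuse the client
	
	// ... use xnonce1_le as this client's extranonce1 ...
	
	release_work2d_(xnonce1_le);  // free the slot when the connection closes
}
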

+ 14 - 0
work2d.h

@@ -0,0 +1,14 @@
+#ifndef BFG_WORK2D_H
+#define BFG_WORK2D_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+extern int work2d_xnonce1sz;
+extern int work2d_xnonce2sz;
+
+extern void work2d_init();
+extern bool reserve_work2d_(uint32_t *xnonce1_p);
+extern void release_work2d_(uint32_t xnonce1);
+
+#endif