@@ -1096,6 +1096,8 @@ static bool get_upstream_work(struct work *work, bool lagging)
 	work->pool = pool;
 	total_getworks++;
 	pool->getwork_requested++;
+	if (work->thr)
+		work->thr->cgpu->getworks++;
 	json_decref(val);
 
 out:
@@ -1220,6 +1222,11 @@ static void *get_work_thread(void *userdata)
 		goto out;
 	}
 
+	if (wc->thr)
+		ret_work->thr = wc->thr;
+	else
+		ret_work->thr = NULL;
+
 	/* obtain new work from bitcoin via JSON-RPC */
 	while (!get_upstream_work(ret_work, wc->lagging)) {
 		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
@@ -2268,7 +2275,7 @@ static void pool_resus(struct pool *pool)
 	switch_pools(NULL);
 }
 
-static bool queue_request(void)
+static bool queue_request(struct thr_info *thr)
 {
 	int maxq = opt_queue + mining_threads;
 	struct workio_cmd *wc;
@@ -2290,8 +2297,10 @@ static bool queue_request(void)
 	}
 
 	wc->cmd = WC_GET_WORK;
-	/* The get work does not belong to any thread */
-	wc->thr = NULL;
+	if (thr)
+		wc->thr = thr;
+	else
+		wc->thr = NULL;
 
 	/* If we've queued more than 2/3 of the maximum and still have no
 	 * staged work, consider the system lagging and allow work to be
@@ -2363,7 +2372,7 @@ static void flush_requests(void)
 
 	for (i = 0; i < stale; i++) {
 		/* Queue a whole batch of new requests */
-		if (unlikely(!queue_request())) {
+		if (unlikely(!queue_request(NULL))) {
 			applog(LOG_ERR, "Failed to queue requests in flush_requests");
 			kill_work();
 			break;
@@ -2445,7 +2454,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 	thread_reportout(thr);
 retry:
 	pool = current_pool();
-	if (unlikely(!requested && !queue_request())) {
+	if (unlikely(!requested && !queue_request(NULL))) {
 		applog(LOG_WARNING, "Failed to queue_request in get_work");
 		goto out;
 	}
@@ -2673,7 +2682,6 @@ static void *miner_thread(void *userdata)
 			       "mining thread %d", thr_id);
 			goto out;
 		}
-		mythr->cgpu->getworks++;
 		needs_work = requested = false;
 		total_hashes = 0;
 		max_nonce = work.blk.nonce + hashes_done;
@@ -2794,7 +2802,7 @@ static void *miner_thread(void *userdata)
 		timeval_subtract(&diff, &tv_end, &tv_workstart);
 		if (!requested && (diff.tv_sec >= request_interval)) {
 			thread_reportout(mythr);
-			if (unlikely(!queue_request())) {
+			if (unlikely(!queue_request(mythr))) {
 				applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
 				goto out;
 			}
@@ -2997,7 +3005,6 @@ static void *gpuminer_thread(void *userdata)
 				       "gpu mining thread %d", thr_id);
 				goto out;
 			}
-			mythr->cgpu->getworks++;
 			requested = false;
 			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
 			work->blk.nonce = 0;
@@ -3048,7 +3055,6 @@ static void *gpuminer_thread(void *userdata)
 				       "gpu mining thread %d", thr_id);
 				goto out;
 			}
-			mythr->cgpu->getworks++;
 			requested = false;
 
 			precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
@@ -3104,7 +3110,7 @@ static void *gpuminer_thread(void *userdata)
 #endif
 		if (diff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
 			thread_reportout(mythr);
-			if (unlikely(!queue_request())) {
+			if (unlikely(!queue_request(mythr))) {
 				applog(LOG_ERR, "Failed to queue_request in gpuminer_thread %d", thr_id);
 				goto out;
 			}
@@ -3438,7 +3444,7 @@ static void *watchdog_thread(void *userdata)
 
 		sleep(interval);
 		if (requests_queued() < opt_queue)
-			queue_request();
+			queue_request(NULL);
 
 		hashmeter(-1, &zero_tv, 0);
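
For orientation, here is a minimal, self-contained sketch of the data flow this patch introduces: a requesting thread's thr_info rides along in the workio_cmd, gets copied into the fresh work item, and the upstream fetch then credits that thread's device counter, while anonymous callers (watchdog, flush_requests) pass NULL and credit no device. This is not part of the patch; the struct layouts and *_sketch function names are simplified stand-ins for cgminer's real thr_info, cgpu_info, work, and workio_cmd.

#include <stdio.h>

struct cgpu_info {
	int getworks;            /* per-device getwork counter */
};

struct thr_info {
	int id;
	struct cgpu_info *cgpu;  /* device this worker thread drives */
};

struct workio_cmd {
	struct thr_info *thr;    /* requester carried from queue_request() */
};

struct work {
	struct thr_info *thr;    /* requester, or NULL for anonymous work */
};

/* queue_request(thr): thr may be NULL for watchdog/flush callers */
static void queue_request_sketch(struct workio_cmd *wc, struct thr_info *thr)
{
	wc->thr = thr;           /* NULL means the getwork belongs to no thread */
}

/* get_work_thread(): copy the requester into the fresh work item */
static void get_work_thread_sketch(struct work *w, const struct workio_cmd *wc)
{
	w->thr = wc->thr;
}

/* get_upstream_work(): credit the device only when a requester exists */
static void get_upstream_work_sketch(struct work *w)
{
	if (w->thr)
		w->thr->cgpu->getworks++;
}

int main(void)
{
	struct cgpu_info gpu = { .getworks = 0 };
	struct thr_info miner = { .id = 0, .cgpu = &gpu };
	struct workio_cmd wc;
	struct work w;

	/* A miner thread asks for work: credited to its device. */
	queue_request_sketch(&wc, &miner);
	get_work_thread_sketch(&w, &wc);
	get_upstream_work_sketch(&w);

	/* The watchdog asks for work: no device is credited. */
	queue_request_sketch(&wc, NULL);
	get_work_thread_sketch(&w, &wc);
	get_upstream_work_sketch(&w);

	printf("gpu.getworks = %d\n", gpu.getworks); /* prints 1 */
	return 0;
}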