@@ -321,7 +321,6 @@ struct stratum_share {
 	bool block;
 	struct work *work;
 	int id;
-	time_t sshare_time;
 };
 
 static struct stratum_share *stratum_shares = NULL;
@@ -370,15 +369,15 @@ static bool time_before(struct tm *tm1, struct tm *tm2)
 
 static bool should_run(void)
 {
-	struct timeval tv;
 	struct tm tm;
+	time_t tt;
 	bool within_range;
 
 	if (!schedstart.enable && !schedstop.enable)
 		return true;
 
-	cgtime(&tv);
-	localtime_r(&tv.tv_sec, &tm);
+	tt = time(NULL);
+	localtime_r(&tt, &tm);
 
 	// NOTE: This is delicately balanced so that should_run is always false if schedstart==schedstop
 	if (time_before(&schedstop.tm, &schedstart.tm))
@@ -528,7 +527,7 @@ struct pool *add_pool(void)
 		quit(1, "Failed to pthread_cond_init in add_pool");
 	cglock_init(&pool->data_lock);
 	mutex_init(&pool->stratum_lock);
-	pool->swork.transparency_time = (time_t)-1;
+	timer_unset(&pool->swork.tv_transparency);
 
 	/* Make sure the pool doesn't think we've been idle since time 0 */
 	pool->tv_idle.tv_sec = ~0UL;
@@ -1997,12 +1996,14 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 	}
 
 	if (work->tmpl) {
-		const char *err = blktmpl_add_jansson(work->tmpl, res_val, time(NULL));
+		struct timeval tv_now;
+		cgtime(&tv_now);
+		const char *err = blktmpl_add_jansson(work->tmpl, res_val, tv_now.tv_sec);
 		if (err) {
 			applog(LOG_ERR, "blktmpl error: %s", err);
 			return false;
 		}
-		work->rolltime = blkmk_time_left(work->tmpl, time(NULL));
+		work->rolltime = blkmk_time_left(work->tmpl, tv_now.tv_sec);
 #if BLKMAKER_VERSION > 1
 		if (opt_coinbase_script.sz)
 		{
@@ -2069,7 +2070,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 			}
 		}
 #endif
-		if (blkmk_get_data(work->tmpl, work->data, 80, time(NULL), NULL, &work->dataid) < 76)
+		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			return false;
 		swap32yes(work->data, work->data, 80 / 4);
 		memcpy(&work->data[80], workpadding_bin, 48);
@@ -3876,7 +3877,9 @@ static inline bool can_roll(struct work *work)
 static void roll_work(struct work *work)
 {
 	if (work->tmpl) {
-		if (blkmk_get_data(work->tmpl, work->data, 80, time(NULL), NULL, &work->dataid) < 76)
+		struct timeval tv_now;
+		cgtime(&tv_now);
+		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
 		swap32yes(work->data, work->data, 80 / 4);
 		calc_midstate(work);
@@ -4092,9 +4095,9 @@ bool stale_work(struct work *work, bool share)
 
 	}
 
-	double elapsed_since_staged = difftime(time(NULL), work->tv_staged.tv_sec);
+	int elapsed_since_staged = timer_elapsed(&work->tv_staged, NULL);
 	if (elapsed_since_staged > work_expiry) {
-		applog(LOG_DEBUG, "%s stale due to expiry (%.0f >= %u)", share?"Share":"Work", elapsed_since_staged, work_expiry);
+		applog(LOG_DEBUG, "%s stale due to expiry (%d >= %u)", share?"Share":"Work", elapsed_since_staged, work_expiry);
 		return true;
 	}
 
@@ -4180,7 +4183,7 @@ struct submit_work_state {
 	bool resubmit;
 	struct curl_ent *ce;
 	int failures;
-	time_t staleexpire;
+	struct timeval tv_staleexpire;
 	char *s;
 	struct timeval tv_submit;
 	struct submit_work_state *next;
@@ -4230,7 +4233,7 @@ static struct submit_work_state *begin_submission(struct work *work)
 			submit_discard_share(work);
 			goto out;
 		}
-		sws->staleexpire = time(NULL) + 300;
+		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
 	}
 
 	if (work->stratum) {
@@ -4278,7 +4281,7 @@ static bool retry_submission(struct submit_work_state *sws)
 			submit_discard_share(work);
 			return false;
 		}
-		sws->staleexpire = time(NULL) + 300;
+		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
 	}
 	if (unlikely((opt_retries >= 0) && (++sws->failures > opt_retries))) {
 		applog(LOG_ERR, "Pool %d failed %d submission retries, discarding", pool->pool_no, opt_retries);
@@ -4286,7 +4289,8 @@ static bool retry_submission(struct submit_work_state *sws)
 		return false;
 	}
 	else if (work->stale) {
-		if (unlikely(opt_retries < 0 && sws->staleexpire <= time(NULL))) {
+		if (unlikely(opt_retries < 0 && timer_passed(&sws->tv_staleexpire, NULL)))
+		{
 			applog(LOG_NOTICE, "Pool %d stale share failed to submit for 5 minutes, discarding", pool->pool_no);
 			submit_discard_share(work);
 			return false;
@@ -4446,7 +4450,6 @@ next_write_sws_del:
 		char noncehex[9];
 		char ntimehex[9];
 
-		sshare->sshare_time = time(NULL);
 		sshare->work = copy_work(work);
 		bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
 		nonce = *((uint32_t *)(work->data + 76));
@@ -5099,6 +5102,7 @@ static void stage_work(struct work *work)
 	applog(LOG_DEBUG, "Pushing work from pool %d to hash queue", work->pool->pool_no);
 	work->work_restart_id = work->pool->work_restart_id;
 	work->pool->last_work_time = time(NULL);
+	cgtime(&work->pool->tv_last_work_time);
 	test_work_current(work);
 	hash_push(work);
 }
@@ -6396,7 +6400,7 @@ bool parse_stratum_response(struct pool *pool, char *s)
 			applog(LOG_NOTICE, "Pool %u now providing block contents to us",
 			       pool->pool_no);
 		}
-		pool->swork.transparency_time = (time_t)-1;
+		timer_unset(&pool->swork.tv_transparency);
 
 fishy:
 		ret = true;
@@ -6619,7 +6623,7 @@ static bool cnx_needed(struct pool *pool)
 
 	/* Keep the connection open to allow any stray shares to be submitted
 	 * on switching pools for 2 minutes. */
-	if (difftime(time(NULL), pool->last_work_time) < 120)
+	if (!timer_passed(&pool->tv_last_work_time, NULL))
 		return true;
 
 	/* If the pool has only just come to life and is higher priority than
@@ -6790,9 +6794,9 @@ static void *stratum_thread(void *userdata)
 			free_work(work);
 		}
 
-		if (pool->swork.transparency_time != (time_t)-1 && difftime(time(NULL), pool->swork.transparency_time) > 21.09375) {
+		if (timer_passed(&pool->swork.tv_transparency, NULL)) {
 			// More than 4 timmills past since requested transactions
-			pool->swork.transparency_time = (time_t)-1;
+			timer_unset(&pool->swork.tv_transparency);
 			pool->swork.opaque = true;
 			applog(LOG_WARNING, "Pool %u is hiding block contents from us",
 			       pool->pool_no);
@@ -7069,6 +7073,7 @@ retry:
 	pthread_cond_signal(&getq->cond);
 	mutex_unlock(stgd_lock);
 	work->pool->last_work_time = time(NULL);
+	cgtime(&work->pool->tv_last_work_time);
 
 	return work;
 }
@@ -8197,7 +8202,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 				continue;
 			else
 			if (*denable == DEV_RECOVER_ERR) {
-				if (opt_restart && difftime(time(NULL), cgpu->device_last_not_well) > cgpu->reinit_backoff) {
+				if (opt_restart && timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > cgpu->reinit_backoff) {
 					applog(LOG_NOTICE, "Attempting to reinitialize %s",
 					       dev_str);
 					if (cgpu->reinit_backoff < 300)
@@ -8213,8 +8218,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 					       dev_str);
 					device_recovered(cgpu);
 				}
-				cgpu->device_last_not_well = time(NULL);
-				cgpu->device_not_well_reason = REASON_DEV_THERMAL_CUTOFF;
+				dev_error(cgpu, REASON_DEV_THERMAL_CUTOFF);
 				continue;
 			}
 			else
@@ -9467,10 +9471,11 @@ begin_bench:
 	cgtime(&total_tv_start);
 	cgtime(&total_tv_end);
 	miner_started = total_tv_start;
+	time_t miner_start_ts = time(NULL);
 	if (schedstart.tm.tm_sec)
-		localtime_r(&miner_started.tv_sec, &schedstart.tm);
+		localtime_r(&miner_start_ts, &schedstart.tm);
 	if (schedstop.tm.tm_sec)
-		localtime_r(&miner_started.tv_sec, &schedstop .tm);
+		localtime_r(&miner_start_ts, &schedstop .tm);
 	get_datestamp(datestamp, total_tv_start.tv_sec);
 
 	// Initialise processors and threads
@@ -9617,11 +9622,13 @@ retry:
 			{}
 			else
 			if (can_roll(last_work) && should_roll(last_work)) {
+				struct timeval tv_now;
+				cgtime(&tv_now);
 				free_work(work);
 				work = make_clone(pool->last_work_copy);
 				mutex_unlock(&pool->last_work_lock);
 				roll_work(work);
-				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, time(NULL)));
+				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, tv_now.tv_sec));
 				stage_work(work);
 				continue;
 			} else if (last_work->tmpl && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tmpl) > (unsigned long)mining_threads) {
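
Reviewer note: the conversion above leans on the project's struct-timeval timer helpers (cgtime, timer_unset, timer_set_delay_from_now, timer_passed, timer_elapsed). The sketch below is a reconstruction of their apparent semantics from the call sites in this patch only; the authoritative definitions live in util.h, and details such as cgtime's clock source are assumptions here. Two behaviors the patch depends on: timer_passed() must treat an unset timer as "not passed" (that is what lets timer_unset() replace the old (time_t)-1 sentinel on tv_transparency), and timer_set_delay_from_now() takes microseconds, so 300000000 is the same 5 minutes as the old time(NULL) + 300.

/* Hypothetical reconstruction for review only -- not the actual util.h code.
 * timeradd/timersub/timercmp are the BSD sys/time.h macros. */
#include <stdbool.h>
#include <sys/time.h>

static void cgtime(struct timeval *tv)
{
	gettimeofday(tv, NULL);  /* assumed wall-clock source */
}

/* An unset timer is flagged with tv_sec == -1, mirroring the old (time_t)-1 sentinel. */
static void timer_unset(struct timeval *tv)
{
	tv->tv_sec = -1;
	tv->tv_usec = 0;
}

static bool timer_isset(const struct timeval *tv)
{
	return tv->tv_sec != -1;
}

/* Arm a timer to expire usecs microseconds from now (300000000 == 5 minutes). */
static void timer_set_delay_from_now(struct timeval *tv, long usecs)
{
	struct timeval now, delay = { usecs / 1000000, usecs % 1000000 };

	cgtime(&now);
	timeradd(&now, &delay, tv);
}

/* True iff the timer is set and has expired; a NULL "now" means fetch it here. */
static bool timer_passed(const struct timeval *tv, const struct timeval *tvp_now)
{
	struct timeval now;

	if (!timer_isset(tv))
		return false;
	if (!tvp_now) {
		cgtime(&now);
		tvp_now = &now;
	}
	return !timercmp(tvp_now, tv, <);
}

/* Whole seconds since the timer was set; a NULL "now" means fetch it here. */
static int timer_elapsed(const struct timeval *tv, const struct timeval *tvp_now)
{
	struct timeval now, delta;

	if (!tvp_now) {
		cgtime(&now);
		tvp_now = &now;
	}
	timersub(tvp_now, tv, &delta);
	return delta.tv_sec;
}

Under these assumed semantics, the stratum_thread hunk also implies that tv_transparency is armed elsewhere (not shown in this diff) with the ~21 s "4 timmills" delay at the point transactions are requested, since timer_passed() alone replaces the old explicit 21.09375-second difftime check.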