
Always use struct timeval for timers, and don't mix timers with timestamps (functional only)

Luke Dashjr · 12 years ago
commit df45e5cf9f
8 changed files with 96 additions and 49 deletions
  1. api.c              +3  -3
  2. deviceapi.c        +1  -2
  3. driver-modminer.c  +5  -4
  4. driver-x6500.c     +5  -4
  5. miner.c            +33 -26
  6. miner.h            +3  -1
  7. util.c             +5  -4
  8. util.h             +41 -5

api.c  +3 -3

@@ -3565,7 +3565,7 @@ void api(int api_thr_id)
 	int n, bound;
 	char *connectaddr;
 	const char *binderror;
-	time_t bindstart;
+	struct timeval bindstart;
 	short int port = opt_api_port;
 	struct sockaddr_in serv;
 	struct sockaddr_in cli;
@@ -3650,11 +3650,11 @@ void api(int api_thr_id)
 
 	// try for more than 1 minute ... in case the old one hasn't completely gone yet
 	bound = 0;
-	bindstart = time(NULL);
+	cgtime(&bindstart);
 	while (bound == 0) {
 		if (SOCKETFAIL(bind(*apisock, (struct sockaddr *)(&serv), sizeof(serv)))) {
 			binderror = SOCKERRMSG;
-			if ((time(NULL) - bindstart) > 61)
+			if (timer_elapsed(&bindstart, NULL) > 61)
 				break;
 			else {
 				applog(LOG_WARNING, "API bind to port %d failed - trying again in 30sec", port);
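This start-then-poll pattern (capture a start time with cgtime(), then check whole elapsed seconds) recurs throughout the commit. Below is a minimal compilable sketch of the bind-retry loop, with stand-ins for cgtime() (a gettimeofday() wrapper in this tree) and timer_elapsed() (defined in the util.h hunk at the end of this diff); try_once() is a hypothetical callback.

#include <stdbool.h>
#include <sys/time.h>
#include <unistd.h>

/* Stand-in: in this tree, cgtime() wraps gettimeofday(). */
static void cgtime(struct timeval *tvp)
{
	gettimeofday(tvp, NULL);
}

/* Stand-in matching the util.h hunk: whole seconds elapsed since
 * *tvp_timer; a NULL tvp_now means "sample the clock now". */
static int timer_elapsed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
{
	struct timeval tv, tv_buf;

	if (!tvp_now) {
		cgtime(&tv_buf);
		tvp_now = &tv_buf;
	}
	timersub(tvp_now, tvp_timer, &tv);
	return tv.tv_sec;
}

/* Keep calling try_once() for a bit over a minute, like the bind loop. */
static bool retry_for_a_minute(bool (*try_once)(void))
{
	struct timeval start;

	cgtime(&start);
	while (!try_once()) {
		if (timer_elapsed(&start, NULL) > 61)
			return false;
		sleep(30);
	}
	return true;
}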

deviceapi.c  +1 -2

@@ -37,8 +37,7 @@ bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashe
 	const long cycle = opt_log_interval / 5 ? : 1;
 	
 	if (unlikely(hashes == -1)) {
-		time_t now = time(NULL);
-		if (difftime(now, cgpu->device_last_not_well) > 1.)
+		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
 			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
 		
 		if (thr->scanhash_working && opt_restart) {
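There is a slight semantic shift here: difftime(now, ...) > 1. required strictly more than one second, while timer_elapsed(...) > 0 holds as soon as one whole second has elapsed. Either way the zero-hash error is throttled to roughly once per second. A condensed sketch of such a throttle, reusing the stand-in cgtime()/timer_elapsed() from the previous sketch (in the hunk itself the timestamp is refreshed inside dev_error(), per the util.c change below):

/* True at most about once per second: timer_elapsed() counts whole
 * seconds, so it stays 0 until a full second has passed. */
static bool should_report(struct timeval *tv_last)
{
	if (timer_elapsed(tv_last, NULL) > 0) {
		cgtime(tv_last);
		return true;
	}
	return false;
}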

driver-modminer.c  +5 -4

@@ -64,7 +64,7 @@ struct modminer_fpga_state {
 	// Number of nonces did meet pdiff 1, ever
 	int good_share_counter;
 	// Time the clock was last reduced due to temperature
-	time_t last_cutoff_reduced;
+	struct timeval tv_last_cutoff_reduced;
 
 	unsigned char temp;
 
@@ -484,9 +484,10 @@ static void modminer_get_temperature(struct cgpu_info *modminer, struct thr_info
 		state->temp = temperature;
 		if (temperature > modminer->targettemp + opt_hysteresis) {
 			{
-				time_t now = time(NULL);
-				if (state->last_cutoff_reduced != now) {
-					state->last_cutoff_reduced = now;
+				struct timeval now;
+				cgtime(&now);
+				if (timer_elapsed(&state->tv_last_cutoff_reduced, &now)) {
+					state->tv_last_cutoff_reduced = now;
 					int oldFreq = state->dclk.freqM;
 					if (modminer_reduce_clock(thr, false))
 						applog(LOG_NOTICE, "%s: Frequency %s from %u to %u MHz (temp: %d)",
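The old code fired at most once per time_t second by comparing timestamps for equality; the new code samples the clock once into now and passes it explicitly, so the elapsed check and the stored timestamp use the same instant. The identical pattern appears in driver-x6500.c below. A condensed sketch, again reusing the stand-in helpers from the first sketch; reduce_clock_step() is hypothetical:

/* Hypothetical action to throttle. */
extern void reduce_clock_step(void);

/* Reduce the clock at most once per second. */
static void maybe_reduce_clock(struct timeval *tv_last_reduced)
{
	struct timeval now;

	cgtime(&now);
	if (timer_elapsed(tv_last_reduced, &now)) {
		*tv_last_reduced = now;
		reduce_clock_step();
	}
}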

driver-x6500.c  +5 -4

@@ -200,7 +200,7 @@ struct x6500_fpga_data {
 	uint8_t freqMaxMaxM;
 
 	// Time the clock was last reduced due to temperature
-	time_t last_cutoff_reduced;
+	struct timeval tv_last_cutoff_reduced;
 
 	float temp;
 	
@@ -498,9 +498,10 @@ void x6500_get_temperature(struct cgpu_info *x6500)
 
 		int temperature = round(fpga->temp);
 		if (temperature > x6500->targettemp + opt_hysteresis) {
-			time_t now = time(NULL);
-			if (fpga->last_cutoff_reduced != now) {
-				fpga->last_cutoff_reduced = now;
+			struct timeval now;
+			cgtime(&now);
+			if (timer_elapsed(&fpga->tv_last_cutoff_reduced, &now)) {
+				fpga->tv_last_cutoff_reduced = now;
 				int oldFreq = fpga->dclk.freqM;
 				if (x6500_change_clock(thr, oldFreq - 1))
 					applog(LOG_NOTICE, "%"PRIprepr": Frequency dropped from %u to %u MHz (temp: %.1fC)",

miner.c  +33 -26

@@ -321,7 +321,6 @@ struct stratum_share {
 	bool block;
 	struct work *work;
 	int id;
-	time_t sshare_time;
 };
 
 static struct stratum_share *stratum_shares = NULL;
@@ -370,15 +369,15 @@ static bool time_before(struct tm *tm1, struct tm *tm2)
 
 static bool should_run(void)
 {
-	struct timeval tv;
 	struct tm tm;
+	time_t tt;
 	bool within_range;
 
 	if (!schedstart.enable && !schedstop.enable)
 		return true;
 
-	cgtime(&tv);
-	localtime_r(&tv.tv_sec, &tm);
+	tt = time(NULL);
+	localtime_r(&tt, &tm);
 
 	// NOTE: This is delicately balanced so that should_run is always false if schedstart==schedstop
 	if (time_before(&schedstop.tm, &schedstart.tm))
@@ -528,7 +527,7 @@ struct pool *add_pool(void)
 		quit(1, "Failed to pthread_cond_init in add_pool");
 	cglock_init(&pool->data_lock);
 	mutex_init(&pool->stratum_lock);
-	pool->swork.transparency_time = (time_t)-1;
+	timer_unset(&pool->swork.tv_transparency);
 
 	/* Make sure the pool doesn't think we've been idle since time 0 */
 	pool->tv_idle.tv_sec = ~0UL;
@@ -1997,12 +1996,14 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 	}
 	
 	if (work->tmpl) {
-		const char *err = blktmpl_add_jansson(work->tmpl, res_val, time(NULL));
+		struct timeval tv_now;
+		cgtime(&tv_now);
+		const char *err = blktmpl_add_jansson(work->tmpl, res_val, tv_now.tv_sec);
 		if (err) {
 			applog(LOG_ERR, "blktmpl error: %s", err);
 			return false;
 		}
-		work->rolltime = blkmk_time_left(work->tmpl, time(NULL));
+		work->rolltime = blkmk_time_left(work->tmpl, tv_now.tv_sec);
 #if BLKMAKER_VERSION > 1
 		if (opt_coinbase_script.sz)
 		{
@@ -2069,7 +2070,7 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
 			}
 		}
 #endif
-		if (blkmk_get_data(work->tmpl, work->data, 80, time(NULL), NULL, &work->dataid) < 76)
+		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			return false;
 		swap32yes(work->data, work->data, 80 / 4);
 		memcpy(&work->data[80], workpadding_bin, 48);
@@ -3876,7 +3877,9 @@ static inline bool can_roll(struct work *work)
 static void roll_work(struct work *work)
 {
 	if (work->tmpl) {
-		if (blkmk_get_data(work->tmpl, work->data, 80, time(NULL), NULL, &work->dataid) < 76)
+		struct timeval tv_now;
+		cgtime(&tv_now);
+		if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
 			applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
 		swap32yes(work->data, work->data, 80 / 4);
 		calc_midstate(work);
@@ -4092,9 +4095,9 @@ bool stale_work(struct work *work, bool share)
 
 	}
 
-	double elapsed_since_staged = difftime(time(NULL), work->tv_staged.tv_sec);
+	int elapsed_since_staged = timer_elapsed(&work->tv_staged, NULL);
 	if (elapsed_since_staged > work_expiry) {
-		applog(LOG_DEBUG, "%s stale due to expiry (%.0f >= %u)", share?"Share":"Work", elapsed_since_staged, work_expiry);
+		applog(LOG_DEBUG, "%s stale due to expiry (%d >= %u)", share?"Share":"Work", elapsed_since_staged, work_expiry);
 		return true;
 	}
 
@@ -4180,7 +4183,7 @@ struct submit_work_state {
 	bool resubmit;
 	struct curl_ent *ce;
 	int failures;
-	time_t staleexpire;
+	struct timeval tv_staleexpire;
 	char *s;
 	struct timeval tv_submit;
 	struct submit_work_state *next;
@@ -4230,7 +4233,7 @@ static struct submit_work_state *begin_submission(struct work *work)
 			submit_discard_share(work);
 			goto out;
 		}
-		sws->staleexpire = time(NULL) + 300;
+		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
 	}
 
 	if (work->stratum) {
@@ -4278,7 +4281,7 @@ static bool retry_submission(struct submit_work_state *sws)
 				submit_discard_share(work);
 				return false;
 			}
-			sws->staleexpire = time(NULL) + 300;
+			timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
 		}
 		if (unlikely((opt_retries >= 0) && (++sws->failures > opt_retries))) {
 			applog(LOG_ERR, "Pool %d failed %d submission retries, discarding", pool->pool_no, opt_retries);
@@ -4286,7 +4289,8 @@ static bool retry_submission(struct submit_work_state *sws)
 			return false;
 		}
 		else if (work->stale) {
-			if (unlikely(opt_retries < 0 && sws->staleexpire <= time(NULL))) {
+			if (unlikely(opt_retries < 0 && timer_passed(&sws->tv_staleexpire, NULL)))
+			{
 				applog(LOG_NOTICE, "Pool %d stale share failed to submit for 5 minutes, discarding", pool->pool_no);
 				submit_discard_share(work);
 				return false;
@@ -4446,7 +4450,6 @@ next_write_sws_del:
 			char noncehex[9];
 			char ntimehex[9];
 			
-			sshare->sshare_time = time(NULL);
 			sshare->work = copy_work(work);
 			bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
 			nonce = *((uint32_t *)(work->data + 76));
@@ -5099,6 +5102,7 @@ static void stage_work(struct work *work)
 	applog(LOG_DEBUG, "Pushing work from pool %d to hash queue", work->pool->pool_no);
 	work->work_restart_id = work->pool->work_restart_id;
 	work->pool->last_work_time = time(NULL);
+	cgtime(&work->pool->tv_last_work_time);
 	test_work_current(work);
 	hash_push(work);
 }
@@ -6396,7 +6400,7 @@ bool parse_stratum_response(struct pool *pool, char *s)
 				applog(LOG_NOTICE, "Pool %u now providing block contents to us",
 				       pool->pool_no);
 			}
-			pool->swork.transparency_time = (time_t)-1;
+			timer_unset(&pool->swork.tv_transparency);
 
 fishy:
 			ret = true;
@@ -6619,7 +6623,7 @@ static bool cnx_needed(struct pool *pool)
 
 	/* Keep the connection open to allow any stray shares to be submitted
 	 * on switching pools for 2 minutes. */
-	if (difftime(time(NULL), pool->last_work_time) < 120)
+	if (!timer_passed(&pool->tv_last_work_time, NULL))
 		return true;
 
 	/* If the pool has only just come to life and is higher priority than
@@ -6790,9 +6794,9 @@ static void *stratum_thread(void *userdata)
 			free_work(work);
 		}
 
-		if (pool->swork.transparency_time != (time_t)-1 && difftime(time(NULL), pool->swork.transparency_time) > 21.09375) {
+		if (timer_passed(&pool->swork.tv_transparency, NULL)) {
 			// More than 4 timmills past since requested transactions
-			pool->swork.transparency_time = (time_t)-1;
+			timer_unset(&pool->swork.tv_transparency);
 			pool->swork.opaque = true;
 			applog(LOG_WARNING, "Pool %u is hiding block contents from us",
 			       pool->pool_no);
@@ -7069,6 +7073,7 @@ retry:
 	pthread_cond_signal(&getq->cond);
 	mutex_unlock(stgd_lock);
 	work->pool->last_work_time = time(NULL);
+	cgtime(&work->pool->tv_last_work_time);
 
 	return work;
 }
@@ -8197,7 +8202,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 				continue;
 			else
 			if (*denable == DEV_RECOVER_ERR) {
-				if (opt_restart && difftime(time(NULL), cgpu->device_last_not_well) > cgpu->reinit_backoff) {
+				if (opt_restart && timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > cgpu->reinit_backoff) {
 					applog(LOG_NOTICE, "Attempting to reinitialize %s",
 					       dev_str);
 					if (cgpu->reinit_backoff < 300)
@@ -8213,8 +8218,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 					       dev_str);
 					device_recovered(cgpu);
 				}
-				cgpu->device_last_not_well = time(NULL);
-				cgpu->device_not_well_reason = REASON_DEV_THERMAL_CUTOFF;
+				dev_error(cgpu, REASON_DEV_THERMAL_CUTOFF);
 				continue;
 			}
 			else
@@ -9467,10 +9471,11 @@ begin_bench:
 	cgtime(&total_tv_start);
 	cgtime(&total_tv_end);
 	miner_started = total_tv_start;
+	time_t miner_start_ts = time(NULL);
 	if (schedstart.tm.tm_sec)
-		localtime_r(&miner_started.tv_sec, &schedstart.tm);
+		localtime_r(&miner_start_ts, &schedstart.tm);
 	if (schedstop.tm.tm_sec)
-		localtime_r(&miner_started.tv_sec, &schedstop .tm);
+		localtime_r(&miner_start_ts, &schedstop .tm);
 	get_datestamp(datestamp, total_tv_start.tv_sec);
 
 	// Initialise processors and threads
@@ -9617,11 +9622,13 @@ retry:
 				{}
 			else
 			if (can_roll(last_work) && should_roll(last_work)) {
+				struct timeval tv_now;
+				cgtime(&tv_now);
 				free_work(work);
 				work = make_clone(pool->last_work_copy);
 				mutex_unlock(&pool->last_work_lock);
 				roll_work(work);
-				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, time(NULL)));
+				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, tv_now.tv_sec));
 				stage_work(work);
 				continue;
 			} else if (last_work->tmpl && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tmpl) > (unsigned long)mining_threads) {
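The stale-share handling above switches from storing time(NULL) + 300 to arming a deadline: timer_set_delay_from_now() takes a delay in microseconds (300000000 us is 5 minutes), and timer_passed() reports expiry while returning false for unset timers. timer_set_delay_from_now() itself is not part of this diff's util.h hunk; the stand-in below only mirrors how the call sites use it.

#include <stdbool.h>
#include <sys/time.h>

/* Stand-in mirroring the call sites above: arm *tvp a given number of
 * microseconds into the future. The real helper lives in util.h. */
static void timer_set_delay_from_now(struct timeval *tvp, long long usecs)
{
	struct timeval now, delay = {
		.tv_sec = usecs / 1000000,
		.tv_usec = usecs % 1000000,
	};

	gettimeofday(&now, NULL);
	timeradd(&now, &delay, tvp);
}

/* timer_passed(), condensed from the util.h hunk below: false while the
 * timer is unset (tv_sec == -1) or the deadline is still ahead. */
static bool timer_passed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
{
	struct timeval tv_buf;

	if (tvp_timer->tv_sec == -1)
		return false;
	if (!tvp_now) {
		gettimeofday(&tv_buf, NULL);
		tvp_now = &tv_buf;
	}
	return timercmp(tvp_timer, tvp_now, <);
}

/* The expiry test from retry_submission(), reduced to its essentials. */
static bool stale_share_expired(const struct timeval *tv_staleexpire)
{
	return timer_passed(tv_staleexpire, NULL);
}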

miner.h  +3 -1

@@ -543,6 +543,7 @@ struct cgpu_info {
 
 	time_t device_last_well;
 	time_t device_last_not_well;
+	struct timeval tv_device_last_not_well;
 	enum dev_reason device_not_well_reason;
 	float reinit_backoff;
 	int thread_fail_init_count;
@@ -1072,7 +1073,7 @@ struct stratum_work {
 	double diff;
 
 	bool transparency_probed;
-	time_t transparency_time;
+	struct timeval tv_transparency;
 	bool opaque;
 };
 
@@ -1144,6 +1145,7 @@ struct pool {
 	struct submit_work_state *sws_waiting_on_curl;
 
 	time_t last_work_time;
+	struct timeval tv_last_work_time;
 	time_t last_share_time;
 	double last_share_diff;
 	uint64_t best_diff;
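Note that the headers keep the legacy time_t fields next to the new struct timeval ones, so unconverted readers (such as the API output) keep working while the timing logic moves to timers; writers update both, as dev_error() and stage_work() now do. A sketch of that transitional pattern, with a hypothetical pool_times struct:

#include <time.h>
#include <sys/time.h>

/* Hypothetical container for the dual bookkeeping in struct pool. */
struct pool_times {
	time_t last_work_time;             /* legacy wall-clock timestamp */
	struct timeval tv_last_work_time;  /* new timer */
};

/* Writers keep both representations in step. */
static void note_work_received(struct pool_times *pt)
{
	pt->last_work_time = time(NULL);
	gettimeofday(&pt->tv_last_work_time, NULL);
}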

util.c  +5 -4

@@ -1503,8 +1503,8 @@ void stratum_probe_transparency(struct pool *pool)
 	        pool->swork.job_id,
 	        pool->swork.job_id);
 	stratum_send(pool, s, sLen);
-	if ((!pool->swork.opaque) && pool->swork.transparency_time == (time_t)-1)
-		pool->swork.transparency_time = time(NULL);
+	if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency))
+		cgtime(&pool->swork.tv_transparency);
 	pool->swork.transparency_probed = true;
 }
 
@@ -1590,7 +1590,7 @@ static bool parse_notify(struct pool *pool, json_t *val)
 	pool->getwork_requested++;
 	total_getworks++;
 
-	if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || pool->swork.transparency_time != (time_t)-1)
+	if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency))
 		if (pool->stratum_init)
 			stratum_probe_transparency(pool);
 
@@ -1839,7 +1839,7 @@ static bool setup_stratum_curl(struct pool *pool)
 
 	applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf);
 	mutex_lock(&pool->stratum_lock);
-	pool->swork.transparency_time = (time_t)-1;
+	timer_unset(&pool->swork.tv_transparency);
 	pool->stratum_active = false;
 	pool->stratum_notify = false;
 	pool->swork.transparency_probed = false;
@@ -2099,6 +2099,7 @@ bool restart_stratum(struct pool *pool)
 void dev_error(struct cgpu_info *dev, enum dev_reason reason)
 {
 	dev->device_last_not_well = time(NULL);
+	cgtime(&dev->tv_device_last_not_well);
 	dev->device_not_well_reason = reason;
 
 	switch (reason) {
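Here the (time_t)-1 sentinel for "no probe outstanding" becomes an unset timer: tv_sec == -1, managed through timer_unset()/timer_isset() from the util.h hunk below. A condensed sketch of the arm-once/disarm pattern used for the transparency probe; probe_state and its functions are illustrative:

#include <stdbool.h>
#include <sys/time.h>

/* From the util.h hunk below: tv_sec == -1 marks an unarmed timer. */
static inline void timer_unset(struct timeval *tvp) { tvp->tv_sec = -1; }
static inline bool timer_isset(const struct timeval *tvp) { return tvp->tv_sec != -1; }

struct probe_state {
	struct timeval tv_probe;  /* when the probe was sent; unset if idle */
};

/* Arm only if no probe is already outstanding, as
 * stratum_probe_transparency() does with tv_transparency. */
static void probe_sent(struct probe_state *ps)
{
	if (!timer_isset(&ps->tv_probe))
		gettimeofday(&ps->tv_probe, NULL);
}

/* Disarm when the pool answers, as parse_stratum_response() does. */
static void probe_answered(struct probe_state *ps)
{
	timer_unset(&ps->tv_probe);
}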

util.h  +41 -5

@@ -211,6 +211,18 @@ void set_maxfd(int *p_maxfd, int fd)
 }
 
 
+static inline
+void timer_unset(struct timeval *tvp)
+{
+	tvp->tv_sec = -1;
+}
+
+static inline
+bool timer_isset(const struct timeval *tvp)
+{
+	return tvp->tv_sec != -1;
+}
+
 #define TIMEVAL_USECS(usecs)  (  \
 	(struct timeval){  \
 		.tv_sec = (usecs) / 1000000,  \
@@ -230,24 +242,48 @@ void set_maxfd(int *p_maxfd, int fd)
 } while(0)
 
 static inline
-bool timer_passed(struct timeval *tvp_timer, struct timeval *tvp_now)
+const struct timeval *_bfg_nullisnow(const struct timeval *tvp, struct timeval *tvp_buf)
+{
+	if (tvp)
+		return tvp;
+	cgtime(tvp_buf);
+	return tvp_buf;
+}
+
+static inline
+int timer_elapsed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
 {
-	return (tvp_timer->tv_sec != -1 && timercmp(tvp_timer, tvp_now, <));
+	struct timeval tv;
+	const struct timeval *_tvp_now = _bfg_nullisnow(tvp_now, &tv);
+	timersub(_tvp_now, tvp_timer, &tv);
+	return tv.tv_sec;
+}
+
+static inline
+bool timer_passed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
+{
+	if (!timer_isset(tvp_timer))
+		return false;
+	
+	struct timeval tv;
+	const struct timeval *_tvp_now = _bfg_nullisnow(tvp_now, &tv);
+	
+	return timercmp(tvp_timer, _tvp_now, <);
 }
 
 static inline
 void reduce_timeout_to(struct timeval *tvp_timeout, struct timeval *tvp_time)
 {
-	if (tvp_time->tv_sec == -1)
+	if (!timer_isset(tvp_time))
 		return;
-	if (tvp_timeout->tv_sec == -1 /* no timeout */ || timercmp(tvp_time, tvp_timeout, <))
+	if ((!timer_isset(tvp_timeout)) || timercmp(tvp_time, tvp_timeout, <))
 		*tvp_timeout = *tvp_time;
 }
 
 static inline
 struct timeval *select_timeout(struct timeval *tvp_timeout, struct timeval *tvp_now)
 {
-	if (tvp_timeout->tv_sec == -1)
+	if (!timer_isset(tvp_timeout))
 		return NULL;
 	
 	if (timercmp(tvp_timeout, tvp_now, <))
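Taken together, the helpers treat a struct timeval with tv_sec == -1 as unset: an unset timer is never "passed", and a NULL now argument makes timer_elapsed()/timer_passed() sample the clock themselves via _bfg_nullisnow(). A self-contained demo of those semantics, with the definitions condensed from the hunk above and cgtime() stood in by gettimeofday():

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static void cgtime(struct timeval *tvp)
{
	gettimeofday(tvp, NULL);
}

static inline void timer_unset(struct timeval *tvp) { tvp->tv_sec = -1; }
static inline bool timer_isset(const struct timeval *tvp) { return tvp->tv_sec != -1; }

static const struct timeval *_bfg_nullisnow(const struct timeval *tvp, struct timeval *tvp_buf)
{
	if (tvp)
		return tvp;
	cgtime(tvp_buf);
	return tvp_buf;
}

static int timer_elapsed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
{
	struct timeval tv;
	const struct timeval *_tvp_now = _bfg_nullisnow(tvp_now, &tv);

	/* timersub's output may alias _tvp_now here; the usual BSD macro
	 * reads each source field before overwriting it, so this is safe. */
	timersub(_tvp_now, tvp_timer, &tv);
	return tv.tv_sec;
}

static bool timer_passed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
{
	struct timeval tv;
	const struct timeval *_tvp_now;

	if (!timer_isset(tvp_timer))
		return false;
	_tvp_now = _bfg_nullisnow(tvp_now, &tv);
	return timercmp(tvp_timer, _tvp_now, <);
}

int main(void)
{
	struct timeval t;

	timer_unset(&t);
	printf("unset: isset=%d passed=%d\n", timer_isset(&t), timer_passed(&t, NULL));

	cgtime(&t);  /* arm at "now" */
	sleep(2);
	printf("armed: elapsed=%ds passed=%d\n", timer_elapsed(&t, NULL), timer_passed(&t, NULL));
	return 0;
}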