
Merge commit '411784a' into bfgminer

Luke Dashjr, 13 years ago
commit 8141adb3bc

6 changed files with 128 additions and 57 deletions:
  adl.c             + 5   - 0
  api.c             + 3   - 2
  driver-opencl.c   + 2   - 6
  miner.c           + 104 - 41
  miner.h           + 3   - 2
  util.c            + 11  - 6

+ 5 - 0
adl.c

@@ -695,6 +695,11 @@ int gpu_fanpercent(int gpu)
 		applog(LOG_WARNING, "You will need to start BFGMiner from scratch to correct this");
 		applog(LOG_WARNING, "Disabling fanspeed monitoring on this device");
 		ga->has_fanspeed = false;
+		if (ga->twin) {
+			applog(LOG_WARNING, "Disabling fanspeed linking on GPU twins");
+			ga->twin->twin = NULL;
+			ga->twin = NULL;
+		}
 	}
 	return ret;
 }

+ 3 - 2
api.c

@@ -2058,11 +2058,12 @@ static int itemstats(int i, char *id, struct cgminer_stats *stats, struct cgmine
 		if (pool_stats) {
 			sprintf(buf, isjson
 				? ",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld"
-				: ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld",
+				: ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld,Pool Av=%f",
 				pool_stats->getwork_calls, pool_stats->getwork_attempts,
 				pool_stats->getwork_wait.tv_sec, pool_stats->getwork_wait.tv_usec,
 				pool_stats->getwork_wait_max.tv_sec, pool_stats->getwork_wait_max.tv_usec,
-				pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec);
+				pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec,
+				pool_stats->getwork_wait_rolling);
 
 			strcat(io_buffer, buf);
 		}
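
For reference, the plain-text (non-JSON) form of this stats entry now carries the rolling getwork wait average as a trailing field; with purely illustrative values it would read:

    ,Pool Calls=42,Pool Attempts=43,Pool Wait=9.503211,Pool Max=1.202384,Pool Min=0.102384,Pool Av=0.226190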

+ 2 - 6
driver-opencl.c

@@ -755,15 +755,11 @@ struct cgpu_info *cpus;
 void pause_dynamic_threads(int gpu)
 {
 	struct cgpu_info *cgpu = &gpus[gpu];
-	int i, thread_no = 0;
+	int i;
 
-	for (i = 0; i < mining_threads; i++) {
+	for (i = 1; i < cgpu->threads; i++) {
 		struct thr_info *thr = &thr_info[i];
 
-		if (thr->cgpu != cgpu)
-			continue;
-		if (!thread_no++)
-			continue;
 		if (!thr->pause && cgpu->dynamic) {
 			applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
 			applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval");

+ 104 - 41
miner.c

@@ -1635,7 +1635,7 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
 	int thr_id = work->thr_id;
 	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
 	struct pool *pool = work->pool;
-	bool rolltime;
+	int rolltime;
 	uint32_t *hash32;
 	char hashshow[64+1] = "";
 
@@ -1850,16 +1850,15 @@ static bool get_upstream_work(struct work *work, CURL *curl)
 
 	url = pool->rpc_url;
 
+	gettimeofday(&tv_start, NULL);
 retry:
 	/* A single failure response here might be reported as a dead pool and
 	 * there may be temporary denied messages etc. falsely reporting
 	 * failure so retry a few times before giving up */
 	while (!val && retries++ < 3) {
 		pool_stats->getwork_attempts++;
-		gettimeofday(&tv_start, NULL);
 		val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req,
 			    false, false, &work->rolltime, pool, false);
-		gettimeofday(&tv_end, NULL);
 	}
 	if (unlikely(!val)) {
 		applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
@@ -1869,12 +1868,12 @@ retry:
 	rc = work_decode(json_object_get(val, "result"), work);
 	if (!rc && retries < 3)
 		goto retry;
-	work->pool = pool;
-	work->longpoll = false;
-	total_getworks++;
-	pool->getwork_requested++;
 
+	gettimeofday(&tv_end, NULL);
 	timersub(&tv_end, &tv_start, &tv_elapsed);
+	pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
+	pool_stats->getwork_wait_rolling /= 1.63;
+
 	timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
 	if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
 		pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
@@ -1886,6 +1885,11 @@ retry:
 	}
 	pool_stats->getwork_calls++;
 
+	work->pool = pool;
+	work->longpoll = false;
+	total_getworks++;
+	pool->getwork_requested++;
+
 	json_decref(val);
 out:
 
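The two getwork_wait_rolling lines above maintain an exponentially decaying average of the getwork round-trip time: each update blends the new sample in with weight 0.63/1.63 (about 0.39), so older samples fade out geometrically. A minimal standalone sketch of the same update rule, with illustrative names not taken from the source:

    #include <stdio.h>

    /* Decaying average as used for getwork_wait_rolling:
     * avg = (avg + sample * 0.63) / 1.63,
     * equivalently avg += (sample - avg) * (0.63 / 1.63). */
    static double rolling_update(double avg, double sample_secs)
    {
        avg += sample_secs * 0.63;
        avg /= 1.63;
        return avg;
    }

    int main(void)
    {
        double avg = 0.0;
        const double samples[] = { 0.20, 0.25, 0.18, 1.50, 0.22 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            avg = rolling_update(avg, samples[i]);
            printf("sample %.2fs -> rolling average %.3fs\n", samples[i], avg);
        }
        return 0;
    }
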
@@ -2170,22 +2174,34 @@ static bool workio_get_work(struct workio_cmd *wc)
 static bool stale_work(struct work *work, bool share)
 {
 	struct timeval now;
+	time_t work_expiry;
 	struct pool *pool;
+	int getwork_delay;
 
 	if (work->mandatory)
 		return false;
 
+	if (share)
+		work_expiry = opt_expiry;
+	else if (work->rolltime)
+		work_expiry = work->rolltime;
+	else
+		work_expiry = opt_scantime;
+	pool = work->pool;
+	/* Factor in the average getwork delay of this pool, rounding it up to
+	 * the nearest second */
+	getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1;
+	work_expiry -= getwork_delay;
+	if (unlikely(work_expiry < 5))
+		work_expiry = 5;
+
 	gettimeofday(&now, NULL);
-	if (share) {
-		if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_expiry)
-			return true;
-	} else if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_scantime)
+	if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry)
 		return true;
 
 	if (work->work_block != work_block)
 		return true;
 
-	pool = work->pool;
 	if (opt_fail_only && !share && pool != current_pool() && pool->enabled != POOL_REJECTING)
 		return true;
 
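Concretely, the base window is opt_expiry for shares, the pool-advertised rolltime for fresh work, or opt_scantime as a fallback; it is then shortened by five times the pool's rolling getwork delay (rounded up to a whole second) with a floor of five seconds. For example, with opt_scantime = 60 and a rolling getwork delay of 2.3 seconds, getwork_delay comes out as 12 and the work is treated as stale after 48 seconds instead of 60.
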
@@ -3421,7 +3437,7 @@ static bool pool_active(struct pool *pool, bool pinging)
 	bool ret = false;
 	json_t *val;
 	CURL *curl;
-	bool rolltime;
+	int rolltime;
 
 	curl = curl_easy_init();
 	if (unlikely(!curl)) {
@@ -3607,8 +3623,7 @@ static inline bool should_roll(struct work *work)
 
 static inline bool can_roll(struct work *work)
 {
-	return (work->pool && !stale_work(work, false) && work->rolltime &&
-		work->rolls < 11 && !work->clone);
+	return (work->pool && !stale_work(work, false) && work->rolltime && !work->clone);
 }
 
 static void roll_work(struct work *work)
@@ -3639,6 +3654,53 @@ static bool reuse_work(struct work *work)
 	return false;
 }
 
+static struct work *make_clone(struct work *work)
+{
+	struct work *work_clone = make_work();
+
+	memcpy(work_clone, work, sizeof(struct work));
+	work_clone->clone = true;
+	work_clone->longpoll = false;
+	/* Make cloned work appear slightly older to bias towards keeping the
+	 * master work item which can be further rolled */
+	work_clone->tv_staged.tv_sec -= 1;
+
+	return work_clone;
+}
+
+/* Clones work by rolling it if possible, and returning a clone instead of the
+ * original work item which gets staged again to possibly be rolled again in
+ * the future */
+static struct work *clone_work(struct work *work)
+{
+	struct work *work_clone;
+	bool cloned = false;
+
+	work_clone = make_clone(work);
+	while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) {
+		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
+		if (unlikely(!stage_work(work_clone))) {
+			cloned = false;
+			break;
+		}
+		roll_work(work);
+		work_clone = make_clone(work);
+		/* Roll it again to prevent duplicates should this be used
+		 * directly later on */
+		roll_work(work);
+		cloned = true;
+	}
+
+	if (cloned) {
+		stage_work(work);
+		return work_clone;
+	}
+
+	free_work(work_clone);
+
+	return work;
+}
+
 static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 		     const int thr_id)
 {
@@ -3723,18 +3785,11 @@ retry:
 			pool_resus(pool);
 	}
 
-	memcpy(work, work_heap, sizeof(*work));
-
-	/* Hand out a clone if we can roll this work item */
-	if (reuse_work(work_heap)) {
-		applog(LOG_DEBUG, "Pushing divided work to get queue head");
-
-		stage_work(work_heap);
-		work->clone = true;
-	} else {
+	work_heap = clone_work(work_heap);
+	memcpy(work, work_heap, sizeof(struct work));
+	free_work(work_heap);
+	if (!work->clone)
 		dec_queued();
-		free_work(work_heap);
-	}
 
 	ret = true;
 out:
@@ -4058,9 +4113,9 @@ enum {
 };
 
 /* Stage another work item from the work returned in a longpoll */
-static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
+static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
 {
-	struct work *work, *work_clone;
+	struct work *work;
 	bool rc;
 
 	work = make_work();
@@ -4093,18 +4148,7 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 		return;
 	}
 
-	work_clone = make_work();
-	memcpy(work_clone, work, sizeof(struct work));
-	while (reuse_work(work)) {
-		work_clone->clone = true;
-		work_clone->longpoll = false;
-		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
-		if (unlikely(!stage_work(work_clone)))
-			break;
-		work_clone = make_work();
-		memcpy(work_clone, work, sizeof(struct work));
-	}
-	free_work(work_clone);
+	work = clone_work(work);
 
 	applog(LOG_DEBUG, "Pushing converted work to stage thread");
 
@@ -4156,7 +4200,7 @@ static void *longpoll_thread(void *userdata)
 	struct timeval start, end;
 	CURL *curl = NULL;
 	int failures = 0;
-	bool rolltime;
+	int rolltime;
 
 	curl = curl_easy_init();
 	if (unlikely(!curl)) {
@@ -4343,6 +4387,23 @@ static void *watchpool_thread(void __maybe_unused *userdata)
 	return NULL;
 }
 
+/* Work is sorted according to age, so discard the oldest work items, leaving
+ * only 1 staged work item per mining thread */
+static void age_work(void)
+{
+	int discarded = 0;
+
+	while (requests_staged() > mining_threads) {
+		struct work *work = hash_pop(NULL);
+
+		if (unlikely(!work))
+			break;
+		discard_work(work);
+		discarded++;
+	}
+	if (discarded)
+		applog(LOG_DEBUG, "Aged %d work items", discarded);
+}
 
 /* Makes sure the hashmeter keeps going even if mining threads stall, updates
  * the screen at regular intervals, and restarts threads if they appear to have
@@ -4365,6 +4426,8 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		if (requests_queued() < opt_queue)
 			queue_request(NULL, false);
 
+		age_work();
+
 		hashmeter(-1, &zero_tv, 0);
 
 #ifdef HAVE_CURSES

+ 3 - 2
miner.h

@@ -296,6 +296,7 @@ struct cgminer_pool_stats {
 	struct timeval getwork_wait;
 	struct timeval getwork_wait_max;
 	struct timeval getwork_wait_min;
+	double getwork_wait_rolling;
 };
 
 struct cgpu_info {
@@ -533,7 +534,7 @@ extern pthread_rwlock_t netacc_lock;
 
 extern const uint32_t sha256_init_state[];
 extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
-			     const char *rpc_req, bool, bool, bool *,
+			     const char *rpc_req, bool, bool, int *,
 			     struct pool *pool, bool);
 extern char *bin2hex(const unsigned char *p, size_t len);
 extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);
@@ -729,7 +730,7 @@ struct work {
 	bool		mined;
 	bool		clone;
 	bool		cloned;
-	bool		rolltime;
+	int		rolltime;
 	bool		longpoll;
 	bool		stale;
 	bool		mandatory;

+ 11 - 6
util.c

@@ -56,7 +56,7 @@ struct upload_buffer {
 
 struct header_info {
 	char		*lp_path;
-	bool		has_rolltime;
+	int		rolltime;
 	char		*reason;
 };
 
@@ -160,8 +160,13 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
 		if (!strncasecmp("N", val, 1)) {
 			applog(LOG_DEBUG, "X-Roll-Ntime: N found");
 		} else {
-			applog(LOG_DEBUG, "X-Roll-Ntime found");
-			hi->has_rolltime = true;
+			/* Check to see if expire= is supported and if not, set
+			 * the rolltime to the default scantime */
+			if (strlen(val) > 7 && !strncasecmp("expire=", val, 7))
+				sscanf(val + 7, "%d", &hi->rolltime);
+			else
+				hi->rolltime = opt_scantime;
+			applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
 		}
 	}
 
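In effect the X-Roll-Ntime header value now selects between three cases: an explicit "N" refuses rolling, an "expire=<seconds>" value sets the roll window, and any other affirmative value falls back to opt_scantime. A small standalone sketch of that decision, using an illustrative helper name that is not part of the source:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Map an X-Roll-Ntime header value to a rolltime in seconds;
     * 0 means rolling is not allowed. */
    static int parse_roll_ntime(const char *val, int default_scantime)
    {
        int rolltime = 0;

        if (!strncasecmp("N", val, 1))
            return 0;                             /* rolling refused */
        if (strlen(val) > 7 && !strncasecmp("expire=", val, 7))
            sscanf(val + 7, "%d", &rolltime);     /* e.g. "expire=120" */
        else
            rolltime = default_scantime;          /* plain "Y" etc. */
        return rolltime;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               parse_roll_ntime("N", 60),          /* 0   */
               parse_roll_ntime("expire=120", 60), /* 120 */
               parse_roll_ntime("Y", 60));         /* 60  */
        return 0;
    }
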
@@ -248,7 +253,7 @@ static void set_nettime(void)
 
 json_t *json_rpc_call(CURL *curl, const char *url,
 		      const char *userpass, const char *rpc_req,
-		      bool probe, bool longpoll, bool *rolltime,
+		      bool probe, bool longpoll, int *rolltime,
 		      struct pool *pool, bool share)
 {
 	json_t *val, *err_val, *res_val;
@@ -260,7 +265,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 	char len_hdr[64], user_agent_hdr[128];
 	char curl_err_str[CURL_ERROR_SIZE];
 	long timeout = longpoll ? (60 * 60) : 60;
-	struct header_info hi = {NULL, false, NULL};
+	struct header_info hi = {NULL, 0, NULL};
 	bool probing = false;
 
 	memset(&err, 0, sizeof(err));
@@ -375,7 +380,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 		hi.lp_path = NULL;
 	}
 
-	*rolltime = hi.has_rolltime;
+	*rolltime = hi.rolltime;
 
 	val = JSON_LOADS(all_data.buf, &err);
 	if (!val) {