
Merge commit 'b0de3ed' into bfgminer

Luke Dashjr, 13 years ago
commit 63bfbffb70
4 changed files with 67 additions and 32 deletions
  1. adl.c    + 5 - 0
  2. api.c    + 4 - 4
  3. miner.c  + 56 - 28
  4. miner.h  + 2 - 0

+ 5 - 0
adl.c

@@ -1065,6 +1065,11 @@ static bool fan_autotune(int gpu, int temp, int fanpercent, int lasttemp, bool *
 	if (newpercent != fanpercent) {
 		applog(LOG_INFO, "Setting GPU %d fan percentage to %d", gpu, newpercent);
 		set_fanspeed(gpu, newpercent);
+		/* If the fanspeed is going down and we're below the top speed,
+		 * consider the fan optimal to prevent minute changes in
+		 * fanspeed delaying GPU engine speed changes */
+		if (newpercent < fanpercent && *fan_window)
+			return true;
 		return false;
 	}
 	return true;
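
The early return added here changes fan_autotune's contract: a fan that is stepping down while the speed window is open is now reported as optimal, so callers no longer defer GPU engine clock changes over minor downward fan corrections. A minimal standalone sketch of that decision, with fan_optimal_demo and a plain bool fan_window standing in for the real function and its pointer argument:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: mirrors the return-value logic of fan_autotune() above,
 * without the ADL calls that actually change the fan speed. */
static bool fan_optimal_demo(int newpercent, int fanpercent, bool fan_window)
{
	if (newpercent != fanpercent) {
		/* A falling fan speed inside the window counts as optimal,
		 * so engine speed changes are not held back */
		if (newpercent < fanpercent && fan_window)
			return true;
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", fan_optimal_demo(53, 55, true));  /* 1: slowing down, window open */
	printf("%d\n", fan_optimal_demo(60, 55, true));  /* 0: still spinning up */
	return 0;
}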

+ 4 - 4
api.c

@@ -456,13 +456,13 @@ struct CODES {
  { SEVERITY_INFO,  MSG_GPUINT,	PARAM_BOTH,	"GPU %d set new intensity to %s" },
  { SEVERITY_SUCC,  MSG_MINECON, PARAM_NONE,	"BFGMiner config" },
  { SEVERITY_ERR,   MSG_GPUMERR,	PARAM_BOTH,	"Setting GPU %d memoryclock to (%s) reported failure" },
- { SEVERITY_SUCC,  MSG_GPUMEM,	PARAM_BOTH,	"Setting GPU %d memoryclock to (%s) reported succeess" },
+ { SEVERITY_SUCC,  MSG_GPUMEM,	PARAM_BOTH,	"Setting GPU %d memoryclock to (%s) reported success" },
  { SEVERITY_ERR,   MSG_GPUEERR,	PARAM_BOTH,	"Setting GPU %d clock to (%s) reported failure" },
- { SEVERITY_SUCC,  MSG_GPUENG,	PARAM_BOTH,	"Setting GPU %d clock to (%s) reported succeess" },
+ { SEVERITY_SUCC,  MSG_GPUENG,	PARAM_BOTH,	"Setting GPU %d clock to (%s) reported success" },
  { SEVERITY_ERR,   MSG_GPUVERR,	PARAM_BOTH,	"Setting GPU %d vddc to (%s) reported failure" },
- { SEVERITY_SUCC,  MSG_GPUVDDC,	PARAM_BOTH,	"Setting GPU %d vddc to (%s) reported succeess" },
+ { SEVERITY_SUCC,  MSG_GPUVDDC,	PARAM_BOTH,	"Setting GPU %d vddc to (%s) reported success" },
  { SEVERITY_ERR,   MSG_GPUFERR,	PARAM_BOTH,	"Setting GPU %d fan to (%s) reported failure" },
- { SEVERITY_SUCC,  MSG_GPUFAN,	PARAM_BOTH,	"Setting GPU %d fan to (%s) reported succeess" },
+ { SEVERITY_SUCC,  MSG_GPUFAN,	PARAM_BOTH,	"Setting GPU %d fan to (%s) reported success" },
  { SEVERITY_ERR,   MSG_MISFN,	PARAM_NONE,	"Missing save filename parameter" },
  { SEVERITY_ERR,   MSG_BADFN,	PARAM_STR,	"Can't open or create save file '%s'" },
  { SEVERITY_SUCC,  MSG_SAVED,	PARAM_STR,	"Configuration saved to file '%s'" },

+ 56 - 28
miner.c

@@ -1638,7 +1638,6 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
 	bool rolltime;
 	uint32_t *hash32;
 	char hashshow[64+1] = "";
-	bool isblock;
 
 #ifdef __BIG_ENDIAN__
         int swapcounter = 0;
@@ -1679,17 +1678,9 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
 	res = json_object_get(val, "result");
 
 	if (!QUIET) {
-#ifndef MIPSEB
-// This one segfaults on my router for some reason
-		isblock = regeneratehash(work);
-		if (unlikely(isblock)) {
-			pool->solved++;
-			found_blocks++;
-		}
 		hash32 = (uint32_t *)(work->hash);
 		sprintf(hashshow, "%08lx.%08lx%s", (unsigned long)(hash32[6]), (unsigned long)(hash32[5]),
-			isblock ? " BLOCK!" : "");
-#endif
+			work->block? " BLOCK!" : "");
 	}
 
 	/* Theoretically threads could race when modifying accepted and
@@ -1842,6 +1833,7 @@ static void get_benchmark_work(struct work *work)
 	size_t min_size = (work_size < bench_size ? work_size : bench_size);
 	memset(work, 0, sizeof(work));
 	memcpy(work, &bench_block, min_size);
+	work->mandatory = true;
 }
 
 static bool get_upstream_work(struct work *work, CURL *curl)
@@ -2180,7 +2172,7 @@ static bool stale_work(struct work *work, bool share)
 	struct timeval now;
 	struct pool *pool;
 
-	if (opt_benchmark)
+	if (work->mandatory)
 		return false;
 
 	gettimeofday(&now, NULL);
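
Replacing the global opt_benchmark test with a per-work mandatory flag moves the exemption onto each work item: benchmark work (flagged in the hunk above), solved blocks, and work from a rejecting pool (flagged in hunks below) can each opt out of staleness individually. A condensed sketch of the resulting shape, with work_demo as a hypothetical stand-in for struct work and the per-pool expiry timing omitted:

#include <stdbool.h>
#include <stdio.h>

struct work_demo {
	bool mandatory;          /* bypass the staleness test entirely */
	unsigned int work_block; /* block counter when the work was fetched */
};

static bool stale_work_demo(const struct work_demo *work, unsigned int current_block)
{
	if (work->mandatory)
		return false;
	return work->work_block != current_block;
}

int main(void)
{
	struct work_demo benchmark = { .mandatory = true,  .work_block = 1 };
	struct work_demo ordinary  = { .mandatory = false, .work_block = 1 };

	/* The chain has moved to block 2: only the ordinary work goes stale */
	printf("%d %d\n", stale_work_demo(&benchmark, 2), stale_work_demo(&ordinary, 2));
	return 0;
}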
@@ -2200,6 +2192,16 @@ static bool stale_work(struct work *work, bool share)
 	return false;
 }
 
+static void check_solve(struct work *work)
+{
+	work->block = regeneratehash(work);
+	if (unlikely(work->block)) {
+		work->pool->solved++;
+		found_blocks++;
+		work->mandatory = true;
+		applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no);
+	}
+}
 
 static void *submit_work_thread(void *userdata)
 {
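
check_solve() records at submission time whether the share's regenerated hash also meets the block target, which is what the submit_upstream_work hunk above now reads from work->block instead of recomputing. Conceptually, regeneratehash() answers a 256-bit comparison; a hypothetical helper (not the real function) showing the idea, treating hash and target as arrays of eight little-endian 32-bit words:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: a share is also a valid block when its full hash,
 * read as a 256-bit number, is at or below the network target. */
static bool meets_target_demo(const uint32_t *hash, const uint32_t *target)
{
	for (int i = 7; i >= 0; i--) {   /* most significant word first */
		if (hash[i] < target[i])
			return true;
		if (hash[i] > target[i])
			return false;
	}
	return true;                     /* exactly equal still qualifies */
}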
@@ -2213,6 +2215,8 @@ static void *submit_work_thread(void *userdata)
 
 	applog(LOG_DEBUG, "Creating extra submit work thread");
 
+	check_solve(work);
+
 	if (stale_work(work, true)) {
 		if (opt_submit_stale)
 			applog(LOG_NOTICE, "Stale share detected, submitting as user requested");
@@ -2296,7 +2300,7 @@ static struct pool *priority_pool(int choice)
 void switch_pools(struct pool *selected)
 {
 	struct pool *pool, *last_pool;
-	int i, pool_no;
+	int i, pool_no, next_pool;
 
 	mutex_lock(&control_lock);
 	last_pool = currentpool;
@@ -2329,13 +2333,22 @@ void switch_pools(struct pool *selected)
 		/* Both of these simply increment and cycle */
 		case POOL_ROUNDROBIN:
 		case POOL_ROTATE:
-			if (selected) {
+			if (selected && !selected->idle) {
 				pool_no = selected->pool_no;
 				break;
 			}
-			pool_no++;
-			if (pool_no >= total_pools)
-				pool_no = 0;
+			next_pool = pool_no;
+			/* Select the next alive pool */
+			for (i = 1; i < total_pools; i++) {
+				next_pool++;
+				if (next_pool >= total_pools)
+					next_pool = 0;
+				pool = pools[next_pool];
+				if (!pool->idle && pool->enabled == POOL_ENABLED) {
+					pool_no = next_pool;
+					break;
+				}
+			}
 			break;
 		default:
 			break;
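
The rotate/round-robin strategies no longer step blindly to the next pool number: the loop walks at most total_pools - 1 positions forward, wrapping at the end, and settles on the first pool that is alive and enabled, leaving pool_no untouched if none qualifies. A standalone sketch of that scan, with pool_demo as a hypothetical reduced struct:

#include <stdbool.h>
#include <stdio.h>

enum pool_enable { POOL_DISABLED, POOL_ENABLED };

struct pool_demo {
	bool idle;
	enum pool_enable enabled;
};

static int next_alive_pool(const struct pool_demo *pools, int total_pools, int pool_no)
{
	int next_pool = pool_no;

	for (int i = 1; i < total_pools; i++) {
		next_pool++;
		if (next_pool >= total_pools)
			next_pool = 0;          /* wrap around the pool list */
		if (!pools[next_pool].idle && pools[next_pool].enabled == POOL_ENABLED)
			return next_pool;
	}
	return pool_no;                         /* nothing alive: stay put */
}

int main(void)
{
	struct pool_demo pools[3] = {
		{ .idle = false, .enabled = POOL_ENABLED },
		{ .idle = true,  .enabled = POOL_ENABLED },   /* dead, skipped */
		{ .idle = false, .enabled = POOL_ENABLED },
	};

	printf("next pool: %d\n", next_alive_pool(pools, 3, 0));  /* 2 */
	return 0;
}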
@@ -2507,7 +2520,7 @@ static void test_work_current(struct work *work)
 {
 	char *hexstr;
 
-	if (opt_benchmark)
+	if (work->mandatory)
 		return;
 
 	hexstr = bin2hex(work->data, 18);
@@ -3325,12 +3338,9 @@ static void hashmeter(int thr_id, struct timeval *diff,
 
 		/* Rolling average for each thread and each device */
 		decay_time(&thr->rolling, local_mhashes / secs);
-		for (i = 0; i < mining_threads; i++) {
-			struct thr_info *th = &thr_info[i];
+		for (i = 0; i < cgpu->threads; i++)
+			thread_rolling += cgpu->thr[i]->rolling;
 
-			if (th->cgpu == cgpu)
-				thread_rolling += th->rolling;
-		}
 		mutex_lock(&hash_lock);
 		decay_time(&cgpu->rolling, thread_rolling);
 		cgpu->total_mhashes += local_mhashes;
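
The rolling-average loop now sums only the device's own thread array rather than scanning every mining thread and filtering by owner, since each device structure already carries pointers to its threads. A sketch of the per-device sum with hypothetical reduced structs (decay_time's exact weighting is internal to the miner and not reproduced here):

struct thr_demo { double rolling; };      /* per-thread rolling MH/s */

struct cgpu_demo {
	int threads;
	struct thr_demo **thr;            /* the device's own threads */
};

static double device_rolling(const struct cgpu_demo *cgpu)
{
	double thread_rolling = 0.0;

	for (int i = 0; i < cgpu->threads; i++)
		thread_rolling += cgpu->thr[i]->rolling;
	return thread_rolling;
}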
@@ -3658,11 +3668,26 @@ retry:
 		goto out;
 	}
 
-	if (requested && !newreq && !requests_staged() && requests_queued() >= mining_threads &&
-	    !pool_tset(pool, &pool->lagging)) {
-		applog(LOG_WARNING, "Pool %d not providing work fast enough", pool->pool_no);
-		pool->getfail_occasions++;
-		total_go++;
+	if (!pool->lagging && requested && !newreq && !requests_staged() && requests_queued() >= mining_threads) {
+		struct cgpu_info *cgpu = thr->cgpu;
+		bool stalled = true;
+		int i;
+
+		/* Check to see if all the threads on the device that called
+		 * get_work are waiting on work and only consider the pool
+		 * lagging if true */
+		for (i = 0; i < cgpu->threads; i++) {
+			if (!cgpu->thr[i]->getwork) {
+				stalled = false;
+				break;
+			}
+		}
+
+		if (stalled && !pool_tset(pool, &pool->lagging)) {
+			applog(LOG_WARNING, "Pool %d not providing work fast enough", pool->pool_no);
+			pool->getfail_occasions++;
+			total_go++;
+		}
 	}
 
 	newreq = requested = false;
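
The lagging test is now two-staged: the queue-pressure check stays, but the pool is only flagged when every thread on the calling device is blocked waiting for work, so one busy device thread no longer penalizes the pool. A sketch of the stall test with hypothetical reduced structs (getwork is assumed to be set while a thread waits in get_work, as the comment above implies):

#include <stdbool.h>

struct thr_demo { bool getwork; };        /* true while blocked in get_work() */

struct cgpu_demo {
	int threads;
	struct thr_demo **thr;
};

static bool device_stalled(const struct cgpu_demo *cgpu)
{
	for (int i = 0; i < cgpu->threads; i++) {
		if (!cgpu->thr[i]->getwork)
			return false;     /* at least one thread is still hashing */
	}
	return true;
}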
@@ -4045,6 +4070,9 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 	work->rolltime = rolltime;
 	work->longpoll = true;
 
+	if (pool->enabled == POOL_REJECTING)
+		work->mandatory = true;
+
 	/* We'll be checking this work item twice, but we already know it's
 	 * from a new block so explicitly force the new block detection now
 	 * rather than waiting for it to hit the stage thread. This also
@@ -4104,7 +4132,7 @@ static struct pool *select_longpoll_pool(struct pool *cp)
  */
 static void wait_lpcurrent(struct pool *pool)
 {
-	if (pool->enabled == POOL_REJECTING)
+	if (pool->enabled == POOL_REJECTING || pool_strategy == POOL_LOADBALANCE)
 		return;
 
 	while (pool != current_pool()) {

+ 2 - 0
miner.h

@@ -731,6 +731,8 @@ struct work {
 	bool		rolltime;
 	bool		longpoll;
 	bool		stale;
+	bool		mandatory;
+	bool		block;
 
 	unsigned int	work_block;
 	int		id;
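
The two new fields back all of the miner.c changes above: block caches the result of regeneratehash() so the " BLOCK!" marker no longer recomputes the hash at display time, and mandatory exempts a work item from stale_work() and test_work_current(). A condensed map of where this diff sets them:

/* Condensed from the hunks above, not complete functions:
 *
 *   get_benchmark_work():  work->mandatory = true;
 *   check_solve():         work->block = regeneratehash(work);
 *                          if solved, work->mandatory = true;
 *   convert_to_work():     work->mandatory = true when the pool is
 *                          in the POOL_REJECTING state;
 *
 * stale_work() and test_work_current() then return early for any
 * mandatory work item. */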