Merge commit '3576abf' into bfgminer

Luke Dashjr, 13 years ago
commit 1620147174
5 changed files with 69 additions and 20 deletions
  1. api.c (+16 -0)
  2. driver-opencl.c (+6 -6)
  3. miner.c (+35 -7)
  4. miner.h (+5 -3)
  5. ocl.c (+7 -4)

api.c (+16 -0)

@@ -177,7 +177,9 @@ static const char *ALIVE = "Alive";
 static const char *REJECTING = "Rejecting";
 static const char *UNKNOWN = "Unknown";
 #define _DYNAMIC "D"
+#ifdef HAVE_OPENCL
 static const char *DYNAMIC = _DYNAMIC;
+#endif

 static const char *YES = "Y";
 static const char *NO = "N";
@@ -3124,6 +3126,20 @@ void api(int api_thr_id)

 	serv.sin_port = htons(port);

+#ifndef WIN32
+	// On linux with SO_REUSEADDR, bind will get the port if the previous
+	// socket is closed (even if it is still in TIME_WAIT) but fail if
+	// another program has it open - which is what we want
+	int optval = 1;
+	// If it doesn't work, we don't really care - just show a debug message
+	if (SOCKETFAIL(setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *)(&optval), sizeof(optval))))
+		applog(LOG_DEBUG, "API setsockopt SO_REUSEADDR failed (ignored): %s", SOCKERRMSG);
+#else
+	// On windows a 2nd program can bind to a port>1024 already in use unless
+	// SO_EXCLUSIVEADDRUSE is used - however then the bind to a closed port
+	// in TIME_WAIT will fail until the timeout - so we leave the options alone
+#endif
+
 	// try for more than 1 minute ... in case the old one hasn't completely gone yet
 	bound = 0;
 	bindstart = time(NULL);
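
The comment above captures the key POSIX detail: SO_REUSEADDR lets a restarted listener rebind while the old socket lingers in TIME_WAIT, but it does not let it steal a port another process still holds open. A minimal standalone sketch of the same pattern (illustrative names, not bfgminer's actual helper):

    /* Sketch, assuming plain POSIX sockets: enable SO_REUSEADDR, then keep
     * retrying bind() in case the previous listener has not fully gone yet.
     * bind_with_retry is a hypothetical name for illustration. */
    #include <stdint.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    static int bind_with_retry(int sock, uint16_t port, int timeout_secs)
    {
        struct sockaddr_in serv;
        int optval = 1;
        time_t start = time(NULL);

        /* Failure here is non-fatal; we only lose the fast-rebind behaviour */
        setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

        memset(&serv, 0, sizeof(serv));
        serv.sin_family = AF_INET;
        serv.sin_addr.s_addr = htonl(INADDR_ANY);
        serv.sin_port = htons(port);

        /* Retry until the old socket leaves TIME_WAIT or we give up */
        while (time(NULL) - start < timeout_secs) {
            if (bind(sock, (struct sockaddr *)&serv, sizeof(serv)) == 0)
                return 0;
            sleep(1);
        }
        return -1;
    }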

driver-opencl.c (+6 -6)

@@ -380,16 +380,16 @@ char *set_lookup_gap(char *arg)
 		return "Invalid parameters for set lookup gap";
 		return "Invalid parameters for set lookup gap";
 	val = atoi(nextptr);
 	val = atoi(nextptr);
 
 
-	gpus[device++].lookup_gap = val;
+	gpus[device++].opt_lg = val;

 	while ((nextptr = strtok(NULL, ",")) != NULL) {
 		val = atoi(nextptr);

-		gpus[device++].lookup_gap = val;
+		gpus[device++].opt_lg = val;
 	}
 	if (device == 1) {
 		for (i = device; i < MAX_GPUDEVICES; i++)
-			gpus[i].lookup_gap = gpus[0].lookup_gap;
+			gpus[i].opt_lg = gpus[0].opt_lg;
 	}

 	return NULL;
@@ -405,16 +405,16 @@ char *set_thread_concurrency(char *arg)
 		return "Invalid parameters for set thread concurrency";
 		return "Invalid parameters for set thread concurrency";
 	val = atoi(nextptr);
 	val = atoi(nextptr);
 
 
-	gpus[device++].thread_concurrency = val;
+	gpus[device++].opt_tc = val;

 	while ((nextptr = strtok(NULL, ",")) != NULL) {
 		val = atoi(nextptr);

-		gpus[device++].thread_concurrency = val;
+		gpus[device++].opt_tc = val;
 	}
 	if (device == 1) {
 		for (i = device; i < MAX_GPUDEVICES; i++)
-			gpus[i].thread_concurrency = gpus[0].thread_concurrency;
+			gpus[i].opt_tc = gpus[0].opt_tc;
 	}

 	return NULL;
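
Both setters follow the same shape: split a comma-separated list with strtok(), assign one value per GPU, and broadcast a lone value to every remaining device. A condensed, self-contained sketch of that pattern (devs[] and MAX_DEVS are stand-ins for bfgminer's gpus[] and MAX_GPUDEVICES):

    #include <stdlib.h>
    #include <string.h>

    #define MAX_DEVS 16

    static int devs[MAX_DEVS];

    /* Parse "n" or "n,m,..." into per-device values; returns NULL on
     * success, an error string otherwise, matching the originals. */
    static const char *set_per_device_int(char *arg)
    {
        char *nextptr = strtok(arg, ",");
        int device = 0, i;

        if (nextptr == NULL)
            return "Invalid parameters";
        devs[device++] = atoi(nextptr);

        /* Bounds check added here; the sketch stops at MAX_DEVS */
        while ((nextptr = strtok(NULL, ",")) != NULL && device < MAX_DEVS)
            devs[device++] = atoi(nextptr);

        /* A single value applies to all devices */
        if (device == 1)
            for (i = 1; i < MAX_DEVS; i++)
                devs[i] = devs[0];

        return NULL;
    }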

miner.c (+35 -7)

@@ -2782,6 +2782,8 @@ void switch_pools(struct pool *selected)
 	mutex_lock(&lp_lock);
 	pthread_cond_broadcast(&lp_cond);
 	mutex_unlock(&lp_lock);
+
+	queue_request(NULL, false);
 }

 static void discard_work(struct work *work)
@@ -3222,6 +3224,20 @@ void write_config(FILE *fcfg)
 					break;
 			}
 		}
+#ifdef USE_SCRYPT
+		fputs("\",\n\"lookup-gap\" : \"", fcfg);
+		for(i = 0; i < nDevs; i++)
+			fprintf(fcfg, "%s%d", i > 0 ? "," : "",
+				(int)gpus[i].opt_lg);
+		fputs("\",\n\"thread-concurrency\" : \"", fcfg);
+		for(i = 0; i < nDevs; i++)
+			fprintf(fcfg, "%s%d", i > 0 ? "," : "",
+				(int)gpus[i].opt_tc);
+		fputs("\",\n\"shaders\" : \"", fcfg);
+		for(i = 0; i < nDevs; i++)
+			fprintf(fcfg, "%s%d", i > 0 ? "," : "",
+				(int)gpus[i].shaders);
+#endif
 #ifdef HAVE_ADL
 		fputs("\",\n\"gpu-engine\" : \"", fcfg);
 		for(i = 0; i < nDevs; i++)
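
With the new block, a scrypt-enabled build also serialises the per-GPU scrypt tuning into the written config. For a hypothetical two-GPU rig with default-ish settings, the emitted fragment would look roughly like this (values purely illustrative):

    "lookup-gap" : "2,2",
    "thread-concurrency" : "8192,8192",
    "shaders" : "1600,1600",
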
@@ -3372,6 +3388,7 @@ retry:
 		strategies[pool_strategy]);
 	if (pool_strategy == POOL_ROTATE)
 		wlogprint("Set to rotate every %d minutes\n", opt_rotate_period);
+	wlogprint("[F]ailover only %s\n", opt_fail_only ? "enabled" : "disabled");
 	wlogprint("[A]dd pool [R]emove pool [D]isable pool [E]nable pool\n");
 	wlogprint("[A]dd pool [R]emove pool [D]isable pool [E]nable pool\n");
 	wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n");
 	wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n");
 	wlogprint("Or press any other key to continue\n");
 	wlogprint("Or press any other key to continue\n");
@@ -3465,6 +3482,9 @@ retry:
 		pool = pools[selected];
 		display_pool_summary(pool);
 		goto retry;
+	} else if (!strncasecmp(&input, "f", 1)) {
+		opt_fail_only ^= true;
+		goto updated;
 	} else
 		clear_logwin();

@@ -4027,18 +4047,25 @@ bool queue_request(struct thr_info *thr, bool needed)
 {
 	int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
 	struct workio_cmd *wc;
-	bool ret = true;
+	bool lag = false;

 	cq = current_queued();
 	cs = current_staged();
 	ts = total_staged();
 	tq = global_queued();

-	/* Test to make sure we have enough work for pools without rolltime
-	 * and enough original work for pools with rolltime */
-	if (((cs || cq >= opt_queue) && ts >= maxq) ||
-	    ((cs || cq) && tq >= maxq))
-		return true;
+	if (needed && cq >= maxq && !ts && !opt_fail_only) {
+		/* If we're queueing work faster than we can stage it, consider
+		 * the system lagging and allow work to be gathered from
+		 * another pool if possible */
+		lag = true;
+	} else {
+		/* Test to make sure we have enough work for pools without rolltime
+		 * and enough original work for pools with rolltime */
+		if (((cs || cq >= opt_queue) && ts >= maxq) ||
+		    ((cs || cq) && tq >= maxq))
+			return true;
+	}

 	/* fill out work request message */
 	wc = calloc(1, sizeof(*wc));
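
The new branch can be restated compactly: a request is flagged as lagging only when it was explicitly needed, the queue is already at capacity, nothing is staged, and failover-only mode is off. A sketch of just that decision, with names mirroring the diff:

    #include <stdbool.h>

    /* Condensed restatement of the new test in queue_request(): when this
     * returns true, the getwork may be satisfied from a backup pool. */
    static bool request_is_lagging(bool needed, int cq, int ts, int maxq,
                                   bool fail_only)
    {
        return needed && cq >= maxq && !ts && !fail_only;
    }
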
@@ -4049,6 +4076,7 @@ bool queue_request(struct thr_info *thr, bool needed)

 	wc->cmd = WC_GET_WORK;
 	wc->thr = thr;
+	wc->lagging = lag;

 	applog(LOG_DEBUG, "Queueing getwork request to work thread");

@@ -4076,7 +4104,7 @@ static struct work *hash_pop(const struct timespec *abstime)
 		work = staged_work;
 		HASH_DEL(staged_work, work);
 		work->pool->staged--;
-		if (HASH_COUNT(staged_work) < mining_threads)
+		if (HASH_COUNT(staged_work) < (unsigned int)mining_threads)
 			queue = true;
 	}
 	mutex_unlock(stgd_lock);

miner.h (+5 -3)

@@ -384,8 +384,8 @@ struct cgpu_info {
 	cl_ulong max_alloc;

 #ifdef USE_SCRYPT
-	int lookup_gap;
-	int thread_concurrency;
+	int opt_lg, lookup_gap;
+	int opt_tc, thread_concurrency;
 	int shaders;
 #endif
 	struct timeval tv_gpustart;;
@@ -746,9 +746,11 @@ struct curl_ent {
 	struct timeval tv;
 };
+/* Disabled needs to be the lowest enum as a freshly calloced value will then
+ * equal disabled */
 enum pool_enable {
-	POOL_ENABLED,
 	POOL_DISABLED,
+	POOL_ENABLED,
 	POOL_REJECTING,
 };


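The reordering matters because pool structures are allocated with calloc(), so every field starts at zero; putting POOL_DISABLED first makes "zeroed" and "disabled" coincide, exactly as the new comment says. A minimal illustration (pool_stub is a stand-in, not the real struct pool):

    #include <stdlib.h>

    enum pool_enable {
        POOL_DISABLED,  /* = 0: the state a freshly zeroed struct is in */
        POOL_ENABLED,
        POOL_REJECTING,
    };

    struct pool_stub {
        enum pool_enable enabled;
    };

    int main(void)
    {
        struct pool_stub *pool = calloc(1, sizeof(*pool));
        /* pool->enabled == POOL_DISABLED without any explicit assignment */
        return pool && pool->enabled == POOL_DISABLED ? 0 : 1;
    }
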
ocl.c (+7 -4)

@@ -653,11 +653,13 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
 		cl_ulong ma = cgpu->max_alloc, mt;
 		int pow2 = 0;

-		if (!cgpu->lookup_gap) {
+		if (!cgpu->opt_lg) {
 			applog(LOG_DEBUG, "GPU %d: selecting lookup gap of 2", gpu);
 			cgpu->lookup_gap = 2;
-		}
-		if (!cgpu->thread_concurrency) {
+		} else
+			cgpu->lookup_gap = cgpu->opt_lg;
+
+		if (!cgpu->opt_tc) {
 			cgpu->thread_concurrency = ma / 32768 / cgpu->lookup_gap;
 			if (cgpu->shaders && cgpu->thread_concurrency > cgpu->shaders) {
 				cgpu->thread_concurrency -= cgpu->thread_concurrency % cgpu->shaders;
@@ -666,7 +668,8 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
 			}

 			applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %u",gpu,  cgpu->thread_concurrency);
-		}
+		} else
+			cgpu->thread_concurrency = cgpu->opt_tc;

 		/* If we have memory to spare, try to find a power of 2 value
 		 * >= required amount to map nicely to an intensity */
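
The net effect of the ocl.c changes is that user-supplied values (opt_lg/opt_tc, zero when unset) now override the computed defaults instead of being clobbered by them. A condensed sketch of the resulting selection logic (simplified types; ma stands for the device's maximum allocation size):

    /* Sketch of the default-vs-override logic above, not the literal code. */
    static void pick_scrypt_defaults(unsigned long ma, unsigned int shaders,
                                     int opt_lg, int opt_tc,
                                     int *lookup_gap,
                                     unsigned int *thread_concurrency)
    {
        /* User value wins; otherwise default the lookup gap to 2 */
        *lookup_gap = opt_lg ? opt_lg : 2;

        if (opt_tc)
            *thread_concurrency = opt_tc;
        else {
            /* Derive from available memory, then round down to a
             * multiple of the shader count when we overshoot it */
            *thread_concurrency = ma / 32768 / *lookup_gap;
            if (shaders && *thread_concurrency > shaders)
                *thread_concurrency -= *thread_concurrency % shaders;
        }
    }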