Merge branch 'cgminer' into bfgminer

Conflicts:
	driver-bitforce.c
	driver-icarus.c
Luke Dashjr, 13 years ago · commit 5acedd22f0

8 changed files with 86 additions and 84 deletions

  1. driver-bitforce.c   +12 -13
  2. driver-cpu.c         +1  -1
  3. driver-icarus.c      +8  -8
  4. driver-modminer.c    +6  -7
  5. driver-opencl.c      +6  -5
  6. driver-ztex.c        +8  -8
  7. miner.c             +44 -41
  8. miner.h              +1  -1

driver-bitforce.c  +12 -13

@@ -170,7 +170,7 @@ void bitforce_init(struct cgpu_info *bitforce)
 	char pdevbuf[0x100];
 	char *s;
 
-	applog(LOG_WARNING, "BFL%i: Re-initalizing", bitforce->device_id);
+	applog(LOG_WARNING, "BFL%i: Re-initialising", bitforce->device_id);
 
 	biforce_clear_buffer(bitforce);
 
@@ -331,7 +331,7 @@ re_send:
 	return true;
 }
 
-static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
+static int64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 {
 	struct cgpu_info *bitforce = thr->cgpu;
 	int fdDev = bitforce->device_fd;
@@ -340,9 +340,8 @@ static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 	char *pnoncebuf;
 	uint32_t nonce;
 
-
 	if (!fdDev)
-		return 0;
+		return -1;
 
 	while (bitforce->wait_ms < BITFORCE_LONG_TIMEOUT_MS) {
 		mutex_lock(&bitforce->device_mutex);
@@ -367,7 +366,7 @@ static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 		bitforce->dev_over_heat_count++;
 
 		if (!pdevbuf[0])           /* Only return if we got nothing after timeout - there still may be results */
-			return 1;
+			return 0;
 	} else if (!strncasecmp(pdevbuf, "N", 1)) {/* Hashing complete (NONCE-FOUND or NO-NONCE) */
 		    /* Simple timing adjustment. Allow a few polls to cope with
 		     * OS timer delays being variably reliable. wait_ms will
@@ -391,10 +390,10 @@ static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 	if (!strncasecmp(&pdevbuf[2], "-", 1))
 		return bitforce->nonces;   /* No valid nonce found */
 	else if (!strncasecmp(pdevbuf, "I", 1))
-		return 1;          /* Device idle */
+		return 0;          /* Device idle */
 	else if (strncasecmp(pdevbuf, "NONCE-FOUND", 11)) {
 		applog(LOG_WARNING, "BFL%i: Error: Get result reports: %s", bitforce->device_id, pdevbuf);
-		return 1;
+		return 0;
 	}
 
 	pnoncebuf = &pdevbuf[12];
@@ -436,11 +435,11 @@ static void biforce_thread_enable(struct thr_info *thr)
 	bitforce_init(bitforce);
 }
 
-static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint64_t __maybe_unused max_nonce)
+static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
 {
 	struct cgpu_info *bitforce = thr->cgpu;
 	unsigned int sleep_time;
-	uint64_t ret;
+	int64_t ret;
 
 	ret = bitforce_send_work(thr, work);
 
@@ -469,7 +468,7 @@ static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint6
 		ret = bitforce_get_result(thr, work);
 
 	if (!ret) {
-		ret = 1;
+		ret = 0;
 		applog(LOG_ERR, "BFL%i: Comms error", bitforce->device_id);
 		bitforce->device_last_not_well = time(NULL);
 		bitforce->device_not_well_reason = REASON_DEV_COMMS_ERROR;
@@ -490,9 +489,9 @@ static bool bitforce_thread_init(struct thr_info *thr)
 	struct cgpu_info *bitforce = thr->cgpu;
 	unsigned int wait;
 
-	/* Pause each new thread a random time between 0-100ms 
-	so the devices aren't making calls all at the same time. */
-	wait = (rand() * MAX_START_DELAY_US)/RAND_MAX;
+	/* Pause each new thread at least 100ms between initialising
+	 * so the devices aren't making calls all at the same time. */
+	wait = thr->id * MAX_START_DELAY_US;
 	applog(LOG_DEBUG, "BFL%i: Delaying start by %dms", bitforce->device_id, wait / 1000);
 	usleep(wait);
 

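A note on the thread_init hunk above: the random 0-100ms startup delay becomes a deterministic per-thread stagger of thr->id * MAX_START_DELAY_US. A minimal self-contained sketch of the new behaviour, assuming MAX_START_DELAY_US is 100000 (the 100ms the old comment described):

    #include <unistd.h>

    #define MAX_START_DELAY_US 100000  /* assumed: 100ms, per the old comment */

    /* Thread 0 starts immediately, thread 1 after 100ms, thread 2 after
     * 200ms, and so on, so the devices never issue their first serial
     * calls at the same instant. */
    static void stagger_start(int thread_id)
    {
        usleep((useconds_t)thread_id * MAX_START_DELAY_US);
    }
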
driver-cpu.c  +1 -1

@@ -784,7 +784,7 @@ static bool cpu_thread_init(struct thr_info *thr)
 	return true;
 }
 
-static uint64_t cpu_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
+static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
 {
 	const int thr_id = thr->id;
 

driver-icarus.c  +8 -8

@@ -547,8 +547,8 @@ static bool icarus_prepare(struct thr_info *thr)
 	return true;
 }
 
-static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
-				__maybe_unused uint64_t max_nonce)
+static int64_t icarus_scanhash(struct thr_info *thr, struct work *work,
+				__maybe_unused int64_t max_nonce)
 {
 	struct cgpu_info *icarus;
 	int fd;
@@ -559,7 +559,7 @@ static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
 	unsigned char ob_bin[64] = {0}, nonce_bin[ICARUS_READ_SIZE] = {0};
 	char *ob_hex;
 	uint32_t nonce;
-	uint64_t hash_count;
+	int64_t hash_count;
 	struct timeval tv_start, elapsed;
 	struct timeval tv_history_start, tv_history_finish;
 	double Ti, Xi;
@@ -569,9 +569,9 @@ static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
 	int count;
 	double Hs, W, fullnonce;
 	int read_count;
-	uint64_t estimate_hashes;
+	int64_t estimate_hashes;
 	uint32_t values;
-	uint64_t hash_count_range;
+	int64_t hash_count_range;
 
 	elapsed.tv_sec = elapsed.tv_usec = 0;
 
@@ -600,7 +600,7 @@ static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
 				// Go back to the main loop to get the next work, and stuff
 				// Returning to the main loop will clear work_restart, so use a flag...
 				state->changework = true;
-				return 1;
+				return 0;
 			}
 		}
 
@@ -617,7 +617,7 @@ static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
 	ret = icarus_write(fd, ob_bin, sizeof(ob_bin));
 	if (ret) {
 		icarus_close(fd);
-		return 0;	/* This should never happen */
+		return -1;	/* This should never happen */
 	}
 
 	if (opt_debug) {
@@ -644,7 +644,7 @@ static uint64_t icarus_scanhash(struct thr_info *thr, struct work *work,
 	if (state->firstrun) {
 		state->firstrun = false;
 		memcpy(&state->last_work, work, sizeof(state->last_work));
-		return 1;
+		return 0;
 	}
 
 	// OK, done starting Icarus's next job... now process the last run's result!

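On the icarus hunk at -600,7 above: returning to the main loop clears work_restart, so the driver latches the restart in its own changework flag, and this merge makes that path report 0 hashes instead of 1. A sketch of the pattern, with the surrounding scanhash state assumed from the diff:

    /* work_restart is cleared once scanhash returns to the main loop,
     * so remember it in a driver-private flag and report that nothing
     * was hashed this pass. */
    if (thr->work_restart) {
        state->changework = true;
        return 0;
    }

    /* On the next call, the latched flag stands in for the cleared
     * work_restart. */
    if (state->changework) {
        state->changework = false;
        /* ... abandon the old job and start the new work ... */
    }
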
driver-modminer.c  +6 -7

@@ -518,11 +518,11 @@ modminer_process_results(struct thr_info*thr)
 	return hashes;
 }
 
-static uint64_t
-modminer_scanhash(struct thr_info*thr, struct work*work, uint64_t __maybe_unused max_nonce)
+static int64_t
+modminer_scanhash(struct thr_info*thr, struct work*work, int64_t __maybe_unused max_nonce)
 {
 	struct modminer_fpga_state *state = thr->cgpu_data;
-	uint64_t hashes = 1;
+	int64_t hashes = 0;
 	bool startwork;
 
 	startwork = modminer_prepare_next_work(state, work);
@@ -530,15 +530,14 @@ modminer_scanhash(struct thr_info*thr, struct work*work, uint64_t __maybe_unused
 		hashes = modminer_process_results(thr);
 		if (work_restart(thr)) {
 			state->work_running = false;
-			return 1;
+			return 0;
 		}
-	}
-	else
+	} else
 		state->work_running = true;
 
 	if (startwork) {
 		if (!modminer_start_work(thr))
-			return 0;
+			return -1;
 		memcpy(&state->running_work, work, sizeof(state->running_work));
 	}
 

driver-opencl.c  +6 -5

@@ -1203,7 +1203,7 @@ static cl_int queue_diablo_kernel(_clState *clState, dev_blk_ctx *blk, cl_uint t
 }
 
 static void set_threads_hashes(unsigned int vectors, unsigned int *threads,
-			       unsigned int *hashes, size_t *globalThreads,
+			       int64_t *hashes, size_t *globalThreads,
 			       unsigned int minthreads, int intensity)
 {
 	*threads = 1 << (15 + intensity);
@@ -1591,8 +1591,8 @@ static bool opencl_prepare_work(struct thr_info __maybe_unused *thr, struct work
 
 extern int opt_dynamic_interval;
 
-static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
-				uint64_t __maybe_unused max_nonce)
+static int64_t opencl_scanhash(struct thr_info *thr, struct work *work,
+				int64_t __maybe_unused max_nonce)
 {
 	const int thr_id = thr->id;
 	struct opencl_thread_data *thrdata = thr->cgpu_data;
@@ -1605,10 +1605,9 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
 	size_t globalThreads[1];
 	size_t localThreads[1] = { clState->wsize };
 	unsigned int threads;
-	unsigned int hashes;
+	int64_t hashes;
 
 	/* This finish flushes the readbuffer set with CL_FALSE later */
-	gettimeofday(&gpu->tv_gpustart, NULL);
 	clFinish(clState->commandQueue);
 	gettimeofday(&gpu->tv_gpuend, NULL);
 
@@ -1664,6 +1663,8 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
 		clFinish(clState->commandQueue);
 	}
 
+	gettimeofday(&gpu->tv_gpustart, NULL);
+
 	if (clState->goffset) {
 		size_t global_work_offset[1];
 

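The one behavioural change in driver-opencl.c is where tv_gpustart is stamped: previously both stamps bracketed only the clFinish() wait at the top of scanhash, whereas now the start stamp sits just before the kernel is enqueued, so the interval spans the queued GPU work from submission to completion. The resulting pattern, sketched with the names from the diff (kernel setup elided):

    gettimeofday(&gpu->tv_gpustart, NULL);
    /* ... clEnqueueNDRangeKernel(...) and a non-blocking (CL_FALSE)
     * result read are queued here ... */

    /* At the top of the next scanhash call: */
    clFinish(clState->commandQueue);   /* wait for the queued work */
    gettimeofday(&gpu->tv_gpuend, NULL);
    /* tv_gpuend - tv_gpustart now measures the GPU pass itself. */
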
driver-ztex.c  +8 -8

@@ -185,8 +185,8 @@ static bool ztex_checkNonce(struct libztex_device *ztex,
 	return true;
 }
 
-static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
-                              __maybe_unused uint64_t max_nonce)
+static int64_t ztex_scanhash(struct thr_info *thr, struct work *work,
+                              __maybe_unused int64_t max_nonce)
 {
 	struct libztex_device *ztex;
 	unsigned char sendbuf[44];
@@ -215,7 +215,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 			ztex_disable(thr);
 			applog(LOG_ERR, "%s: Failed to send hash data with err %d, giving up", ztex->repr, i);
 			ztex_releaseFpga(ztex);
-			return 0;
+			return -1;
 		}
 	}
 	ztex_releaseFpga(ztex);
@@ -225,7 +225,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 	lastnonce = malloc(sizeof(uint32_t)*ztex->numNonces);
 	if (lastnonce == NULL) {
 		applog(LOG_ERR, "%s: failed to allocate lastnonce[%d]", ztex->repr, ztex->numNonces);
-		return 0;
+		return -1;
 	}
 	memset(lastnonce, 0, sizeof(uint32_t)*ztex->numNonces);
 	
@@ -233,7 +233,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 	backlog = malloc(sizeof(uint32_t) * backlog_max);
 	if (backlog == NULL) {
 		applog(LOG_ERR, "%s: failed to allocate backlog[%d]", ztex->repr, backlog_max);
-		return 0;
+		return -1;
 	}
 	memset(backlog, 0, sizeof(uint32_t) * backlog_max);
 	
@@ -260,7 +260,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 				free(lastnonce);
 				free(backlog);
 				ztex_releaseFpga(ztex);
-				return 0;
+				return -1;
 			}
 		}
 		ztex_releaseFpga(ztex);
@@ -330,7 +330,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 		free(lastnonce);
 		free(backlog);
 		
-		return 0;
+		return -1;
 	}
 
 	applog(LOG_DEBUG, "%s: exit %1.8X", ztex->repr, noncecnt);
@@ -340,7 +340,7 @@ static uint64_t ztex_scanhash(struct thr_info *thr, struct work *work,
 	free(lastnonce);
 	free(backlog);
 	
-	return noncecnt > 0? noncecnt: 1;
+	return noncecnt;
 }
 
 static void ztex_statline_before(char *buf, struct cgpu_info *cgpu)

miner.c  +44 -41

@@ -4022,6 +4022,20 @@ static inline bool abandon_work(struct work *work, struct timeval *wdiff, uint64
 	return false;
 }
 
+static void mt_disable(struct thr_info *mythr, const int thr_id,
+		       struct device_api *api)
+{
+	applog(LOG_WARNING, "Thread %d being disabled", thr_id);
+	mythr->rolling = mythr->cgpu->rolling = 0;
+	applog(LOG_DEBUG, "Popping wakeup ping in miner thread");
+	thread_reportout(mythr);
+	tq_pop(mythr->q, NULL); /* Ignore ping that's popped */
+	thread_reportin(mythr);
+	applog(LOG_WARNING, "Thread %d being re-enabled", thr_id);
+	if (api->thread_enable)
+		api->thread_enable(mythr);
+}
+
 void *miner_thread(void *userdata)
 {
 	struct thr_info *mythr = userdata;
@@ -4037,8 +4051,8 @@ void *miner_thread(void *userdata)
 	struct timeval tv_start, tv_end, tv_workstart, tv_lastupdate;
 	struct timeval diff, sdiff, wdiff = {0, 0};
 	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
-	unsigned long long hashes_done = 0;
-	unsigned long long hashes;
+	int64_t hashes_done = 0;
+	int64_t hashes;
 	struct work *work = make_work();
 	const time_t request_interval = opt_scantime * 2 / 3 ? : 1;
 	unsigned const long request_nonce = MAXTHREADS / 3 * 2;
@@ -4120,23 +4134,7 @@ void *miner_thread(void *userdata)
 
 			gettimeofday(&getwork_start, NULL);
 
-			if (unlikely(mythr->work_restart)) {
-
-				/* Apart from device_thread 0, we stagger the
-				 * starting of every next thread to try and get
-				 * all devices busy before worrying about
-				 * getting work for their extra threads */
-				if (!primary) {
-					struct timespec rgtp;
-
-					rgtp.tv_sec = 0;
-					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
-					nanosleep(&rgtp, NULL);
-				}
-				break;
-			}
-
-			if (unlikely(!hashes)) {
+			if (unlikely(hashes == -1)) {
 				applog(LOG_ERR, "%s %d failure, disabling!", api->name, cgpu->device_id);
 				cgpu->deven = DEV_DISABLED;
 
@@ -4144,7 +4142,7 @@ void *miner_thread(void *userdata)
 				cgpu->device_not_well_reason = REASON_THREAD_ZERO_HASH;
 				cgpu->thread_zero_hash_count++;
 
-				goto disabled;
+				mt_disable(mythr, thr_id, api);
 			}
 
 			hashes_done += hashes;
@@ -4179,22 +4177,21 @@ void *miner_thread(void *userdata)
 			}
 
 			if (unlikely((long)sdiff.tv_sec < cycle)) {
+				int mult;
+
 				if (likely(!api->can_limit_work || max_nonce == 0xffffffff))
 					continue;
 
-				{
-					int mult = 1000000 / ((sdiff.tv_usec + 0x400) / 0x400) + 0x10;
-					mult *= cycle;
-					if (max_nonce > (0xffffffff * 0x400) / mult)
-						max_nonce = 0xffffffff;
-					else
-						max_nonce = (max_nonce * mult) / 0x400;
-				}
-			} else if (unlikely(sdiff.tv_sec > cycle) && api->can_limit_work) {
+				mult = 1000000 / ((sdiff.tv_usec + 0x400) / 0x400) + 0x10;
+				mult *= cycle;
+				if (max_nonce > (0xffffffff * 0x400) / mult)
+					max_nonce = 0xffffffff;
+				else
+					max_nonce = (max_nonce * mult) / 0x400;
+			} else if (unlikely(sdiff.tv_sec > cycle) && api->can_limit_work)
 				max_nonce = max_nonce * cycle / sdiff.tv_sec;
-			} else if (unlikely(sdiff.tv_usec > 100000) && api->can_limit_work) {
+			else if (unlikely(sdiff.tv_usec > 100000) && api->can_limit_work)
 				max_nonce = max_nonce * 0x400 / (((cycle * 1000000) + sdiff.tv_usec) / (cycle * 1000000 / 0x400));
-			}
 
 			timersub(&tv_end, &tv_lastupdate, &diff);
 			if (diff.tv_sec >= opt_log_interval) {
@@ -4203,18 +4200,24 @@ void *miner_thread(void *userdata)
 				tv_lastupdate = tv_end;
 			}
 
-			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED)) {
-				applog(LOG_WARNING, "Thread %d being disabled", thr_id);
-disabled:
-				mythr->rolling = mythr->cgpu->rolling = 0;
-				applog(LOG_DEBUG, "Popping wakeup ping in miner thread");
-				thread_reportout(mythr);
-				tq_pop(mythr->q, NULL); /* Ignore ping that's popped */
-				thread_reportin(mythr);
-				applog(LOG_WARNING, "Thread %d being re-enabled", thr_id);
-				if (api->thread_enable) api->thread_enable(mythr);
+			if (unlikely(mythr->work_restart)) {
+				/* Apart from device_thread 0, we stagger the
+				 * starting of every next thread to try and get
+				 * all devices busy before worrying about
+				 * getting work for their extra threads */
+				if (!primary) {
+					struct timespec rgtp;
+
+					rgtp.tv_sec = 0;
+					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
+					nanosleep(&rgtp, NULL);
+				}
+				break;
 			}
 
+			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
+				mt_disable(mythr, thr_id, api);
+
 			sdiff.tv_sec = sdiff.tv_usec = 0;
 		} while (!abandon_work(work, &wdiff, cgpu->max_hashes));
 	}

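The max_nonce rescaling in miner.c is only restructured by this merge (the mult declaration is hoisted and the extra braces dropped), but the arithmetic is dense enough to warrant a worked example with hypothetical numbers:

    /* Hypothetical: the scan finished in 0.5s against a 1s cycle. */
    long cycle = 1;
    struct timeval sdiff = { 0, 500000 };

    int mult = 1000000 / ((sdiff.tv_usec + 0x400) / 0x400) + 0x10;
    /* (500000 + 1024) / 1024 = 489; 1000000 / 489 = 2044; +16 = 2060 */
    mult *= cycle;  /* 2060 */
    /* max_nonce becomes (max_nonce * 2060) / 0x400, roughly doubling
     * it so the next scan runs for close to the full cycle; the
     * 0xffffffff comparison above guards the multiply against
     * overflow. */
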
miner.h  +1 -1

@@ -242,7 +242,7 @@ struct device_api {
 	bool (*thread_init)(struct thr_info*);
 	void (*free_work)(struct thr_info*, struct work*);
 	bool (*prepare_work)(struct thr_info*, struct work*);
-	uint64_t (*scanhash)(struct thr_info*, struct work*, uint64_t);
+	int64_t (*scanhash)(struct thr_info*, struct work*, int64_t);
 	void (*thread_shutdown)(struct thr_info*);
 	void (*thread_enable)(struct thr_info*);
 };
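
The miner.h hunk is the interface-level summary of the whole merge: scanhash now returns int64_t, where -1 is a fatal error (miner_thread disables the device), 0 means no hashing was done this pass, and a positive value is the hash count. A minimal sketch of a driver under the new convention; example_scanhash and both helpers are hypothetical, while the types and __maybe_unused come from miner.h:

    static int64_t example_scanhash(struct thr_info *thr, struct work *work,
                                    int64_t __maybe_unused max_nonce)
    {
        struct cgpu_info *dev = thr->cgpu;

        if (!example_send_work(dev, work))   /* hypothetical helper */
            return -1;   /* fatal: miner_thread disables the device */

        if (thr->work_restart)
            return 0;    /* nothing hashed; fetch new work */

        return example_hashes_done(dev);     /* hypothetical helper */
    }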