
Use a static base measurement of the difference to decide how many items to clone, since requests_staged may not climb while rolling.

Con Kolivas, 13 years ago
commit 74cd6548a9
1 changed file with 7 additions and 2 deletions

cgminer.c (+7 -2)

@@ -3640,11 +3640,16 @@ static struct work *make_clone(struct work *work)
  * the future */
 static struct work *clone_work(struct work *work)
 {
+	int mrs = mining_threads - requests_staged();
 	struct work *work_clone;
-	bool cloned = false;
+	bool cloned;
 
+	if (mrs < 1)
+		return work;
+
+	cloned = false;
 	work_clone = make_clone(work);
-	while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) {
+	while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
 		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
 		if (unlikely(!stage_work(work_clone))) {
 			cloned = false;
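
The heart of the change is a common loop-termination pattern: take one snapshot of the deficit (mining_threads minus the staged count) before the loop and count it down, instead of re-polling requests_staged() on every pass, since the staged count may not climb while work is being rolled and the old condition could keep cloning far past the target. Below is a minimal standalone sketch of that pattern; the stub helpers and values are hypothetical stand-ins for cgminer's internals, not the project's actual API.

#include <stdio.h>

/* Hypothetical stand-ins for cgminer's globals and helpers,
 * for illustration only. */
static int mining_threads = 8;

static int requests_staged(void)
{
	/* Pretend the staged count is stuck at 3 and does not climb
	 * while we roll work -- the failure mode the commit guards
	 * against. */
	return 3;
}

static int can_roll(void)    { return 1; }
static int should_roll(void) { return 1; }

int main(void)
{
	/* Snapshot the deficit once, as the patched clone_work()
	 * does, rather than re-checking requests_staged() on each
	 * iteration. */
	int mrs = mining_threads - requests_staged();
	int cloned = 0;

	if (mrs < 1)
		return 0; /* nothing to clone */

	while (mrs-- > 0 && can_roll() && should_roll())
		printf("staging rolled clone %d\n", ++cloned);

	/* With the old condition (requests_staged() < mining_threads),
	 * this loop would only stop once can_roll()/should_roll()
	 * failed, because the staged count never moves here. The
	 * countdown bounds it to exactly the snapshot deficit. */
	return 0;
}

Run as written, this stages exactly 5 clones (8 - 3) and exits, whereas polling the stuck counter in the loop condition would leave termination entirely to the rolling limits.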