@@ -201,7 +201,7 @@ int hw_errors;
 int total_accepted, total_rejected;
 float total_accepted_weighed;
 int total_getworks, total_stale, total_discarded;
-static int total_queued;
+static int total_queued, staged_rollable;
 unsigned int new_blocks;
 unsigned int found_blocks;
 
@@ -2475,18 +2475,16 @@ static bool stale_work3(struct work *work, bool share, bool failoveronly);
 static inline bool should_roll(struct work *work)
 {
 	struct timeval now;
-	double share_time;
 	time_t expiry;
 
 	if (work->pool != current_pool() && pool_strategy != POOL_LOADBALANCE)
 		return false;
 
-	share_time = total_secs * mining_threads / (total_accepted + 1);
 	if (work->rolltime > opt_scantime)
 		expiry = work->rolltime;
 	else
 		expiry = opt_scantime;
-	expiry -= share_time;
+	expiry = expiry * 2 / 3;
 
 	/* We shouldn't roll if we're unlikely to get one shares' duration
 	 * work out of doing so */
@@ -2546,6 +2544,9 @@ static bool clone_available(void)
 	struct work *work, *tmp;
 	bool cloned = false;
 
+	if (!staged_rollable)
+		goto out;
+
 	mutex_lock(stgd_lock);
 	HASH_ITER(hh, staged_work, work, tmp) {
 		if (can_roll(work) && should_roll(work)) {
@@ -2565,6 +2566,7 @@ static bool clone_available(void)
 	}
 	mutex_unlock(stgd_lock);
 
+out:
 	return cloned;
 }
 
@@ -3238,6 +3240,11 @@ static int tv_sort(struct work *worka, struct work *workb)
 	return worka->tv_staged.tv_sec - workb->tv_staged.tv_sec;
 }
 
+static bool work_rollable(struct work *work)
+{
+	return (!work->clone && work->rolltime);
+}
+
 static bool hash_push(struct work *work)
 {
 	bool rc = true, dec = false;
@@ -3248,6 +3255,8 @@ static bool hash_push(struct work *work)
 	}
 
 	mutex_lock(stgd_lock);
+	if (work_rollable(work))
+		staged_rollable++;
 	if (likely(!getq->frozen)) {
 		HASH_ADD_INT(staged_work, id, work);
 		work->pool->staged++;
@@ -4367,6 +4376,8 @@ static struct work *hash_pop(const struct timespec *abstime)
 		work = staged_work;
 		HASH_DEL(staged_work, work);
 		work->pool->staged--;
+		if (work_rollable(work))
+			staged_rollable--;
 	}
 	mutex_unlock(stgd_lock);
 