@@ -231,7 +231,7 @@ double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;
 static struct timeval miner_started;
 
-pthread_mutex_t control_lock;
+cglock_t control_lock;
 pthread_mutex_t stats_lock;
 
 static pthread_mutex_t submitting_lock;
@@ -541,9 +541,9 @@ struct pool *current_pool(void)
 {
 	struct pool *pool;
 
-	mutex_lock(&control_lock);
+	cg_rlock(&control_lock);
 	pool = currentpool;
-	mutex_unlock(&control_lock);
+	cg_runlock(&control_lock);
 	return pool;
 }
 
@@ -1789,9 +1789,9 @@ static struct work *make_work(void)
 
 	if (unlikely(!work))
 		quit(1, "Failed to calloc work in make_work");
-	mutex_lock(&control_lock);
+	cg_wlock(&control_lock);
 	work->id = total_work++;
-	mutex_unlock(&control_lock);
+	cg_wunlock(&control_lock);
 	return work;
 }
 
@@ -2907,7 +2907,7 @@ static uint64_t share_diff(const struct work *work)
 	bool new_best = false;
 
 	ret = target_diff(work->hash);
-	mutex_lock(&control_lock);
+	cg_wlock(&control_lock);
 	if (unlikely(ret > best_diff)) {
 		new_best = true;
 		best_diff = ret;
@@ -2915,7 +2915,7 @@ static uint64_t share_diff(const struct work *work)
 	}
 	if (unlikely(ret > work->pool->best_diff))
 		work->pool->best_diff = ret;
-	mutex_unlock(&control_lock);
+	cg_wunlock(&control_lock);
 
 	if (unlikely(new_best))
 		applog(LOG_INFO, "New best share: %s", best_share);
@@ -4422,7 +4422,7 @@ void switch_pools(struct pool *selected)
 	struct pool *pool, *last_pool;
 	int i, pool_no, next_pool;
 
-	mutex_lock(&control_lock);
+	cg_wlock(&control_lock);
 	last_pool = currentpool;
 	pool_no = currentpool->pool_no;
 
@@ -4477,7 +4477,7 @@ void switch_pools(struct pool *selected)
 
 	currentpool = pools[pool_no];
 	pool = currentpool;
-	mutex_unlock(&control_lock);
+	cg_wunlock(&control_lock);
 
 	/* Set the lagging flag to avoid pool not providing work fast enough
 	 * messages in failover only mode since we have to get all fresh work
@@ -6125,9 +6125,9 @@ static int cp_prio(void)
 {
 	int prio;
 
-	mutex_lock(&control_lock);
+	cg_rlock(&control_lock);
 	prio = currentpool->prio;
-	mutex_unlock(&control_lock);
+	cg_runlock(&control_lock);
 	return prio;
 }
 
@@ -8221,14 +8221,14 @@ static void *test_pool_thread(void *arg)
 		pool_tset(pool, &pool->lagging);
 		pool_tclear(pool, &pool->idle);
 
-		mutex_lock(&control_lock);
+		cg_wlock(&control_lock);
 		if (!pools_active) {
 			currentpool = pool;
 			if (pool->pool_no != 0)
 				applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url);
 			pools_active = true;
 		}
-		mutex_unlock(&control_lock);
+		cg_wunlock(&control_lock);
 		pool_resus(pool);
 	} else
 		pool_died(pool);
@@ -8284,7 +8284,7 @@ int main(int argc, char *argv[])
 
 	mutex_init(&hash_lock);
 	mutex_init(&console_lock);
-	mutex_init(&control_lock);
+	cglock_init(&control_lock);
 	mutex_init(&stats_lock);
 	mutex_init(&sharelog_lock);
 	mutex_init(&ch_lock);
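
For reference, the semantics this conversion relies on: a cglock_t pairs a mutex with a read/write lock so that frequent readers such as current_pool() and cp_prio() can hold the lock concurrently, while writers such as make_work() and switch_pools() still get exclusive access. Below is a minimal sketch of that idea, assuming an implementation along the lines of the project's miner.h (details simplified; the real helpers add error checking and further lock variants):

    /* Sketch of cglock_t semantics -- illustrative only. */
    #include <pthread.h>

    typedef struct cglock {
        pthread_mutex_t mutex;
        pthread_rwlock_t rwlock;
    } cglock_t;

    static inline void cglock_init(cglock_t *lock)
    {
        pthread_mutex_init(&lock->mutex, NULL);
        pthread_rwlock_init(&lock->rwlock, NULL);
    }

    /* Readers hold the mutex only while acquiring the rwlock, so any
     * number of read-side critical sections may run concurrently. */
    static inline void cg_rlock(cglock_t *lock)
    {
        pthread_mutex_lock(&lock->mutex);
        pthread_rwlock_rdlock(&lock->rwlock);
        pthread_mutex_unlock(&lock->mutex);
    }

    static inline void cg_runlock(cglock_t *lock)
    {
        pthread_rwlock_unlock(&lock->rwlock);
    }

    /* Writers keep the mutex across the whole critical section, which
     * also stops new readers from starting while a writer waits. */
    static inline void cg_wlock(cglock_t *lock)
    {
        pthread_mutex_lock(&lock->mutex);
        pthread_rwlock_wrlock(&lock->rwlock);
    }

    static inline void cg_wunlock(cglock_t *lock)
    {
        pthread_rwlock_unlock(&lock->rwlock);
        pthread_mutex_unlock(&lock->mutex);
    }

Read-mostly callers dominate control_lock (current_pool() sits on the work-generation fast path), so letting them share the lock rather than serialize on a mutex is presumably the point of the change; the read/write choice at each call site above matches whether the site only inspects currentpool or mutates shared state such as total_work, best_diff, and the active pool.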