@@ -365,6 +365,23 @@ static void applog_and_exit(const char *fmt, ...)
 static pthread_mutex_t sharelog_lock;
 static FILE *sharelog_file = NULL;
 
+struct thr_info *get_thread(int thr_id)
+{
+	struct thr_info *thr;
+
+	mutex_lock(&mining_thr_lock);
+	thr = mining_thr[thr_id];
+	mutex_unlock(&mining_thr_lock);
+	return thr;
+}
+
+static struct cgpu_info *get_thr_cgpu(int thr_id)
+{
+	struct thr_info *thr = get_thread(thr_id);
+
+	return thr->cgpu;
+}
+
 static void sharelog(const char*disposition, const struct work*work)
 {
 	char *target, *hash, *data;
@@ -379,9 +396,7 @@ static void sharelog(const char*disposition, const struct work*work)
 		return;
 
 	thr_id = work->thr_id;
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
+	cgpu = get_thr_cgpu(thr_id);
 	pool = work->pool;
 	t = (unsigned long int)(work->tv_work_found.tv_sec);
 	target = bin2hex(work->target, sizeof(work->target));
@@ -1726,11 +1741,7 @@ out:
 
 int dev_from_id(int thr_id)
 {
-	struct cgpu_info *cgpu;
-
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
+	struct cgpu_info *cgpu = get_thr_cgpu(thr_id);
 
 	return cgpu->device_id;
 }
@@ -1907,10 +1918,7 @@ static void text_print_status(int thr_id)
 	struct cgpu_info *cgpu;
 	char logline[256];
 
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
-
+	cgpu = get_thr_cgpu(thr_id);
 	if (cgpu) {
 		get_statline(logline, cgpu);
 		printf("%s\n", logline);
@@ -1979,9 +1987,7 @@ static void curses_print_devstatus(int thr_id)
 	char displayed_hashes[16], displayed_rolling[16];
 	uint64_t dh64, dr64;
 
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
+	cgpu = get_thr_cgpu(thr_id);
 
 	if (devcursor + cgpu->cgminer_id > LINES - 2 || opt_compact)
 		return;
@@ -2232,9 +2238,7 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
 	struct pool *pool = work->pool;
 	struct cgpu_info *cgpu;
 
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[work->thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
+	cgpu = get_thr_cgpu(work->thr_id);
 
 	if (json_is_true(res) || (work->gbt && json_is_null(res))) {
 		mutex_lock(&stats_lock);
@@ -2391,9 +2395,7 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
 	char hashshow[64 + 4] = "";
 	char worktime[200] = "";
 
-	mutex_lock(&mining_thr_lock);
-	cgpu = mining_thr[thr_id]->cgpu;
-	mutex_unlock(&mining_thr_lock);
+	cgpu = get_thr_cgpu(thr_id);
 
 #ifdef __BIG_ENDIAN__
 	int swapcounter = 0;
@@ -2781,9 +2783,7 @@ static void __kill_work(void)
 	applog(LOG_DEBUG, "Stopping mining threads");
 	/* Stop the mining threads*/
 	for (i = 0; i < mining_threads; i++) {
-		mutex_lock(&mining_thr_lock);
-		thr = mining_thr[i];
-		mutex_unlock(&mining_thr_lock);
+		thr = get_thread(i);
 		thr_info_freeze(thr);
 		thr->pause = true;
 	}
@@ -2793,9 +2793,7 @@ static void __kill_work(void)
 	applog(LOG_DEBUG, "Killing off mining threads");
 	/* Kill the mining threads*/
 	for (i = 0; i < mining_threads; i++) {
-		mutex_lock(&mining_thr_lock);
-		thr = mining_thr[i];
-		mutex_unlock(&mining_thr_lock);
+		thr = get_thread(i);
 		thr_info_cancel(thr);
 	}
 
@@ -4443,9 +4441,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes = (double)hashes_done / 1000000.0;
 	/* Update the last time this thread reported in */
 	if (thr_id >= 0) {
-		mutex_lock(&mining_thr_lock);
-		thr = mining_thr[thr_id];
-		mutex_unlock(&mining_thr_lock);
+		thr = get_thread(thr_id);
 		gettimeofday(&(thr->last), NULL);
 		thr->cgpu->device_last_well = time(NULL);
 	}
@@ -5907,9 +5903,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		for (i = 0; i < mining_threads; i++) {
 			struct thr_info *thr;
 
-			mutex_lock(&mining_thr_lock);
-			thr = mining_thr[i];
-			mutex_unlock(&mining_thr_lock);
+			thr = get_thread(i);
 
 			/* Don't touch disabled devices */
 			if (thr->cgpu->deven == DEV_DISABLED)
@@ -6855,9 +6849,7 @@ begin_bench:
 		cgpu->status = LIFE_INIT;
 
 		for (j = 0; j < cgpu->threads; ++j, ++k) {
-			mutex_lock(&mining_thr_lock);
-			thr = mining_thr[k];
-			mutex_unlock(&mining_thr_lock);
+			thr = get_thread(k);
 			thr->id = k;
 			thr->cgpu = cgpu;
 			thr->device_thread = j;