@@ -1004,11 +1004,13 @@ static void *avalon_get_results(void *userdata)
 	return NULL;
 }
 
-static void avalon_rotate_array(struct cgpu_info *avalon)
+static void avalon_rotate_array(struct cgpu_info *avalon, struct avalon_info *info)
 {
+	mutex_lock(&info->qlock);
 	avalon->queued = 0;
 	if (++avalon->work_array >= AVALON_ARRAY_SIZE)
 		avalon->work_array = 0;
+	mutex_unlock(&info->qlock);
 }
 
 static void bitburner_rotate_array(struct cgpu_info *avalon)
@@ -1117,7 +1119,6 @@ static void *avalon_send_tasks(void *userdata)
 		us_timeout = 0x100000000ll / info->asic_count / info->frequency;
 		cgsleep_prepare_r(&ts_start);
 
-		mutex_lock(&info->qlock);
 		start_count = avalon->work_array * avalon_get_work_count;
 		end_count = start_count + avalon_get_work_count;
 		for (i = start_count, j = 0; i < end_count; i++, j++) {
@@ -1128,6 +1129,7 @@ static void *avalon_send_tasks(void *userdata)
 				break;
 			}
 
+			mutex_lock(&info->qlock);
 			if (likely(j < avalon->queued && !info->overheat && avalon->works[i])) {
 				avalon_init_task(&at, 0, 0, info->fan_pwm,
 						info->timeout, info->asic_count,
@@ -1148,6 +1150,7 @@ static void *avalon_send_tasks(void *userdata)
 				 * idling any miners. */
 				avalon_reset_auto(info);
 			}
+			mutex_unlock(&info->qlock);
 
 			ret = avalon_send_task(&at, avalon, info);
 
@@ -1160,8 +1163,7 @@ static void *avalon_send_tasks(void *userdata)
 			}
 		}
 
-		avalon_rotate_array(avalon);
-		mutex_unlock(&info->qlock);
+		avalon_rotate_array(avalon, info);
 
 		cgsem_post(&info->qsem);
 