@@ -44,7 +44,7 @@ struct driver_registration *_bfg_drvreg2;
 void _bfg_register_driver(const struct device_drv *drv)
 {
 	struct driver_registration *ndr;
-
+
 	if (!drv)
 	{
 		// NOTE: Not sorted at this point (dname and priority may be unassigned until drv_init!)
@@ -56,7 +56,7 @@ void _bfg_register_driver(const struct device_drv *drv)
 		}
 		return;
 	}
-
+
 	ndr = malloc(sizeof(*ndr));
 	*ndr = (struct driver_registration){
 		.drv = drv,
@@ -122,11 +122,11 @@ bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashe
 {
 	struct cgpu_info *cgpu = thr->cgpu;
 	const long cycle = opt_log_interval / 5 ? : 1;
-
+
 	if (unlikely(hashes == -1)) {
 		if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0)
 			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
-
+
 		if (thr->scanhash_working && opt_restart) {
 			applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr);
 			thr->scanhash_working = false;
@@ -141,13 +141,13 @@ bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashe
 	}
 	else
 		thr->scanhash_working = true;
-
+
 	thr->hashes_done += hashes;
 	if (hashes > cgpu->max_hashes)
 		cgpu->max_hashes = hashes;
-
+
 	timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done);
-
+
 	// max_nonce management (optional)
 	if (max_nonce)
 	{
@@ -155,15 +155,15 @@ bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashe
 		new_max_nonce *= cycle;
 		new_max_nonce *= 1000000;
 		new_max_nonce /= ((uint64_t)thr->tv_hashes_done.tv_sec * 1000000) + thr->tv_hashes_done.tv_usec;
-
+
 		if (new_max_nonce > 0xffffffff)
 			new_max_nonce = 0xffffffff;
-
+
 		*max_nonce = new_max_nonce;
 	}
-
+
 	hashmeter2(thr);
-
+
 	return true;
 }
 
@@ -186,7 +186,7 @@ int restart_wait(struct thr_info *thr, unsigned int mstime)
 	fd_set rfds;
 	SOCKETTYPE wrn = thr->work_restart_notifier[0];
 	int rv;
-
+
 	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
 	{
 		// This is a bug!
@@ -194,7 +194,7 @@ int restart_wait(struct thr_info *thr, unsigned int mstime)
 		cgsleep_ms(mstime);
 		return (thr->work_restart ? 0 : ETIMEDOUT);
 	}
-
+
 	timer_set_now(&tv_now);
 	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
 	while (true)
@@ -221,7 +221,7 @@ struct work *get_and_prepare_work(struct thr_info *thr)
 	struct cgpu_info *proc = thr->cgpu;
 	struct device_drv *api = proc->drv;
 	struct work *work;
-
+
 	work = get_work(thr);
 	if (!work)
 		return NULL;
@@ -246,14 +246,14 @@ void minerloop_scanhash(struct thr_info *mythr)
 	int64_t hashes;
 	struct work *work;
 	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
-
+
 #ifdef HAVE_PTHREAD_CANCEL
 	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 #endif
-
+
 	if (cgpu->deven != DEV_ENABLED)
 		mt_disable(mythr);
-
+
 	while (likely(!cgpu->shutdown)) {
 		mythr->work_restart = false;
 		request_work(mythr);
@@ -261,7 +261,7 @@ void minerloop_scanhash(struct thr_info *mythr)
 		if (!work)
 			break;
 		timer_set_now(&work->tv_work_start);
-
+
 		do {
 			thread_reportin(mythr);
 			/* Only allow the mining thread to be cancelled when
@@ -277,11 +277,11 @@ void minerloop_scanhash(struct thr_info *mythr)
 			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 			pthread_testcancel();
 			thread_reportin(mythr);
-
+
 			timersub(&tv_end, &tv_start, &tv_hashes);
 			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
 				goto disabled;
-
+
 			if (unlikely(mythr->work_restart)) {
 				/* Apart from device_thread 0, we stagger the
 				 * starting of every next thread to try and get
@@ -296,11 +296,11 @@ void minerloop_scanhash(struct thr_info *mythr)
 				}
 				break;
 			}
-
+
 			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
 disabled:
 				mt_disable(mythr);
-
+
 			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
 
 		/* The inner do-while loop will exit unless the device is capable of
@@ -326,7 +326,7 @@ bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
 	struct cgpu_info *proc = mythr->cgpu;
 	struct device_drv *api = proc->drv;
 	struct timeval tv_worktime;
-
+
 	mythr->tv_morework.tv_sec = -1;
 	mythr->_job_transition_in_progress = true;
 	if (mythr->work)
@@ -373,7 +373,7 @@ void do_get_results(struct thr_info *mythr, bool proceed_with_new_job)
 	struct cgpu_info *proc = mythr->cgpu;
 	struct device_drv *api = proc->drv;
 	struct work *work = mythr->work;
-
+
 	mythr->_job_transition_in_progress = true;
 	mythr->tv_results_jobstart = mythr->tv_jobstart;
 	mythr->_proceed_with_new_job = proceed_with_new_job;
@@ -392,9 +392,9 @@ void job_results_fetched(struct thr_info *mythr)
 	if (likely(mythr->prev_work))
 	{
 		struct timeval tv_now;
-
+
 		timer_set_now(&tv_now);
-
+
 		do_process_results(mythr, &tv_now, mythr->prev_work, true);
 	}
 	mt_disable_start__async(mythr);
@@ -405,7 +405,7 @@ void do_job_start(struct thr_info *mythr)
 {
 	struct cgpu_info *proc = mythr->cgpu;
 	struct device_drv *api = proc->drv;
-
+
 	thread_reportin(mythr);
 	api->job_start(mythr);
 }
@@ -413,9 +413,9 @@ void do_job_start(struct thr_info *mythr)
 void mt_job_transition(struct thr_info *mythr)
 {
 	struct timeval tv_now;
-
+
 	timer_set_now(&tv_now);
-
+
 	if (mythr->starting_next_work)
 	{
 		mythr->next_work->tv_work_start = tv_now;
@@ -432,19 +432,19 @@ void mt_job_transition(struct thr_info *mythr)
 void job_start_complete(struct thr_info *mythr)
 {
 	struct timeval tv_now;
-
+
 	if (unlikely(!mythr->prev_work))
 		return;
-
+
 	timer_set_now(&tv_now);
-
+
 	do_process_results(mythr, &tv_now, mythr->prev_work, false);
 }
 
 void job_start_abort(struct thr_info *mythr, bool failure)
 {
 	struct cgpu_info *proc = mythr->cgpu;
-
+
 	if (failure)
 	{
 		proc->deven = DEV_RECOVER_ERR;
@@ -460,18 +460,18 @@ bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct
 	struct device_drv *api = proc->drv;
 	struct timeval tv_hashes;
 	int64_t hashes = 0;
-
+
 	if (api->job_process_results)
 		hashes = api->job_process_results(mythr, work, stopping);
 	thread_reportin(mythr);
-
+
 	if (hashes)
 	{
 		timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes);
 		if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL))
 			return false;
 	}
-
+
 	return true;
 }
 
@@ -482,7 +482,7 @@ void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
 	struct timeval tv_now;
 	int maxfd;
 	fd_set rfds;
-
+
 	timer_set_now(&tv_now);
 	FD_ZERO(&rfds);
 	FD_SET(thr->notifier[0], &rfds);
@@ -543,10 +543,10 @@ static
 void _minerloop_setup(struct thr_info *mythr)
 {
 	struct cgpu_info * const cgpu = mythr->cgpu, *proc;
-
+
 	if (mythr->work_restart_notifier[1] == -1)
 		notifier_init(mythr->work_restart_notifier);
-
+
 	for (proc = cgpu; proc; proc = proc->next_proc)
 	{
 		mythr = proc->thr[0];
@@ -564,23 +564,23 @@ void minerloop_async(struct thr_info *mythr)
 	struct timeval tv_timeout;
 	struct cgpu_info *proc;
 	bool is_running, should_be_running;
-
+
 	_minerloop_setup(mythr);
-
+
 	while (likely(!cgpu->shutdown)) {
 		tv_timeout.tv_sec = -1;
 		timer_set_now(&tv_now);
 		for (proc = cgpu; proc; proc = proc->next_proc)
 		{
 			mythr = proc->thr[0];
-
+
 			// Nothing should happen while we're starting a job
 			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
 				goto defer_events;
-
+
 			is_running = mythr->work;
 			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
-
+
 			if (should_be_running)
 			{
 				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
@@ -613,32 +613,32 @@ disabled: ;
 					else // !mythr->_mt_disable_called
 						mt_disable_start__async(mythr);
 				}
-
+
 				timer_unset(&mythr->tv_morework);
 			}
-
+
 			if (timer_passed(&mythr->tv_morework, &tv_now))
 			{
 djp: ;
 				if (!do_job_prepare(mythr, &tv_now))
 					goto disabled;
 			}
-
+
 defer_events:
 			if (timer_passed(&mythr->tv_poll, &tv_now))
 				api->poll(mythr);
-
+
 			if (timer_passed(&mythr->tv_watchdog, &tv_now))
 			{
 				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
 				bfg_watchdog(proc, &tv_now);
 			}
-
+
 			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
 			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
 			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
 		}
-
+
 		do_notifier_select(thr, &tv_timeout);
 	}
 }
@@ -648,7 +648,7 @@ void do_queue_flush(struct thr_info *mythr)
 {
 	struct cgpu_info *proc = mythr->cgpu;
 	struct device_drv *api = proc->drv;
-
+
 	api->queue_flush(mythr);
 	if (mythr->next_work)
 	{
@@ -667,29 +667,29 @@ void minerloop_queue(struct thr_info *thr)
 	struct cgpu_info *proc;
 	bool should_be_running;
 	struct work *work;
-
+
 	_minerloop_setup(thr);
-
+
 	while (likely(!cgpu->shutdown)) {
 		tv_timeout.tv_sec = -1;
 		timer_set_now(&tv_now);
 		for (proc = cgpu; proc; proc = proc->next_proc)
 		{
 			mythr = proc->thr[0];
-
+
 			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
 redo:
 			if (should_be_running)
 			{
 				if (unlikely(mythr->_mt_disable_called))
 					mt_disable_finish(mythr);
-
+
 				if (unlikely(mythr->work_restart))
 				{
 					mythr->work_restart = false;
 					do_queue_flush(mythr);
 				}
-
+
 				while (!mythr->queue_full)
 				{
 					if (mythr->next_work)
@@ -715,27 +715,27 @@ redo:
 				do_queue_flush(mythr);
 				mt_disable_start(mythr);
 			}
-
+
 			if (timer_passed(&mythr->tv_poll, &tv_now))
 				api->poll(mythr);
-
+
 			if (timer_passed(&mythr->tv_watchdog, &tv_now))
 			{
 				timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000);
 				bfg_watchdog(proc, &tv_now);
 			}
-
+
 			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
 			if (should_be_running && !mythr->queue_full)
 				goto redo;
-
+
 			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
 			reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog);
 		}
-
+
 		// HACK: Some designs set the main thr tv_poll from secondary thrs
 		reduce_timeout_to(&tv_timeout, &cgpu->thr[0]->tv_poll);
-
+
 		do_notifier_select(thr, &tv_timeout);
 	}
 }
@@ -762,11 +762,11 @@ void *miner_thread(void *userdata)
 
 	if (drv_ready(cgpu) && !cgpu->already_set_defaults)
 		cgpu_set_defaults(cgpu);
-
+
 	thread_reportout(mythr);
 	applog(LOG_DEBUG, "Popping ping in miner thread");
 	notifier_read(mythr->notifier); // Wait for a notification to start
-
+
 	cgtime(&cgpu->cgminer_stats.start_tv);
 	if (drv->minerloop)
 		drv->minerloop(mythr);
@@ -785,7 +785,7 @@ out: ;
 	mythr->getwork = 0;
 	mythr->has_pth = false;
 	cgsleep_ms(1);
-
+
 	if (drv->thread_shutdown)
 		drv->thread_shutdown(mythr);
 
@@ -800,29 +800,29 @@ static
 bool _add_cgpu(struct cgpu_info *cgpu)
 {
 	int lpcount;
-
+
 	if (!cgpu->procs)
 		cgpu->procs = 1;
 	lpcount = cgpu->procs;
 	cgpu->device = cgpu;
-
+
 	cgpu->dev_repr = malloc(6);
 	cgpu->dev_repr_ns = malloc(6);
-
+
 #ifdef NEED_BFG_LOWL_VCOM
 	maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer);
 	maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product);
 	maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial);
 #endif
-
+
 	devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1));
 	devices_new[total_devices_new++] = cgpu;
-
+
 	if (lpcount > 1)
 	{
 		int tpp = cgpu->threads / lpcount;
 		struct cgpu_info **nlp_p, *slave;
-
+
 		nlp_p = &cgpu->next_proc;
 		for (int i = 1; i < lpcount; ++i)
 		{
@@ -841,7 +841,7 @@ bool _add_cgpu(struct cgpu_info *cgpu)
 
 	renumber_cgpu(cgpu);
 	cgpu->last_device_valid_work = time(NULL);
-
+
 	return true;
 }
 
@@ -862,12 +862,12 @@ bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
 {
 	if (!prev_cgpu)
 		return add_cgpu(cgpu);
-
+
 	while (prev_cgpu->next_proc)
 		prev_cgpu = prev_cgpu->next_proc;
-
+
 	mutex_lock(&_add_cgpu_mutex);
-
+
 	int old_total_devices = total_devices_new;
 	if (!_add_cgpu(cgpu))
 	{
@@ -875,9 +875,9 @@ bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu)
 		return false;
 	}
 	prev_cgpu->next_proc = devices_new[old_total_devices];
-
+
 	mutex_unlock(&_add_cgpu_mutex);
-
+
 	return true;
 }
 
@@ -886,18 +886,18 @@ const char *proc_set_device_help(struct cgpu_info * const proc, const char * con
 	const struct bfg_set_device_definition *sdf;
 	char *p = replybuf;
 	bool first = true;
-
+
 	*out_success = SDR_HELP;
 	sdf = proc->set_device_funcs;
 	if (!sdf)
 nohelp:
 		return "No help available";
-
+
 	size_t matchlen = 0;
 	if (newvalue)
 		while (newvalue[matchlen] && !isspace(newvalue[matchlen]))
 			++matchlen;
-
+
 	for ( ; sdf->optname; ++sdf)
 	{
 		if (!sdf->description)
@@ -947,7 +947,7 @@ static
 const char *_proc_set_device(struct cgpu_info * const proc, const char * const optname, const char * const newvalue, char * const replybuf, enum bfg_set_device_replytype * const out_success)
 {
 	const struct bfg_set_device_definition *sdf;
-
+
 	sdf = proc->set_device_funcs;
 	if (!sdf)
 	{
@@ -963,10 +963,10 @@ const char *_proc_set_device(struct cgpu_info * const proc, const char * const o
 		_set_auto_sdr(out_success, rv, optname);
 		return rv;
 	}
-
+
 	if (!strcasecmp(optname, "help"))
 		return proc_set_device_help(proc, optname, newvalue, replybuf, out_success);
-
+
 	*out_success = SDR_UNKNOWN;
 	sprintf(replybuf, "Unknown option: %s", optname);
 	return replybuf;
@@ -981,7 +981,7 @@ const char *__proc_set_device(struct cgpu_info * const proc, char * const optnam
 		_set_auto_sdr(out_success, rv, optname);
 		return rv;
 	}
-
+
 	return _proc_set_device(proc, optname, newvalue, replybuf, out_success);
 }
 
@@ -1012,11 +1012,11 @@ const char *proc_set_device_tui_wrapper(struct cgpu_info * const proc, char * co
 	char * const cvar = curses_input(prompt);
 	if (!cvar)
 		return "Cancelled\n";
-
+
 	enum bfg_set_device_replytype success;
 	const char * const reply = func(proc, optname, cvar, replybuf, &success);
 	free(cvar);
-
+
 	if (reply)
 	{
 		if (reply != replybuf)
@@ -1025,7 +1025,7 @@ const char *proc_set_device_tui_wrapper(struct cgpu_info * const proc, char * co
 		tailsprintf(replybuf, sizeof(replybuf), "\n");
 		return replybuf;
 	}
-
+
 	return success_msg ?: "Successful\n";
 }
 #endif
@@ -1034,10 +1034,10 @@ const char *proc_set_device_tui_wrapper(struct cgpu_info * const proc, char * co
 bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp)
 {
 	detectone_func_t detectone = userp;
-
+
 	if (serial_claim(info->path, NULL))
 		applogr(false, LOG_DEBUG, "%s is already claimed... skipping probes", info->path);
-
+
 	return detectone(info->path);
 }
 #endif
@@ -1090,7 +1090,7 @@ int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_
 			++found;
 		}
 	}
-
+
 	if ((forceauto || !(inhibitauto || found)) && autoscan)
 		found += autoscan();
 
@@ -1147,10 +1147,10 @@ void close_device_fd(struct thr_info * const thr)
 {
 	struct cgpu_info * const proc = thr->cgpu;
 	const int fd = proc->device_fd;
-
+
 	if (fd == -1)
 		return;
-
+
 	if (close(fd))
 		applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr);
 	else