miner.c

/*
 * Copyright 2011-2013 Con Kolivas
 * Copyright 2011-2013 Luke Dashjr
 * Copyright 2012-2013 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */
#include "config.h"
#ifdef HAVE_CURSES
#ifdef USE_UNICODE
#define PDC_WIDE
#endif
// Must be before stdbool, since pdcurses typedefs bool :/
#include <curses.h>
#endif
#include <ctype.h>
#include <float.h>
#include <limits.h>
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <stdarg.h>
#include <assert.h>
#include <signal.h>
#include <wctype.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif
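/* Hotplug support needs a platform device-notification mechanism:
 * libudev + epoll on POSIX systems, or device-change broadcasts (dbt.h)
 * on Windows.  HAVE_BFG_HOTPLUG is only defined when one is available. */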
#ifndef WIN32
#include <sys/resource.h>
#include <sys/socket.h>
#if defined(HAVE_LIBUDEV) && defined(HAVE_SYS_EPOLL_H)
#include <libudev.h>
#include <sys/epoll.h>
#define HAVE_BFG_HOTPLUG
#endif
#else
#include <winsock2.h>
#include <windows.h>
#include <dbt.h>
#define HAVE_BFG_HOTPLUG
#endif
#include <ccan/opt/opt.h>
#include <jansson.h>
#include <curl/curl.h>
#include <libgen.h>
#include <sha2.h>
#include <utlist.h>
#include <blkmaker.h>
#include <blkmaker_jansson.h>
#include <blktemplate.h>
#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "miner.h"
#include "adl.h"
#include "driver-cpu.h"
#include "driver-opencl.h"
#include "scrypt.h"
#include "util.h"
#ifdef USE_AVALON
#include "driver-avalon.h"
#endif
#ifdef HAVE_BFG_LOWLEVEL
#include "lowlevel.h"
#endif
#if defined(unix) || defined(__APPLE__)
#include <errno.h>
#include <fcntl.h>
#include <sys/wait.h>
#endif
#ifdef USE_SCRYPT
#include "scrypt.h"
#endif
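/* Any FPGA-class driver being built enables the shared FPGA support code. */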
#if defined(USE_AVALON) || defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_MODMINER) || defined(USE_NANOFURY) || defined(USE_X6500) || defined(USE_ZTEX)
# define USE_FPGA
#endif
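/* Granularity of the statistics summary logged at shutdown; judging by
 * the names: none, one line per device, one line per processor, or full
 * detail. */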
enum bfg_quit_summary {
	BQS_DEFAULT,
	BQS_NONE,
	BQS_DEVS,
	BQS_PROCS,
	BQS_DETAILED,
};
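/* Display names for the pool strategies; the order must match
 * enum pool_strategy (see pool_strategy below, which defaults to
 * POOL_FAILOVER). */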
struct strategies strategies[] = {
	{ "Failover" },
	{ "Round Robin" },
	{ "Rotate" },
	{ "Load Balance" },
	{ "Balance" },
};
static char packagename[256];
bool opt_protocol;
bool opt_dev_protocol;
static bool opt_benchmark;
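/* Which work-fetching protocols may be attempted; want_stratum is
 * declared further below. */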
static bool want_longpoll = true;
static bool want_gbt = true;
static bool want_getwork = true;
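/* With a newer libblkmaker (version macro > 1), a custom coinbase
 * script and template nonce can be supplied for locally-assembled
 * block templates. */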
#if BLKMAKER_VERSION > 1
struct _cbscript_t {
	char *data;
	size_t sz;
};
static struct _cbscript_t opt_coinbase_script;
static uint32_t template_nonce;
#endif
#if BLKMAKER_VERSION > 0
char *opt_coinbase_sig;
#endif
static enum bfg_quit_summary opt_quit_summary = BQS_DEFAULT;
static bool include_serial_in_statline;
char *request_target_str;
float request_pdiff = 1.0;
double request_bdiff;
static bool want_stratum = true;
bool have_longpoll;
int opt_skip_checks;
bool want_per_device_stats;
bool use_syslog;
bool opt_quiet_work_updates = true;
bool opt_quiet;
bool opt_realquiet;
int loginput_size;
bool opt_compact;
bool opt_show_procs;
const int opt_cutofftemp = 95;
int opt_hysteresis = 3;
static int opt_retries = -1;
int opt_fail_pause = 5;
int opt_log_interval = 20;
int opt_queue = 1;
int opt_scantime = 60;
int opt_expiry = 120;
int opt_expiry_lp = 3600;
unsigned long long global_hashrate;
static bool opt_unittest = false;
unsigned long global_quota_gcd = 1;
time_t last_getwork;
#ifdef HAVE_OPENCL
int opt_dynamic_interval = 7;
int nDevs;
int opt_g_threads = -1;
#endif
#ifdef USE_SCRYPT
static char detect_algo = 1;
bool opt_scrypt;
#else
static char detect_algo;
#endif
bool opt_restart = true;
#ifdef USE_LIBMICROHTTPD
#include "httpsrv.h"
int httpsrv_port = -1;
#endif
#ifdef USE_LIBEVENT
int stratumsrv_port = -1;
#endif
const int rescan_delay_ms = 1000;
#ifdef HAVE_BFG_HOTPLUG
bool opt_hotplug = true;
const int hotplug_delay_ms = 100;
#else
const bool opt_hotplug;
#endif
  181. struct string_elist *scan_devices;
  182. static struct string_elist *opt_set_device_list;
  183. bool opt_force_dev_init;
  184. static struct string_elist *opt_devices_enabled_list;
  185. static bool opt_display_devs;
  186. int total_devices;
  187. struct cgpu_info **devices;
  188. int total_devices_new;
  189. struct cgpu_info **devices_new;
  190. bool have_opencl;
  191. int opt_n_threads = -1;
  192. int mining_threads;
  193. int num_processors;
  194. #ifdef HAVE_CURSES
  195. bool use_curses = true;
  196. #else
  197. bool use_curses;
  198. #endif
  199. #ifdef HAVE_LIBUSB
  200. bool have_libusb;
  201. #endif
  202. static bool opt_submit_stale = true;
  203. static float opt_shares;
  204. static int opt_submit_threads = 0x40;
  205. bool opt_fail_only;
  206. int opt_fail_switch_delay = 300;
  207. bool opt_autofan;
  208. bool opt_autoengine;
  209. bool opt_noadl;
  210. char *opt_api_allow = NULL;
  211. char *opt_api_groups;
  212. char *opt_api_description = PACKAGE_STRING;
  213. int opt_api_port = 4028;
  214. bool opt_api_listen;
  215. bool opt_api_mcast;
  216. char *opt_api_mcast_addr = API_MCAST_ADDR;
  217. char *opt_api_mcast_code = API_MCAST_CODE;
  218. char *opt_api_mcast_des = "";
  219. int opt_api_mcast_port = 4028;
  220. bool opt_api_network;
  221. bool opt_delaynet;
  222. bool opt_disable_pool;
  223. bool opt_disable_client_reconnect = false;
  224. static bool no_work;
  225. bool opt_worktime;
  226. bool opt_weighed_stats;
  227. char *opt_kernel_path;
  228. char *cgminer_path;
  229. #if defined(USE_BITFORCE)
  230. bool opt_bfl_noncerange;
  231. #endif
#define QUIET	(opt_quiet || opt_realquiet)

struct thr_info *control_thr;
struct thr_info **mining_thr;
static int watchpool_thr_id;
static int watchdog_thr_id;
#ifdef HAVE_CURSES
static int input_thr_id;
#endif
int gpur_thr_id;
static int api_thr_id;
static int total_control_threads;

pthread_mutex_t hash_lock;
static pthread_mutex_t *stgd_lock;
pthread_mutex_t console_lock;
cglock_t ch_lock;
static pthread_rwlock_t blk_lock;
static pthread_mutex_t sshare_lock;

pthread_rwlock_t netacc_lock;
pthread_rwlock_t mining_thr_lock;
pthread_rwlock_t devices_lock;

static pthread_mutex_t lp_lock;
static pthread_cond_t lp_cond;

pthread_cond_t gws_cond;

bool shutting_down;

double total_rolling;
double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static struct timeval miner_started;

cglock_t control_lock;
pthread_mutex_t stats_lock;

static pthread_mutex_t submitting_lock;
static int total_submitting;
static struct work *submit_waiting;
notifier_t submit_waiting_notifier;

int hw_errors;
int total_accepted, total_rejected;
int total_getworks, total_stale, total_discarded;
uint64_t total_bytes_rcvd, total_bytes_sent;
double total_diff1, total_bad_diff1;
double total_diff_accepted, total_diff_rejected, total_diff_stale;
static int staged_rollable;
unsigned int new_blocks;
unsigned int found_blocks;
unsigned int local_work;
unsigned int total_go, total_ro;

struct pool **pools;
static struct pool *currentpool = NULL;
int total_pools, enabled_pools;
enum pool_strategy pool_strategy = POOL_FAILOVER;
int opt_rotate_period;
static int total_urls, total_users, total_passes;

static
#ifndef HAVE_CURSES
const
#endif
bool curses_active;

#ifdef HAVE_CURSES
#if !(defined(PDCURSES) || defined(NCURSES_VERSION))
const
#endif
short default_bgcolor = COLOR_BLACK;
static int attr_title = A_BOLD;
#endif

static
#if defined(HAVE_CURSES) && defined(USE_UNICODE)
bool use_unicode;
static
bool have_unicode_degrees;
static
wchar_t unicode_micro = 'u';
#else
const bool use_unicode;
static
const bool have_unicode_degrees;
static
const char unicode_micro = 'u';
#endif

#ifdef HAVE_CURSES
#define U8_BAD_START "\xef\x80\x81"
#define U8_BAD_END   "\xef\x80\x80"
#define AS_BAD(x)  U8_BAD_START x U8_BAD_END

bool selecting_device;
unsigned selected_device;
#endif
static char current_block[40];

/* Protected by ch_lock */
static char *current_hash;
static uint32_t current_block_id;
char *current_fullhash;

static char datestamp[40];
static char blocktime[32];
time_t block_time;

static char best_share[ALLOC_H2B_SHORTV] = "0";
double current_diff = 0xFFFFFFFFFFFFFFFFULL;
static char block_diff[ALLOC_H2B_SHORTV];
static char net_hashrate[ALLOC_H2B_SHORT];
double best_diff = 0;

static bool known_blkheight_current;
static uint32_t known_blkheight;
static uint32_t known_blkheight_blkid;
static uint64_t block_subsidy;

struct block {
	char hash[40];
	UT_hash_handle hh;
	int block_no;
};

static struct block *blocks = NULL;

int swork_id;

/* For creating a hash database of stratum shares submitted that have not had
 * a response yet */
struct stratum_share {
	UT_hash_handle hh;
	bool block;
	struct work *work;
	int id;
};

static struct stratum_share *stratum_shares = NULL;

char *opt_socks_proxy = NULL;

static const char def_conf[] = "bfgminer.conf";
static bool config_loaded;
static int include_count;
#define JSON_INCLUDE_CONF "include"
#define JSON_LOAD_ERROR "JSON decode of file '%s' failed\n %s"
#define JSON_LOAD_ERROR_LEN strlen(JSON_LOAD_ERROR)
#define JSON_MAX_DEPTH 10
#define JSON_MAX_DEPTH_ERR "Too many levels of JSON includes (limit 10) or a loop"

char *cmd_idle, *cmd_sick, *cmd_dead;

#if defined(unix) || defined(__APPLE__)
static char *opt_stderr_cmd = NULL;
static int forkpid;
#endif // defined(unix)

#ifdef HAVE_CHROOT
char *chroot_dir;
#endif
#ifdef HAVE_PWD_H
char *opt_setuid;
#endif

struct sigaction termhandler, inthandler;

struct thread_q *getq;

static int total_work;
static bool staged_full;
struct work *staged_work = NULL;

struct schedtime {
	bool enable;
	struct tm tm;
};

struct schedtime schedstart;
struct schedtime schedstop;
bool sched_paused;
static bool time_before(struct tm *tm1, struct tm *tm2)
{
	if (tm1->tm_hour < tm2->tm_hour)
		return true;
	if (tm1->tm_hour == tm2->tm_hour && tm1->tm_min < tm2->tm_min)
		return true;
	return false;
}

static bool should_run(void)
{
	struct tm tm;
	time_t tt;
	bool within_range;

	if (!schedstart.enable && !schedstop.enable)
		return true;

	tt = time(NULL);
	localtime_r(&tt, &tm);

	// NOTE: This is delicately balanced so that should_run is always false if schedstart==schedstop
	if (time_before(&schedstop.tm, &schedstart.tm))
		within_range = (time_before(&tm, &schedstop.tm) || !time_before(&tm, &schedstart.tm));
	else
		within_range = (time_before(&tm, &schedstop.tm) && !time_before(&tm, &schedstart.tm));

	if (within_range && !schedstop.enable)
		/* This is a once off event with no stop time set */
		schedstart.enable = false;

	return within_range;
}
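/* Editor's note, worked example of the wrap-around schedule logic above
 * (illustrative values only): with --sched-start 22:00 and --sched-stop 06:00,
 * stop sorts before start, so the window wraps midnight and mining runs
 * whenever now < 06:00 OR now >= 22:00.  With start 06:00 and stop 22:00 the
 * window is the plain intersection: now < 22:00 AND now >= 06:00.  Because
 * time_before is strict, start == stop makes both comparisons fail and
 * should_run stays false, as the NOTE above intends. */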
void get_datestamp(char *f, size_t fsiz, time_t tt)
{
	struct tm _tm;
	struct tm *tm = &_tm;

	if (tt == INVALID_TIMESTAMP)
		tt = time(NULL);
	localtime_r(&tt, tm);
	snprintf(f, fsiz, "[%d-%02d-%02d %02d:%02d:%02d]",
	         tm->tm_year + 1900,
	         tm->tm_mon + 1,
	         tm->tm_mday,
	         tm->tm_hour,
	         tm->tm_min,
	         tm->tm_sec);
}

static
void get_timestamp(char *f, size_t fsiz, time_t tt)
{
	struct tm _tm;
	struct tm *tm = &_tm;

	localtime_r(&tt, tm);
	snprintf(f, fsiz, "[%02d:%02d:%02d]",
	         tm->tm_hour,
	         tm->tm_min,
	         tm->tm_sec);
}

static void applog_and_exit(const char *fmt, ...) FORMAT_SYNTAX_CHECK(printf, 1, 2);

static char exit_buf[512];

static void applog_and_exit(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(exit_buf, sizeof(exit_buf), fmt, ap);
	va_end(ap);
	_applog(LOG_ERR, exit_buf);
	exit(1);
}
char *devpath_to_devid(const char *devpath)
{
#ifndef WIN32
	struct stat my_stat;
	if (stat(devpath, &my_stat))
		return NULL;
	char *devs = malloc(6 + (sizeof(dev_t) * 2) + 1);
	memcpy(devs, "dev_t:", 6);
	bin2hex(&devs[6], &my_stat.st_rdev, sizeof(dev_t));
#else
	if (!strncmp(devpath, "\\\\.\\", 4))
		devpath += 4;
	if (strncasecmp(devpath, "COM", 3) || !devpath[3])
		return NULL;
	devpath += 3;
	char *p;
	strtol(devpath, &p, 10);
	if (p[0])
		return NULL;
	const int sz = (p - devpath);
	char *devs = malloc(4 + sz + 1);
	sprintf(devs, "com:%s", devpath);
#endif
	return devs;
}
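/* Editor's note, illustrative examples only: on POSIX systems the id is the
 * hex-encoded st_rdev of the device node, so "/dev/ttyUSB0" might map to
 * something like "dev_t:0058b800" (the exact digits depend on the OS and
 * sizeof(dev_t)); on Windows, "\\.\COM3" and "COM3" both map to "com:3".
 * Two path strings naming the same underlying device therefore compare equal
 * by id even when the strings themselves differ. */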
static
bool devpaths_match(const char * const ap, const char * const bp)
{
	char * const a = devpath_to_devid(ap);
	if (!a)
		return false;
	char * const b = devpath_to_devid(bp);
	bool rv = false;
	if (b)
	{
		rv = !strcmp(a, b);
		free(b);
	}
	free(a);
	return rv;
}

static
int proc_letter_to_number(const char *s, const char ** const rem)
{
	int n = 0, c;
	for ( ; s[0]; ++s)
	{
		if (unlikely(n > INT_MAX / 26))
			break;
		c = tolower(s[0]) - 'a';
		if (unlikely(c < 0 || c > 25))
			break;
		if (unlikely(INT_MAX - c < n))
			break;
		n = (n * 26) + c;
	}
	*rem = s;
	return n;
}
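/* Editor's note: proc_letter_to_number reads letters as plain base-26 digits
 * with 'a' = 0, stopping at the first non-letter (left in *rem) or before an
 * overflow.  So "a" -> 0, "b" -> 1, "z" -> 25, and "ba" -> 26; note that a
 * leading 'a' acts as a leading zero, so "aa" is also 0, not 26. */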
static
bool cgpu_match(const char * const pattern, const struct cgpu_info * const cgpu)
{
	// all   - matches anything
	// d0    - matches all processors of device 0
	// d0-3  - matches all processors of device 0, 1, 2, or 3
	// d0a   - matches first processor of device 0
	// 0     - matches processor 0
	// 0-4   - matches processors 0, 1, 2, 3, or 4
	// ___   - matches all processors on all devices using driver/name ___
	// ___0  - matches all processors of 0th device using driver/name ___
	// ___0a - matches first processor of 0th device using driver/name ___
	// @*    - matches device with serial or path *
	// @*@a  - matches first processor of device with serial or path *
	// ___@* - matches device with serial or path * using driver/name ___
	if (!strcasecmp(pattern, "all"))
		return true;

	const struct device_drv * const drv = cgpu->drv;
	const char *p = pattern, *p2;
	size_t L;
	int n, i, c = -1;
	int n2;
	int proc_first = -1, proc_last = -1;
	struct cgpu_info *device;

	if (!(strncasecmp(drv->dname, p, (L = strlen(drv->dname)))
	   && strncasecmp(drv-> name, p, (L = strlen(drv-> name)))))
		// dname or name
		p = &pattern[L];
	else
	if (p[0] == 'd' && (isdigit(p[1]) || p[1] == '-'))
		// d#
		++p;
	else
	if (isdigit(p[0]) || p[0] == '@' || p[0] == '-')
		// # or @
		{}
	else
		return false;
	L = p - pattern;
	while (isspace(p[0]))
		++p;

	if (p[0] == '@')
	{
		// Serial/path
		const char * const ser = &p[1];
		for (p = ser; p[0] != '@' && p[0] != '\0'; ++p)
			{}
		p2 = (p[0] == '@') ? &p[1] : p;
		const size_t serlen = (p - ser);
		p = "";
		n = n2 = 0;
		const char * const devpath = cgpu->device_path ?: "";
		const char * const devser = cgpu->dev_serial ?: "";
		if ((!strncmp(devpath, ser, serlen)) && devpath[serlen] == '\0')
			{}  // Match
		else
		if ((!strncmp(devser, ser, serlen)) && devser[serlen] == '\0')
			{}  // Match
		else
		{
			char devpath2[serlen + 1];
			memcpy(devpath2, ser, serlen);
			devpath2[serlen] = '\0';
			// Compare using the NUL-terminated copy; ser itself is not
			// terminated at serlen
			if (!devpaths_match(devpath, devpath2))
				return false;
		}
	}
	else
	{
		if (isdigit(p[0]))
			n = strtol(p, (void*)&p2, 0);
		else
		{
			n = 0;
			p2 = p;
		}
		if (p2[0] == '-')
		{
			++p2;
			if (p2[0] && isdigit(p2[0]))
				n2 = strtol(p2, (void*)&p2, 0);
			else
				n2 = INT_MAX;
		}
		else
			n2 = n;
		if (p == pattern)
		{
			if (!p[0])
				return true;
			if (p2 && p2[0])
				goto invsyntax;
			for (i = n; i <= n2; ++i)
			{
				if (i >= total_devices)
					break;
				if (cgpu == devices[i])
					return true;
			}
			return false;
		}
	}

	if (p2[0])
	{
		proc_first = proc_letter_to_number(&p2[0], &p2);
		if (p2[0] == '-')
		{
			++p2;
			if (p2[0])
				proc_last = proc_letter_to_number(p2, &p2);
			else
				proc_last = INT_MAX;
		}
		else
			proc_last = proc_first;
		if (p2[0])
			goto invsyntax;
	}

	if (L > 1 || tolower(pattern[0]) != 'd' || !p[0])
	{
		if ((L == 3 && !strncasecmp(pattern, drv->name, 3)) ||
		    (!L) ||
		    (L == strlen(drv->dname) && !strncasecmp(pattern, drv->dname, L)))
			{}  // Matched name or dname
		else
			return false;
		if (p[0] && (cgpu->device_id < n || cgpu->device_id > n2))
			return false;
		if (proc_first != -1 && (cgpu->proc_id < proc_first || cgpu->proc_id > proc_last))
			return false;
		return true;
	}

	// d#
	c = -1;
	for (i = 0; ; ++i)
	{
		if (i == total_devices)
			return false;
		if (devices[i]->device != devices[i])
			continue;
		++c;
		if (c < n)
			continue;
		if (c > n2)
			break;
		for (device = devices[i]; device; device = device->next_proc)
		{
			if (proc_first != -1 && (device->proc_id < proc_first || device->proc_id > proc_last))
				continue;
			if (device == cgpu)
				return true;
		}
	}
	return false;

invsyntax:
	applog(LOG_WARNING, "%s: Invalid syntax: %s", __func__, pattern);
	return false;
}
#define TEST_CGPU_MATCH(pattern)  \
	if (!cgpu_match(pattern, &cgpu))  \
		applog(LOG_ERR, "%s: Pattern \"%s\" should have matched!", __func__, pattern);  \
// END TEST_CGPU_MATCH
#define TEST_CGPU_NOMATCH(pattern)  \
	if (cgpu_match(pattern, &cgpu))  \
		applog(LOG_ERR, "%s: Pattern \"%s\" should NOT have matched!", __func__, pattern);  \
// END TEST_CGPU_NOMATCH
static __maybe_unused
void test_cgpu_match()
{
	struct device_drv drv = {
		.dname = "test",
		.name = "TST",
	};
	struct cgpu_info cgpu = {
		.drv = &drv,
		.device = &cgpu,
		.device_id = 1,
		.proc_id = 1,
		.proc_repr = "TST 1b",
	}, cgpu0a = {
		.drv = &drv,
		.device = &cgpu0a,
		.device_id = 0,
		.proc_id = 0,
		.proc_repr = "TST 0a",
	}, cgpu1a = {
		.drv = &drv,
		.device = &cgpu0a,
		.device_id = 1,
		.proc_id = 0,
		.proc_repr = "TST 1a",
	};
	struct cgpu_info *devices_list[3] = {&cgpu0a, &cgpu1a, &cgpu,};
	devices = devices_list;
	total_devices = 3;

	TEST_CGPU_MATCH("all")
	TEST_CGPU_MATCH("d1")
	TEST_CGPU_NOMATCH("d2")
	TEST_CGPU_MATCH("d0-5")
	TEST_CGPU_NOMATCH("d0-0")
	TEST_CGPU_NOMATCH("d2-5")
	TEST_CGPU_MATCH("d-1")
	TEST_CGPU_MATCH("d1-")
	TEST_CGPU_NOMATCH("d-0")
	TEST_CGPU_NOMATCH("d2-")
	TEST_CGPU_MATCH("2")
	TEST_CGPU_NOMATCH("3")
	TEST_CGPU_MATCH("1-2")
	TEST_CGPU_MATCH("2-3")
	TEST_CGPU_NOMATCH("1-1")
	TEST_CGPU_NOMATCH("3-4")
	TEST_CGPU_MATCH("TST")
	TEST_CGPU_MATCH("test")
	TEST_CGPU_MATCH("tst")
	TEST_CGPU_MATCH("TEST")
	TEST_CGPU_NOMATCH("TSF")
	TEST_CGPU_NOMATCH("TS")
	TEST_CGPU_NOMATCH("TSTF")
	TEST_CGPU_MATCH("TST1")
	TEST_CGPU_MATCH("test1")
	TEST_CGPU_MATCH("TST0-1")
	TEST_CGPU_MATCH("TST 1")
	TEST_CGPU_MATCH("TST 1-2")
	TEST_CGPU_MATCH("TEST 1-2")
	TEST_CGPU_NOMATCH("TST2")
	TEST_CGPU_NOMATCH("TST2-3")
	TEST_CGPU_NOMATCH("TST0-0")
	TEST_CGPU_MATCH("TST1b")
	TEST_CGPU_MATCH("tst1b")
	TEST_CGPU_NOMATCH("TST1c")
	TEST_CGPU_NOMATCH("TST1bb")
	TEST_CGPU_MATCH("TST0-1b")
	TEST_CGPU_NOMATCH("TST0-1c")
	TEST_CGPU_MATCH("TST1a-d")
	TEST_CGPU_NOMATCH("TST1a-a")
	TEST_CGPU_NOMATCH("TST1-a")
	TEST_CGPU_NOMATCH("TST1c-z")
	TEST_CGPU_NOMATCH("TST1c-")
	TEST_CGPU_MATCH("@")
	TEST_CGPU_NOMATCH("@abc")
	TEST_CGPU_MATCH("@@b")
	TEST_CGPU_NOMATCH("@@c")
	TEST_CGPU_MATCH("TST@")
	TEST_CGPU_NOMATCH("TST@abc")
	TEST_CGPU_MATCH("TST@@b")
	TEST_CGPU_NOMATCH("TST@@c")
	TEST_CGPU_MATCH("TST@@b-f")
	TEST_CGPU_NOMATCH("TST@@c-f")
	TEST_CGPU_NOMATCH("TST@@-a")

	cgpu.device_path = "/dev/test";
	cgpu.dev_serial = "testy";
	TEST_CGPU_MATCH("TST@/dev/test")
	TEST_CGPU_MATCH("TST@testy")
	TEST_CGPU_NOMATCH("TST@")
	TEST_CGPU_NOMATCH("TST@/dev/test5@b")
	TEST_CGPU_NOMATCH("TST@testy3@b")
	TEST_CGPU_MATCH("TST@/dev/test@b")
	TEST_CGPU_MATCH("TST@testy@b")
	TEST_CGPU_NOMATCH("TST@/dev/test@c")
	TEST_CGPU_NOMATCH("TST@testy@c")

	cgpu.device_path = "usb:000:999";
	TEST_CGPU_MATCH("TST@usb:000:999")

	drv.dname = "test7";
	TEST_CGPU_MATCH("test7")
	TEST_CGPU_MATCH("TEST7")
	TEST_CGPU_NOMATCH("test&")
	TEST_CGPU_MATCH("test7 1-2")
	TEST_CGPU_MATCH("test7@testy@b")
}
static
int cgpu_search(const char * const pattern, const int first)
{
	int i;
	struct cgpu_info *cgpu;
#define CHECK_CGPU_SEARCH  do{  \
	cgpu = get_devices(i);  \
	if (cgpu_match(pattern, cgpu))  \
		return i;  \
}while(0)
	for (i = first; i < total_devices; ++i)
		CHECK_CGPU_SEARCH;
	for (i = 0; i < first; ++i)
		CHECK_CGPU_SEARCH;
#undef CHECK_CGPU_SEARCH
	return -1;
}
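/* Editor's note: cgpu_search scans circularly so that repeated searches can
 * resume where the previous one left off; e.g. with 5 devices and first == 3
 * the probe order is 3, 4, 0, 1, 2.  A return of -1 means no device matched
 * the pattern. */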
static pthread_mutex_t sharelog_lock;
static FILE *sharelog_file = NULL;

struct thr_info *get_thread(int thr_id)
{
	struct thr_info *thr;

	rd_lock(&mining_thr_lock);
	thr = mining_thr[thr_id];
	rd_unlock(&mining_thr_lock);
	return thr;
}

static struct cgpu_info *get_thr_cgpu(int thr_id)
{
	struct thr_info *thr = get_thread(thr_id);

	return thr->cgpu;
}

struct cgpu_info *get_devices(int id)
{
	struct cgpu_info *cgpu;

	rd_lock(&devices_lock);
	cgpu = devices[id];
	rd_unlock(&devices_lock);
	return cgpu;
}
static pthread_mutex_t noncelog_lock = PTHREAD_MUTEX_INITIALIZER;
static FILE *noncelog_file = NULL;

static
void noncelog(const struct work * const work)
{
	const int thr_id = work->thr_id;
	const struct cgpu_info *proc = get_thr_cgpu(thr_id);
	char buf[0x200], hash[65], data[161], midstate[65];
	int rv;
	size_t ret;

	bin2hex(hash, work->hash, 32);
	bin2hex(data, work->data, 80);
	bin2hex(midstate, work->midstate, 32);
	// timestamp,proc,hash,data,midstate
	rv = snprintf(buf, sizeof(buf), "%lu,%s,%s,%s,%s\n",
	              (unsigned long)time(NULL), proc->proc_repr_ns,
	              hash, data, midstate);
	if (unlikely(rv < 1))
	{
		applog(LOG_ERR, "noncelog printf error");
		return;
	}

	mutex_lock(&noncelog_lock);
	ret = fwrite(buf, rv, 1, noncelog_file);
	fflush(noncelog_file);
	mutex_unlock(&noncelog_lock);

	if (ret != 1)
		applog(LOG_ERR, "noncelog fwrite error");
}
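/* Editor's note, illustrative record only: each nonce-log line is CSV in the
 * field order of the format comment above, e.g.
 *   1385070231,TST1b,<64 hex digits>,<160 hex digits>,<64 hex digits>
 * i.e. a Unix timestamp, the processor name, then the share hash, the full
 * 80-byte block header, and the SHA-256 midstate, all hex-encoded. */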
static void sharelog(const char *disposition, const struct work *work)
{
	char target[(sizeof(work->target) * 2) + 1];
	char hash[(sizeof(work->hash) * 2) + 1];
	char data[(sizeof(work->data) * 2) + 1];
	struct cgpu_info *cgpu;
	unsigned long int t;
	struct pool *pool;
	int thr_id, rv;
	char s[1024];
	size_t ret;

	if (!sharelog_file)
		return;

	thr_id = work->thr_id;
	cgpu = get_thr_cgpu(thr_id);
	pool = work->pool;
	t = work->ts_getwork + timer_elapsed(&work->tv_getwork, &work->tv_work_found);
	bin2hex(target, work->target, sizeof(work->target));
	bin2hex(hash, work->hash, sizeof(work->hash));
	bin2hex(data, work->data, sizeof(work->data));

	// timestamp,disposition,target,pool,dev,thr,sharehash,sharedata
	rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->proc_repr_ns, thr_id, hash, data);
	if (rv >= (int)(sizeof(s)))
		// snprintf truncated the record (and already NUL-terminated it);
		// clamp rv so the fwrite below doesn't read past the buffer
		rv = sizeof(s) - 1;
	else if (rv < 0) {
		applog(LOG_ERR, "sharelog printf error");
		return;
	}

	mutex_lock(&sharelog_lock);
	ret = fwrite(s, rv, 1, sharelog_file);
	fflush(sharelog_file);
	mutex_unlock(&sharelog_lock);

	if (ret != 1)
		applog(LOG_ERR, "sharelog fwrite error");
}
static char *getwork_req = "{\"method\": \"getwork\", \"params\": [], \"id\":0}\n";

/* Adjust all the pools' quotas to their greatest common divisor after a pool
 * has been added or the quotas changed. */
void adjust_quota_gcd(void)
{
	unsigned long gcd, lowest_quota = ~0UL, quota;
	struct pool *pool;
	int i;

	for (i = 0; i < total_pools; i++) {
		pool = pools[i];
		quota = pool->quota;
		if (!quota)
			continue;
		if (quota < lowest_quota)
			lowest_quota = quota;
	}

	if (likely(lowest_quota < ~0UL)) {
		gcd = lowest_quota;
		for (i = 0; i < total_pools; i++) {
			pool = pools[i];
			quota = pool->quota;
			if (!quota)
				continue;
			if (quota % gcd) {
				while (quota % gcd)
					gcd--;
				// gcd shrank, so quotas checked earlier may no longer
				// be divisible by it; restart the scan from pool 0
				i = -1;
			}
		}
	} else
		gcd = 1;

	for (i = 0; i < total_pools; i++) {
		pool = pools[i];
		pool->quota_used *= global_quota_gcd;
		pool->quota_used /= gcd;
		pool->quota_gcd = pool->quota / gcd;
	}

	global_quota_gcd = gcd;
	applog(LOG_DEBUG, "Global quota greatest common divisor set to %lu", gcd);
}
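/* Worked example (editor's note): quotas {4, 6} reduce to gcd 2, so the two
 * pools get quota_gcd 2 and 3 and receive work in a 2:3 ratio under
 * --load-balance; quotas {2, 3} reduce to gcd 1 and keep their raw values.
 * Without the restart above, {4, 6} would have terminated at 3, which divides
 * 6 but not 4. */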
/* Return value is ignored if not called from add_pool_details */
struct pool *add_pool(void)
{
	struct pool *pool;

	pool = calloc(sizeof(struct pool), 1);
	if (!pool)
		quit(1, "Failed to malloc pool in add_pool");
	pool->pool_no = pool->prio = total_pools;
	mutex_init(&pool->last_work_lock);
	mutex_init(&pool->pool_lock);
	mutex_init(&pool->pool_test_lock);
	if (unlikely(pthread_cond_init(&pool->cr_cond, NULL)))
		quit(1, "Failed to pthread_cond_init in add_pool");
	cglock_init(&pool->data_lock);
	mutex_init(&pool->stratum_lock);
	timer_unset(&pool->swork.tv_transparency);
	pool->swork.pool = pool;

	/* Make sure the pool doesn't think we've been idle since time 0 */
	pool->tv_idle.tv_sec = ~0UL;

	cgtime(&pool->cgminer_stats.start_tv);

	pool->rpc_proxy = NULL;
	pool->quota = 1;
	pool->sock = INVSOCK;
	pool->lp_socket = CURL_SOCKET_BAD;

	if (opt_benchmark)
	{
		// Don't add to pools array, but immediately remove it
		remove_pool(pool);
		return pool;
	}

	pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2));
	pools[total_pools++] = pool;
	adjust_quota_gcd();

	return pool;
}
/* Pool variant of test and set */
static bool pool_tset(struct pool *pool, bool *var)
{
	bool ret;

	mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = true;
	mutex_unlock(&pool->pool_lock);
	return ret;
}

bool pool_tclear(struct pool *pool, bool *var)
{
	bool ret;

	mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = false;
	mutex_unlock(&pool->pool_lock);
	return ret;
}

struct pool *current_pool(void)
{
	struct pool *pool;

	cg_rlock(&control_lock);
	pool = currentpool;
	cg_runlock(&control_lock);
	return pool;
}

// Copied from ccan/opt/helpers.c
static char *arg_bad(const char *fmt, const char *arg)
{
	char *str = malloc(strlen(fmt) + strlen(arg));
	sprintf(str, fmt, arg);
	return str;
}
static
char *opt_set_floatval(const char *arg, float *f)
{
	char *endp;

	errno = 0;
	*f = strtof(arg, &endp);
	if (*endp || !arg[0])
		return arg_bad("'%s' is not a number", arg);
	if (errno)
		return arg_bad("'%s' is out of range", arg);
	return NULL;
}

static
void opt_show_floatval(char buf[OPT_SHOW_LEN], const float *f)
{
	snprintf(buf, OPT_SHOW_LEN, "%.1f", *f);
}

static
char *set_bool_ignore_arg(const char * const arg, bool * const b)
{
	return opt_set_bool(b);
}

char *set_int_range(const char *arg, int *i, int min, int max)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < min || *i > max)
		return "Value out of range";
	return NULL;
}

static char *set_int_0_to_9999(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_1_to_65535(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 65535);
}

static char *set_int_0_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 10);
}

static char *set_int_1_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 10);
}

char *set_strdup(const char *arg, char **p)
{
	*p = strdup((char *)arg);
	return NULL;
}

#if BLKMAKER_VERSION > 1
static char *set_b58addr(const char *arg, struct _cbscript_t *p)
{
	size_t scriptsz = blkmk_address_to_script(NULL, 0, arg);
	if (!scriptsz)
		return "Invalid address";

	char *script = malloc(scriptsz);
	if (blkmk_address_to_script(script, scriptsz, arg) != scriptsz) {
		free(script);
		return "Failed to convert address to script";
	}

	free(p->data);
	p->data = script;
	p->sz = scriptsz;
	return NULL;
}
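/* Editor's note, illustrative only: the first blkmk_address_to_script call
 * with a NULL buffer is used here purely to measure the required script size.
 * For a typical pay-to-pubkey-hash address the resulting script is the
 * standard 25-byte scriptPubKey
 *   OP_DUP OP_HASH160 <20-byte pubkey hash> OP_EQUALVERIFY OP_CHECKSIG
 * which --coinbase-addr then installs as the payout script for solo-mined
 * block templates. */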
#endif

static
char *set_quit_summary(const char * const arg)
{
	if (!(strcasecmp(arg, "none") && strcasecmp(arg, "no")))
		opt_quit_summary = BQS_NONE;
	else
	if (!(strcasecmp(arg, "devs") && strcasecmp(arg, "devices")))
		opt_quit_summary = BQS_DEVS;
	else
	if (!(strcasecmp(arg, "procs") && strcasecmp(arg, "processors") && strcasecmp(arg, "chips") && strcasecmp(arg, "cores")))
		opt_quit_summary = BQS_PROCS;
	else
	if (!(strcasecmp(arg, "detailed") && strcasecmp(arg, "detail") && strcasecmp(arg, "all")))
		opt_quit_summary = BQS_DETAILED;
	else
		return "Quit summary must be one of none/devs/procs/detailed";
	return NULL;
}

static void pdiff_target_leadzero(void *, double);

char *set_request_diff(const char *arg, float *p)
{
	unsigned char target[32];
	char *e = opt_set_floatval(arg, p);
	if (e)
		return e;

	request_bdiff = (double)*p * 0.9999847412109375;
	pdiff_target_leadzero(target, *p);
	request_target_str = malloc(65);
	bin2hex(request_target_str, target, 32);
	return NULL;
}
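/* Editor's note: the constant above is exactly 65535/65536, the conventional
 * ratio between "bdiff" (difficulty relative to the original bitcoind maximum
 * target 0x00000000ffff0000...) and "pdiff" (relative to the slightly larger
 * pool maximum target 0x00000000ffffffff...).  So --request-diff takes a
 * pdiff value and request_bdiff stores the equivalent bdiff; e.g. pdiff 1.0
 * becomes bdiff ~0.99998474. */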
#ifdef NEED_BFG_LOWL_VCOM
extern struct lowlevel_device_info *_vcom_devinfo_findorcreate(struct lowlevel_device_info **, const char *);

#ifdef WIN32
void _vcom_devinfo_scan_querydosdevice(struct lowlevel_device_info ** const devinfo_list)
{
	char dev[PATH_MAX];
	char *devp = dev;
	size_t bufLen = 0x100;

tryagain: ;
	char buf[bufLen];
	if (!QueryDosDevice(NULL, buf, bufLen)) {
		if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
			bufLen *= 2;
			applog(LOG_DEBUG, "QueryDosDevice returned insufficient buffer error; enlarging to %lx", (unsigned long)bufLen);
			goto tryagain;
		}
		applogr(, LOG_WARNING, "Error occurred trying to enumerate COM ports with QueryDosDevice");
	}
	size_t tLen;
	memcpy(devp, "\\\\.\\", 4);
	devp = &devp[4];
	for (char *t = buf; *t; t += tLen) {
		tLen = strlen(t) + 1;
		if (strncmp("COM", t, 3))
			continue;
		memcpy(devp, t, tLen);
		// NOTE: We depend on _vcom_devinfo_findorcreate to further check that there's a number (and only a number) on the end
		_vcom_devinfo_findorcreate(devinfo_list, dev);
	}
}
#else
void _vcom_devinfo_scan_lsdev(struct lowlevel_device_info ** const devinfo_list)
{
	char dev[PATH_MAX];
	char *devp = dev;
	DIR *D;
	struct dirent *de;
	const char devdir[] = "/dev";
	const size_t devdirlen = sizeof(devdir) - 1;
	char *devpath = devp;
	char *devfile = devpath + devdirlen + 1;

	D = opendir(devdir);
	if (!D)
		applogr(, LOG_DEBUG, "No /dev directory to look for VCOM devices in");
	memcpy(devpath, devdir, devdirlen);
	devpath[devdirlen] = '/';
	while ( (de = readdir(D)) ) {
		if (!strncmp(de->d_name, "cu.", 3)
		    // don't probe Bluetooth devices - causes bus errors and segfaults
		 && strncmp(de->d_name, "cu.Bluetooth", 12))
			goto trydev;
		if (strncmp(de->d_name, "tty", 3))
			continue;
		if (strncmp(&de->d_name[3], "USB", 3) && strncmp(&de->d_name[3], "ACM", 3))
			continue;
trydev:
		strcpy(devfile, de->d_name);
		_vcom_devinfo_findorcreate(devinfo_list, dev);
	}
	closedir(D);
}
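/* Editor's note: on POSIX this picks up /dev/ttyUSB* and /dev/ttyACM* serial
 * adapters plus macOS-style /dev/cu.* callout devices (skipping cu.Bluetooth*
 * per the comment above).  So e.g. /dev/ttyUSB0 and /dev/cu.usbserial would
 * both be offered to the VCOM probing code, while /dev/cu.Bluetooth-Modem
 * would not. */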
#endif
#endif

static char *add_serial(const char *arg)
{
	string_elist_add(arg, &scan_devices);
	return NULL;
}

static
char *opt_string_elist_add(const char *arg, struct string_elist **elist)
{
	string_elist_add(arg, elist);
	return NULL;
}

bool get_intrange(const char *arg, int *val1, int *val2)
{
	// NOTE: This could be done with sscanf, but its %n is broken in strange ways on Windows
	char *p, *p2;

	*val1 = strtol(arg, &p, 0);
	if (arg == p)
		// Zero-length ending number, invalid
		return false;
	while (true)
	{
		if (!p[0])
		{
			*val2 = *val1;
			return true;
		}
		if (p[0] == '-')
			break;
		if (!isspace(p[0]))
			// Garbage, invalid
			return false;
		++p;
	}
	p2 = &p[1];
	*val2 = strtol(p2, &p, 0);
	if (p2 == p)
		// Zero-length ending number, invalid
		return false;
	while (true)
	{
		if (!p[0])
			return true;
		if (!isspace(p[0]))
			// Garbage, invalid
			return false;
		++p;
	}
}
static
void _test_intrange(const char *s, const int v[2])
{
	int a[2];
	if (!get_intrange(s, &a[0], &a[1]))
		applog(LOG_ERR, "Test \"%s\" failed: returned false", s);
	for (int i = 0; i < 2; ++i)
		if (unlikely(a[i] != v[i]))
			applog(LOG_ERR, "Test \"%s\" failed: value %d should be %d but got %d", s, i, v[i], a[i]);
}
#define _test_intrange(s, ...)  _test_intrange(s, (int[]){ __VA_ARGS__ })

static
void _test_intrange_fail(const char *s)
{
	int a[2];
	if (get_intrange(s, &a[0], &a[1]))
		applog(LOG_ERR, "Test !\"%s\" failed: returned true with %d and %d", s, a[0], a[1]);
}

static
void test_intrange()
{
	_test_intrange("-1--2", -1, -2);
	_test_intrange("-1-2", -1, 2);
	_test_intrange("1--2", 1, -2);
	_test_intrange("1-2", 1, 2);
	_test_intrange("111-222", 111, 222);
	_test_intrange(" 11 - 22 ", 11, 22);
	_test_intrange("+11-+22", 11, 22);
	_test_intrange("-1", -1, -1);
	_test_intrange_fail("all");
	_test_intrange_fail("1-");
	_test_intrange_fail("");
	_test_intrange_fail("1-54x");
}
static char *set_devices(char *arg)
{
	if (*arg) {
		if (*arg == '?') {
			opt_display_devs = true;
			return NULL;
		}
	} else
		return "Invalid device parameters";

	string_elist_add(arg, &opt_devices_enabled_list);
	return NULL;
}

static char *set_balance(enum pool_strategy *strategy)
{
	*strategy = POOL_BALANCE;
	return NULL;
}

static char *set_loadbalance(enum pool_strategy *strategy)
{
	*strategy = POOL_LOADBALANCE;
	return NULL;
}

static char *set_rotate(const char *arg, int *i)
{
	pool_strategy = POOL_ROTATE;
	return set_int_range(arg, i, 0, 9999);
}

static char *set_rr(enum pool_strategy *strategy)
{
	*strategy = POOL_ROUNDROBIN;
	return NULL;
}

/* Detect that url is for a stratum protocol either via the presence of
 * stratum+tcp or by detecting a stratum server response */
bool detect_stratum(struct pool *pool, char *url)
{
	if (!extract_sockaddr(url, &pool->sockaddr_url, &pool->stratum_port))
		return false;

	if (!strncasecmp(url, "stratum+tcp://", 14)) {
		pool->rpc_url = strdup(url);
		pool->has_stratum = true;
		pool->stratum_url = pool->sockaddr_url;
		return true;
	}

	return false;
}

static struct pool *add_url(void)
{
	total_urls++;
	if (total_urls > total_pools)
		add_pool();
	return pools[total_urls - 1];
}

static void setup_url(struct pool *pool, char *arg)
{
	if (detect_stratum(pool, arg))
		return;

	opt_set_charp(arg, &pool->rpc_url);
	if (strncmp(arg, "http://", 7) &&
	    strncmp(arg, "https://", 8)) {
		const size_t L = strlen(arg);
		char *httpinput;

		httpinput = malloc(8 + L);
		if (!httpinput)
			quit(1, "Failed to malloc httpinput");
		sprintf(httpinput, "http://%s", arg);
		pool->rpc_url = httpinput;
	}
}

static char *set_url(char *arg)
{
	struct pool *pool = add_url();

	setup_url(pool, arg);
	return NULL;
}

static char *set_quota(char *arg)
{
	char *semicolon = strchr(arg, ';'), *url;
	int len, qlen, quota;
	struct pool *pool;

	if (!semicolon)
		return "No semicolon separated quota;URL pair found";
	len = strlen(arg);
	*semicolon = '\0';
	qlen = strlen(arg);
	if (!qlen)
		return "No parameter for quota found";
	len -= qlen + 1;
	if (len < 1)
		return "No parameter for URL found";
	quota = atoi(arg);
	if (quota < 0)
		return "Invalid negative parameter for quota set";
	url = arg + qlen + 1;
	pool = add_url();
	setup_url(pool, url);
	pool->quota = quota;
	applog(LOG_INFO, "Setting pool %d to quota %d", pool->pool_no, pool->quota);
	adjust_quota_gcd();

	return NULL;
}
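/* Editor's note, illustrative invocation: --quota "2;stratum+tcp://pool.example:3333"
 * splits at the semicolon, adds the URL as a new pool, and gives it quota 2;
 * a pool added with a plain --url keeps the default quota of 1, so under
 * --load-balance the two would share work in a 2:1 ratio. */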
static char *set_user(const char *arg)
{
	struct pool *pool;

	total_users++;
	if (total_users > total_pools)
		add_pool();
	pool = pools[total_users - 1];

	opt_set_charp(arg, &pool->rpc_user);

	return NULL;
}

static char *set_pass(const char *arg)
{
	struct pool *pool;

	total_passes++;
	if (total_passes > total_pools)
		add_pool();
	pool = pools[total_passes - 1];

	opt_set_charp(arg, &pool->rpc_pass);

	return NULL;
}

static char *set_userpass(const char *arg)
{
	struct pool *pool;
	char *updup;

	if (total_users != total_passes)
		return "User + pass options must be balanced before userpass";
	++total_users;
	++total_passes;
	if (total_users > total_pools)
		add_pool();
	pool = pools[total_users - 1];

	updup = strdup(arg);
	opt_set_charp(arg, &pool->rpc_userpass);
	pool->rpc_user = updup;
	pool->rpc_pass = strchr(updup, ':');
	if (pool->rpc_pass)
		pool->rpc_pass++[0] = '\0';
	else
		pool->rpc_pass = &updup[strlen(updup)];

	return NULL;
}
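/* Editor's note: set_userpass keeps the full "user:pass" string for the RPC
 * userpass field and splits a duplicate at the first colon for the separate
 * user/pass fields; e.g. "worker.1:x" yields rpc_user "worker.1" and rpc_pass
 * "x", while a colon-less argument leaves rpc_pass pointing at an empty
 * string rather than NULL. */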
static char *set_pool_priority(const char *arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Usage of --pool-priority before pools are defined does not make sense";
	pool = pools[total_pools - 1];

	opt_set_intval(arg, &pool->prio);

	return NULL;
}

static char *set_pool_proxy(const char *arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Usage of --pool-proxy before pools are defined does not make sense";
	if (!our_curl_supports_proxy_uris())
		return "Your installed cURL library does not support proxy URIs. At least version 7.21.7 is required.";
	pool = pools[total_pools - 1];

	opt_set_charp(arg, &pool->rpc_proxy);

	return NULL;
}

static char *set_pool_force_rollntime(const char *arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Usage of --force-rollntime before pools are defined does not make sense";
	pool = pools[total_pools - 1];

	opt_set_intval(arg, &pool->force_rollntime);

	return NULL;
}

static char *enable_debug(bool *flag)
{
	*flag = true;
	opt_debug_console = true;
	/* Turn on verbose output, too. */
	opt_log_output = true;
	return NULL;
}
static char *set_schedtime(const char *arg, struct schedtime *st)
{
	if (sscanf(arg, "%d:%d", &st->tm.tm_hour, &st->tm.tm_min) != 2)
	{
		if (strcasecmp(arg, "now"))
			return "Invalid time set, should be HH:MM";
	} else
		st->tm.tm_sec = 0;
	if (st->tm.tm_hour > 23 || st->tm.tm_min > 59 || st->tm.tm_hour < 0 || st->tm.tm_min < 0)
		return "Invalid time set.";
	st->enable = true;
	return NULL;
}
static
char *set_log_file(char *arg)
{
	char *r = "";
	long int i = strtol(arg, &r, 10);
	int fd, stderr_fd = fileno(stderr);

	if ((!*r) && i >= 0 && i <= INT_MAX)
		fd = i;
	else
	if (!strcmp(arg, "-"))
	{
		fd = fileno(stdout);
		if (unlikely(fd == -1))
			return "Standard output missing for log-file";
	}
	else
	{
		fd = open(arg, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR);
		if (unlikely(fd == -1))
			return "Failed to open log-file";
	}

	close(stderr_fd);
	if (unlikely(-1 == dup2(fd, stderr_fd)))
		return "Failed to dup2 for log-file";
	close(fd);

	return NULL;
}
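/* Editor's note, illustrative usages: --log-file miner.log appends to a file,
 * --log-file - redirects log output to stdout, and a bare number such as
 * --log-file 3 adopts an already-open file descriptor (handy when a wrapper
 * script sets one up).  In every case the chosen fd is dup2()'d over stderr,
 * where the log messages are written. */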
static
char *_bfgopt_set_file(const char *arg, FILE **F, const char *mode, const char *purpose)
{
	char *r = "";
	long int i = strtol(arg, &r, 10);
	static char *err = NULL;
	const size_t errbufsz = 0x100;

	free(err);
	err = NULL;

	if ((!*r) && i >= 0 && i <= INT_MAX) {
		*F = fdopen((int)i, mode);
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Failed to open fd %d for %s",
			         (int)i, purpose);
			return err;
		}
	} else if (!strcmp(arg, "-")) {
		*F = (mode[0] == 'a') ? stdout : stdin;
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Standard %sput missing for %s",
			         (mode[0] == 'a') ? "out" : "in", purpose);
			return err;
		}
	} else {
		*F = fopen(arg, mode);
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Failed to open %s for %s",
			         arg, purpose);
			return err;
		}
	}
	return NULL;
}
static char *set_noncelog(char *arg)
{
	return _bfgopt_set_file(arg, &noncelog_file, "a", "nonce log");
}

static char *set_sharelog(char *arg)
{
	return _bfgopt_set_file(arg, &sharelog_file, "a", "share log");
}

static
void _add_set_device_option(const char * const func, const char * const buf)
{
	applog(LOG_DEBUG, "%s: Using --set-device %s", func, buf);
	string_elist_add(buf, &opt_set_device_list);
}

#define add_set_device_option(...)  do{  \
	char _tmp1718[0x100];  \
	snprintf(_tmp1718, sizeof(_tmp1718), __VA_ARGS__);  \
	_add_set_device_option(__func__, _tmp1718);  \
}while(0)

char *set_temp_cutoff(char *arg)
{
	if (strchr(arg, ','))
		return "temp-cutoff no longer supports comma-delimited syntax, use --set-device for better control";
	applog(LOG_WARNING, "temp-cutoff is deprecated! Use --set-device for better control");
	add_set_device_option("all:temp-cutoff=%s", arg);
	return NULL;
}
char *set_temp_target(char *arg)
{
	if (strchr(arg, ','))
		return "temp-target no longer supports comma-delimited syntax, use --set-device for better control";
	applog(LOG_WARNING, "temp-target is deprecated! Use --set-device for better control");
	add_set_device_option("all:temp-target=%s", arg);
	return NULL;
}

#ifdef HAVE_OPENCL
static
char *set_no_opencl_binaries(__maybe_unused void * const dummy)
{
	applog(LOG_WARNING, "The --no-opencl-binaries option is deprecated! Use --set-device OCL:binary=no");
	add_set_device_option("OCL:binary=no");
	return NULL;
}
#endif

static
char *disable_pool_redirect(__maybe_unused void * const dummy)
{
	opt_disable_client_reconnect = true;
	want_stratum = false;
	return NULL;
}

static char *set_api_allow(const char *arg)
{
	opt_set_charp(arg, &opt_api_allow);
	return NULL;
}

static char *set_api_groups(const char *arg)
{
	opt_set_charp(arg, &opt_api_groups);
	return NULL;
}

static char *set_api_description(const char *arg)
{
	opt_set_charp(arg, &opt_api_description);
	return NULL;
}

static char *set_api_mcast_des(const char *arg)
{
	opt_set_charp(arg, &opt_api_mcast_des);
	return NULL;
}

#ifdef USE_ICARUS
extern const struct bfg_set_device_definition icarus_set_device_funcs[];

static char *set_icarus_options(const char *arg)
{
	if (strchr(arg, ','))
		return "icarus-options no longer supports comma-delimited syntax, see README.FPGA for better control";
	applog(LOG_WARNING, "icarus-options is deprecated! See README.FPGA for better control");
	char *opts = strdup(arg), *argdup;
	argdup = opts;
	const struct bfg_set_device_definition *sdf = icarus_set_device_funcs;
	const char *drivers[] = {"antminer", "cairnsmore", "erupter", "icarus"};
	char *saveptr, *opt;
	for (int i = 0; i < 4; ++i, ++sdf)
	{
		opt = strtok_r(opts, ":", &saveptr);
		opts = NULL;
		if (!opt)
			break;
		if (!opt[0])
			continue;
		for (int j = 0; j < 4; ++j)
			add_set_device_option("%s:%s=%s", drivers[j], sdf->optname, opt);
	}
	free(argdup);
	return NULL;
}
static char *set_icarus_timing(const char *arg)
{
	if (strchr(arg, ','))
		return "icarus-timing no longer supports comma-delimited syntax, see README.FPGA for better control";
	applog(LOG_WARNING, "icarus-timing is deprecated! See README.FPGA for better control");
	const char *drivers[] = {"antminer", "cairnsmore", "erupter", "icarus"};
	for (int j = 0; j < 4; ++j)
		add_set_device_option("%s:timing=%s", drivers[j], arg);
	return NULL;
}
#endif

#ifdef USE_AVALON
extern const struct bfg_set_device_definition avalon_set_device_funcs[];

static char *set_avalon_options(const char *arg)
{
	if (strchr(arg, ','))
		return "avalon-options no longer supports comma-delimited syntax, see README.FPGA for better control";
	applog(LOG_WARNING, "avalon-options is deprecated! See README.FPGA for better control");
	char *opts = strdup(arg), *argdup;
	argdup = opts;
	const struct bfg_set_device_definition *sdf = avalon_set_device_funcs;
	char *saveptr, *opt;
	for (int i = 0; i < 5; ++i, ++sdf)
	{
		opt = strtok_r(opts, ":", &saveptr);
		opts = NULL;
		if (!opt)
			break;
		if (!opt[0])
			continue;
		add_set_device_option("avalon:%s=%s", sdf->optname, opt);
	}
	free(argdup);
	return NULL;
}
#endif

#ifdef USE_KLONDIKE
static char *set_klondike_options(const char *arg)
{
	int hashclock;
	double temptarget;
	switch (sscanf(arg, "%d:%lf", &hashclock, &temptarget))
	{
		default:
			return "Unrecognised --klondike-options";
		case 2:
			add_set_device_option("klondike:temp-target=%lf", temptarget);
			// fallthru
		case 1:
			add_set_device_option("klondike:clock=%d", hashclock);
	}
	applog(LOG_WARNING, "klondike-options is deprecated! Use --set-device for better control");
	return NULL;
}
#endif

__maybe_unused
static char *set_null(const char __maybe_unused *arg)
{
	return NULL;
}
/* These options are available from config file or commandline */
static struct opt_table opt_config_table[] = {
#ifdef WANT_CPUMINE
	OPT_WITH_ARG("--algo",
		set_algo, show_algo, &opt_algo,
		"Specify sha256 implementation for CPU mining:\n"
		"\tfastauto*\tQuick benchmark at startup to pick a working algorithm\n"
		"\tauto\t\tBenchmark at startup and pick fastest algorithm"
		"\n\tc\t\tLinux kernel sha256, implemented in C"
#ifdef WANT_SSE2_4WAY
		"\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
		"\n\tvia\t\tVIA padlock implementation"
#endif
		"\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
		"\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8632_SSE2
		"\n\tsse2_32\t\tSSE2 32 bit implementation for i386 machines"
#endif
#ifdef WANT_X8664_SSE2
		"\n\tsse2_64\t\tSSE2 64 bit implementation for x86_64 machines"
#endif
#ifdef WANT_X8664_SSE4
		"\n\tsse4_64\t\tSSE4.1 64 bit implementation for x86_64 machines"
#endif
#ifdef WANT_ALTIVEC_4WAY
		"\n\taltivec_4way\tAltivec implementation for PowerPC G4 and G5 machines"
#endif
	),
	OPT_WITH_ARG("-a",
		set_algo, show_algo, &opt_algo,
		opt_hidden),
#endif
	OPT_WITH_ARG("--api-allow",
		set_api_allow, NULL, NULL,
		"Allow API access only to the given list of [G:]IP[/Prefix] addresses[/subnets]"),
	OPT_WITH_ARG("--api-description",
		set_api_description, NULL, NULL,
		"Description placed in the API status header, default: BFGMiner version"),
	OPT_WITH_ARG("--api-groups",
		set_api_groups, NULL, NULL,
		"API one letter groups G:cmd:cmd[,P:cmd:*...] defining the cmds a group can use"),
	OPT_WITHOUT_ARG("--api-listen",
		opt_set_bool, &opt_api_listen,
		"Enable API, default: disabled"),
	OPT_WITHOUT_ARG("--api-mcast",
		opt_set_bool, &opt_api_mcast,
		"Enable API Multicast listener, default: disabled"),
	OPT_WITH_ARG("--api-mcast-addr",
		opt_set_charp, opt_show_charp, &opt_api_mcast_addr,
		"API Multicast listen address"),
	OPT_WITH_ARG("--api-mcast-code",
		opt_set_charp, opt_show_charp, &opt_api_mcast_code,
		"Code expected in the API Multicast message, don't use '-'"),
	OPT_WITH_ARG("--api-mcast-des",
		set_api_mcast_des, NULL, NULL,
		"Description appended to the API Multicast reply, default: ''"),
	OPT_WITH_ARG("--api-mcast-port",
		set_int_1_to_65535, opt_show_intval, &opt_api_mcast_port,
		"API Multicast listen port"),
	OPT_WITHOUT_ARG("--api-network",
		opt_set_bool, &opt_api_network,
		"Allow API (if enabled) to listen on/for any address, default: only 127.0.0.1"),
	OPT_WITH_ARG("--api-port",
		set_int_1_to_65535, opt_show_intval, &opt_api_port,
		"Port number of miner API"),
#ifdef HAVE_ADL
	OPT_WITHOUT_ARG("--auto-fan",
		opt_set_bool, &opt_autofan,
		opt_hidden),
	OPT_WITHOUT_ARG("--auto-gpu",
		opt_set_bool, &opt_autoengine,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--balance",
		set_balance, &pool_strategy,
		"Change multipool strategy from failover to even share balance"),
	OPT_WITHOUT_ARG("--benchmark",
		opt_set_bool, &opt_benchmark,
		"Run BFGMiner in benchmark mode - produces no shares"),
#if defined(USE_BITFORCE)
	OPT_WITHOUT_ARG("--bfl-range",
		opt_set_bool, &opt_bfl_noncerange,
		"Use nonce range on bitforce devices if supported"),
#endif
#ifdef HAVE_CHROOT
	OPT_WITH_ARG("--chroot-dir",
		opt_set_charp, NULL, &chroot_dir,
		"Chroot to a directory right after startup"),
#endif
	OPT_WITH_ARG("--cmd-idle",
		opt_set_charp, NULL, &cmd_idle,
		"Execute a command when a device is allowed to be idle (rest or wait)"),
	OPT_WITH_ARG("--cmd-sick",
		opt_set_charp, NULL, &cmd_sick,
		"Execute a command when a device is declared sick"),
	OPT_WITH_ARG("--cmd-dead",
		opt_set_charp, NULL, &cmd_dead,
		"Execute a command when a device is declared dead"),
#if BLKMAKER_VERSION > 1
	OPT_WITH_ARG("--coinbase-addr",
		set_b58addr, NULL, &opt_coinbase_script,
		"Set coinbase payout address for solo mining"),
	OPT_WITH_ARG("--coinbase-address|--coinbase-payout|--cbaddress|--cbaddr|--cb-address|--cb-addr|--payout",
		set_b58addr, NULL, &opt_coinbase_script,
		opt_hidden),
#endif
#if BLKMAKER_VERSION > 0
	OPT_WITH_ARG("--coinbase-sig",
		set_strdup, NULL, &opt_coinbase_sig,
		"Set coinbase signature when possible"),
	OPT_WITH_ARG("--coinbase|--cbsig|--cb-sig|--cb|--prayer",
		set_strdup, NULL, &opt_coinbase_sig,
		opt_hidden),
#endif
#ifdef HAVE_CURSES
	OPT_WITHOUT_ARG("--compact",
		opt_set_bool, &opt_compact,
		"Use compact display without per device statistics"),
#endif
#ifdef WANT_CPUMINE
	OPT_WITH_ARG("--cpu-threads",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		"Number of miner CPU threads"),
	OPT_WITH_ARG("-t",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--debug|-D",
		enable_debug, &opt_debug,
		"Enable debug output"),
	OPT_WITHOUT_ARG("--debuglog",
		opt_set_bool, &opt_debug,
		"Enable debug logging"),
	OPT_WITHOUT_ARG("--device-protocol-dump",
		opt_set_bool, &opt_dev_protocol,
		"Verbose dump of device protocol-level activities"),
	OPT_WITH_ARG("--device|-d",
		set_devices, NULL, NULL,
		"Enable only devices matching pattern (default: all)"),
	OPT_WITHOUT_ARG("--disable-rejecting",
		opt_set_bool, &opt_disable_pool,
		"Automatically disable pools that continually reject shares"),
#ifdef USE_LIBMICROHTTPD
	OPT_WITH_ARG("--http-port",
		opt_set_intval, opt_show_intval, &httpsrv_port,
		"Port number to listen on for HTTP getwork miners (-1 means disabled)"),
#endif
	OPT_WITH_ARG("--expiry",
		set_int_0_to_9999, opt_show_intval, &opt_expiry,
		"Upper bound on how many seconds after getting work we consider a share from it stale (w/o longpoll active)"),
	OPT_WITH_ARG("-E",
		set_int_0_to_9999, opt_show_intval, &opt_expiry,
		opt_hidden),
	OPT_WITH_ARG("--expiry-lp",
		set_int_0_to_9999, opt_show_intval, &opt_expiry_lp,
		"Upper bound on how many seconds after getting work we consider a share from it stale (with longpoll active)"),
	OPT_WITHOUT_ARG("--failover-only",
		opt_set_bool, &opt_fail_only,
		"Don't leak work to backup pools when primary pool is lagging"),
	OPT_WITH_ARG("--failover-switch-delay",
		set_int_1_to_65535, opt_show_intval, &opt_fail_switch_delay,
		"Delay in seconds before switching back to a failed pool"),
#ifdef USE_FPGA
	OPT_WITHOUT_ARG("--force-dev-init",
		opt_set_bool, &opt_force_dev_init,
		"Always initialize devices when possible (such as bitstream uploads to some FPGAs)"),
#endif
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--gpu-dyninterval",
		set_int_1_to_65535, opt_show_intval, &opt_dynamic_interval,
		opt_hidden),
	OPT_WITH_ARG("--gpu-platform",
		set_int_0_to_9999, opt_show_intval, &opt_platform_id,
		"Select OpenCL platform ID to use for GPU mining"),
	OPT_WITH_ARG("--gpu-threads|-g",
		set_gpu_threads, opt_show_intval, &opt_g_threads,
		opt_hidden),
#ifdef HAVE_ADL
	OPT_WITH_ARG("--gpu-engine",
		set_gpu_engine, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-fan",
		set_gpu_fan, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-map",
		set_gpu_map, NULL, NULL,
		"Map OpenCL to ADL device order manually, paired CSV (e.g. 1:0,2:1 maps OpenCL 1 to ADL 0, 2 to 1)"),
	OPT_WITH_ARG("--gpu-memclock",
		set_gpu_memclock, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-memdiff",
		set_gpu_memdiff, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-powertune",
		set_gpu_powertune, NULL, NULL,
		opt_hidden),
	OPT_WITHOUT_ARG("--gpu-reorder",
		opt_set_bool, &opt_reorder,
		"Attempt to reorder GPU devices according to PCI Bus ID"),
	OPT_WITH_ARG("--gpu-vddc",
		set_gpu_vddc, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_SCRYPT
	OPT_WITH_ARG("--lookup-gap",
		set_lookup_gap, NULL, NULL,
		opt_hidden),
#endif
	OPT_WITH_ARG("--intensity|-I",
		set_intensity, NULL, NULL,
		opt_hidden),
#endif
#if defined(HAVE_OPENCL) || defined(USE_MODMINER) || defined(USE_X6500) || defined(USE_ZTEX)
	OPT_WITH_ARG("--kernel-path",
		opt_set_charp, opt_show_charp, &opt_kernel_path,
		"Specify a path to where bitstream and kernel files are"),
	OPT_WITH_ARG("-K",
		opt_set_charp, opt_show_charp, &opt_kernel_path,
		opt_hidden),
#endif
#ifdef HAVE_OPENCL
	OPT_WITH_ARG("--kernel|-k",
		set_kernel, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_ICARUS
	OPT_WITH_ARG("--icarus-options",
		set_icarus_options, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--icarus-timing",
		set_icarus_timing, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_AVALON
	OPT_WITH_ARG("--avalon-options",
		set_avalon_options, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_KLONDIKE
	OPT_WITH_ARG("--klondike-options",
		set_klondike_options, NULL, NULL,
		"Set klondike options clock:temptarget"),
#endif
	OPT_WITHOUT_ARG("--load-balance",
		set_loadbalance, &pool_strategy,
		"Change multipool strategy from failover to quota based balance"),
	OPT_WITH_ARG("--log|-l",
		set_int_0_to_9999, opt_show_intval, &opt_log_interval,
		"Interval in seconds between log output"),
	OPT_WITH_ARG("--log-file|-L",
		set_log_file, NULL, NULL,
		"Append log file for output messages"),
	OPT_WITH_ARG("--logfile",
		set_log_file, NULL, NULL,
		opt_hidden),
	OPT_WITHOUT_ARG("--log-microseconds",
		opt_set_bool, &opt_log_microseconds,
		"Include microseconds in log output"),
#if defined(unix) || defined(__APPLE__)
	OPT_WITH_ARG("--monitor|-m",
		opt_set_charp, NULL, &opt_stderr_cmd,
		"Use custom pipe cmd for output messages"),
#endif // defined(unix)
	OPT_WITHOUT_ARG("--net-delay",
		opt_set_bool, &opt_delaynet,
		"Impose small delays in networking to avoid overloading slow routers"),
	OPT_WITHOUT_ARG("--no-adl",
		opt_set_bool, &opt_noadl,
#ifdef HAVE_ADL
		"Disable the ATI display library used for monitoring and setting GPU parameters"
#else
		opt_hidden
#endif
	),
	OPT_WITHOUT_ARG("--no-gbt",
		opt_set_invbool, &want_gbt,
		"Disable getblocktemplate support"),
	OPT_WITHOUT_ARG("--no-getwork",
		opt_set_invbool, &want_getwork,
		"Disable getwork support"),
	OPT_WITHOUT_ARG("--no-hotplug",
#ifdef HAVE_BFG_HOTPLUG
		opt_set_invbool, &opt_hotplug,
		"Disable hotplug detection"
#else
		set_null, &opt_hotplug,
		opt_hidden
#endif
	),
	OPT_WITHOUT_ARG("--no-longpoll",
		opt_set_invbool, &want_longpoll,
		"Disable X-Long-Polling support"),
	OPT_WITHOUT_ARG("--no-pool-disable",
		opt_set_invbool, &opt_disable_pool,
		opt_hidden),
	OPT_WITHOUT_ARG("--no-client-reconnect",
		opt_set_invbool, &opt_disable_client_reconnect,
		opt_hidden),
	OPT_WITHOUT_ARG("--no-pool-redirect",
		disable_pool_redirect, NULL,
		"Ignore pool requests to redirect to another server"),
	OPT_WITHOUT_ARG("--no-restart",
		opt_set_invbool, &opt_restart,
		"Do not attempt to restart devices that hang"
	),
	OPT_WITHOUT_ARG("--no-show-processors",
		opt_set_invbool, &opt_show_procs,
		opt_hidden),
	OPT_WITHOUT_ARG("--no-show-procs",
		opt_set_invbool, &opt_show_procs,
		opt_hidden),
	OPT_WITHOUT_ARG("--no-stratum",
		opt_set_invbool, &want_stratum,
		"Disable Stratum detection"),
	OPT_WITHOUT_ARG("--no-submit-stale",
		opt_set_invbool, &opt_submit_stale,
		"Don't submit shares if they are detected as stale"),
  1964. #ifdef HAVE_OPENCL
  1965. OPT_WITHOUT_ARG("--no-opencl-binaries",
  1966. set_no_opencl_binaries, NULL,
  1967. opt_hidden),
  1968. #endif
  1969. OPT_WITHOUT_ARG("--no-unicode",
  1970. #ifdef USE_UNICODE
  1971. opt_set_invbool, &use_unicode,
  1972. "Don't use Unicode characters in TUI"
  1973. #else
  1974. set_null, &use_unicode,
  1975. opt_hidden
  1976. #endif
  1977. ),
  1978. OPT_WITH_ARG("--noncelog",
  1979. set_noncelog, NULL, NULL,
  1980. "Create log of all nonces found"),
  1981. OPT_WITH_ARG("--pass|-p",
  1982. set_pass, NULL, NULL,
  1983. "Password for bitcoin JSON-RPC server"),
  1984. OPT_WITHOUT_ARG("--per-device-stats",
  1985. opt_set_bool, &want_per_device_stats,
  1986. "Force verbose mode and output per-device statistics"),
  1987. OPT_WITH_ARG("--userpass|-O", // duplicate to ensure config loads it before pool-priority
  1988. set_userpass, NULL, NULL,
  1989. opt_hidden),
  1990. OPT_WITH_ARG("--pool-priority",
  1991. set_pool_priority, NULL, NULL,
  1992. "Priority for just the previous-defined pool"),
  1993. OPT_WITH_ARG("--pool-proxy|-x",
  1994. set_pool_proxy, NULL, NULL,
  1995. "Proxy URI to use for connecting to just the previous-defined pool"),
  1996. OPT_WITH_ARG("--force-rollntime", // NOTE: must be after --pass for config file ordering
  1997. set_pool_force_rollntime, NULL, NULL,
  1998. opt_hidden),
  1999. OPT_WITHOUT_ARG("--protocol-dump|-P",
  2000. opt_set_bool, &opt_protocol,
  2001. "Verbose dump of protocol-level activities"),
  2002. OPT_WITH_ARG("--queue|-Q",
  2003. set_int_0_to_9999, opt_show_intval, &opt_queue,
  2004. "Minimum number of work items to have queued (0+)"),
  2005. OPT_WITHOUT_ARG("--quiet|-q",
  2006. opt_set_bool, &opt_quiet,
  2007. "Disable logging output, display status and errors"),
  2008. OPT_WITHOUT_ARG("--quiet-work-updates|--quiet-work-update",
  2009. opt_set_bool, &opt_quiet_work_updates,
  2010. opt_hidden),
  2011. OPT_WITH_ARG("--quit-summary",
  2012. set_quit_summary, NULL, NULL,
  2013. "Summary printed when you quit: none/devs/procs/detailed"),
  2014. OPT_WITH_ARG("--quota|-U",
  2015. set_quota, NULL, NULL,
  2016. "quota;URL combination for server with load-balance strategy quotas"),
  2017. OPT_WITHOUT_ARG("--real-quiet",
  2018. opt_set_bool, &opt_realquiet,
  2019. "Disable all output"),
  2020. OPT_WITH_ARG("--request-diff",
  2021. set_request_diff, opt_show_floatval, &request_pdiff,
  2022. "Request a specific difficulty from pools"),
  2023. OPT_WITH_ARG("--retries",
  2024. opt_set_intval, opt_show_intval, &opt_retries,
  2025. "Number of times to retry failed submissions before giving up (-1 means never)"),
  2026. OPT_WITH_ARG("--retry-pause",
  2027. set_null, NULL, NULL,
  2028. opt_hidden),
  2029. OPT_WITH_ARG("--rotate",
  2030. set_rotate, opt_show_intval, &opt_rotate_period,
  2031. "Change multipool strategy from failover to regularly rotate at N minutes"),
  2032. OPT_WITHOUT_ARG("--round-robin",
  2033. set_rr, &pool_strategy,
  2034. "Change multipool strategy from failover to round robin on failure"),
  2035. OPT_WITH_ARG("--scan|-S",
  2036. add_serial, NULL, NULL,
  2037. "Configure how to scan for mining devices"),
  2038. OPT_WITH_ARG("--scan-device|--scan-serial|--devscan",
  2039. add_serial, NULL, NULL,
  2040. opt_hidden),
  2041. OPT_WITH_ARG("--scan-time",
  2042. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2043. "Upper bound on time spent scanning current work, in seconds"),
  2044. OPT_WITH_ARG("-s",
  2045. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2046. opt_hidden),
  2047. OPT_WITH_ARG("--scantime",
  2048. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2049. opt_hidden),
  2050. OPT_WITH_ARG("--sched-start",
  2051. set_schedtime, NULL, &schedstart,
  2052. "Set a time of day in HH:MM to start mining (a once off without a stop time)"),
  2053. OPT_WITH_ARG("--sched-stop",
  2054. set_schedtime, NULL, &schedstop,
  2055. "Set a time of day in HH:MM to stop mining (will quit without a start time)"),
  2056. #ifdef USE_SCRYPT
  2057. OPT_WITHOUT_ARG("--scrypt",
  2058. opt_set_bool, &opt_scrypt,
  2059. "Use the scrypt algorithm for mining (non-bitcoin)"),
  2060. #endif
  2061. OPT_WITH_ARG("--set-device|--set",
  2062. opt_string_elist_add, NULL, &opt_set_device_list,
  2063. "Set default parameters on devices; eg, NFY:osc6_bits=50"),
  2064. #if defined(USE_SCRYPT) && defined(HAVE_OPENCL)
  2065. OPT_WITH_ARG("--shaders",
  2066. set_shaders, NULL, NULL,
  2067. opt_hidden),
  2068. #endif
  2069. #ifdef HAVE_PWD_H
  2070. OPT_WITH_ARG("--setuid",
  2071. opt_set_charp, NULL, &opt_setuid,
  2072. "Username of an unprivileged user to run as"),
  2073. #endif
  2074. OPT_WITH_ARG("--sharelog",
  2075. set_sharelog, NULL, NULL,
  2076. "Append share log to file"),
  2077. OPT_WITH_ARG("--shares",
  2078. opt_set_floatval, NULL, &opt_shares,
  2079. "Quit after mining 2^32 * N hashes worth of shares (default: unlimited)"),
  2080. OPT_WITHOUT_ARG("--show-processors",
  2081. opt_set_bool, &opt_show_procs,
  2082. "Show per processor statistics in summary"),
  2083. OPT_WITHOUT_ARG("--show-procs",
  2084. opt_set_bool, &opt_show_procs,
  2085. opt_hidden),
  2086. OPT_WITH_ARG("--skip-security-checks",
  2087. set_int_0_to_9999, NULL, &opt_skip_checks,
  2088. "Skip security checks sometimes to save bandwidth; only check 1/<arg>th of the time (default: never skip)"),
  2089. OPT_WITH_ARG("--socks-proxy",
  2090. opt_set_charp, NULL, &opt_socks_proxy,
  2091. "Set socks proxy (host:port)"),
  2092. #ifdef USE_LIBEVENT
  2093. OPT_WITH_ARG("--stratum-port",
  2094. opt_set_intval, opt_show_intval, &stratumsrv_port,
  2095. "Port number to listen on for stratum miners (-1 means disabled)"),
  2096. #endif
  2097. OPT_WITHOUT_ARG("--submit-stale",
  2098. opt_set_bool, &opt_submit_stale,
  2099. opt_hidden),
  2100. OPT_WITH_ARG("--submit-threads",
  2101. opt_set_intval, opt_show_intval, &opt_submit_threads,
  2102. "Minimum number of concurrent share submissions (default: 64)"),
  2103. #ifdef HAVE_SYSLOG_H
  2104. OPT_WITHOUT_ARG("--syslog",
  2105. opt_set_bool, &use_syslog,
  2106. "Use system log for output messages (default: standard error)"),
  2107. #endif
  2108. OPT_WITH_ARG("--temp-cutoff",
  2109. set_temp_cutoff, NULL, &opt_cutofftemp,
  2110. opt_hidden),
  2111. OPT_WITH_ARG("--temp-hysteresis",
  2112. set_int_1_to_10, opt_show_intval, &opt_hysteresis,
  2113. "Set how much the temperature can fluctuate outside limits when automanaging speeds"),
  2114. #ifdef HAVE_ADL
  2115. OPT_WITH_ARG("--temp-overheat",
  2116. set_temp_overheat, opt_show_intval, &opt_overheattemp,
  2117. opt_hidden),
  2118. #endif
  2119. OPT_WITH_ARG("--temp-target",
  2120. set_temp_target, NULL, NULL,
  2121. opt_hidden),
  2122. OPT_WITHOUT_ARG("--text-only|-T",
  2123. opt_set_invbool, &use_curses,
  2124. #ifdef HAVE_CURSES
  2125. "Disable ncurses formatted screen output"
  2126. #else
  2127. opt_hidden
  2128. #endif
  2129. ),
  2130. #if defined(USE_SCRYPT) && defined(HAVE_OPENCL)
  2131. OPT_WITH_ARG("--thread-concurrency",
  2132. set_thread_concurrency, NULL, NULL,
  2133. opt_hidden),
  2134. #endif
  2135. #ifdef USE_UNICODE
  2136. OPT_WITHOUT_ARG("--unicode",
  2137. opt_set_bool, &use_unicode,
  2138. "Use Unicode characters in TUI"),
  2139. #endif
  2140. OPT_WITH_ARG("--url|-o",
  2141. set_url, NULL, NULL,
  2142. "URL for bitcoin JSON-RPC server"),
  2143. OPT_WITH_ARG("--user|-u",
  2144. set_user, NULL, NULL,
  2145. "Username for bitcoin JSON-RPC server"),
  2146. #ifdef HAVE_OPENCL
  2147. OPT_WITH_ARG("--vectors|-v",
  2148. set_vector, NULL, NULL,
  2149. opt_hidden),
  2150. #endif
  2151. OPT_WITHOUT_ARG("--verbose",
  2152. opt_set_bool, &opt_log_output,
  2153. "Log verbose output to stderr as well as status output"),
  2154. OPT_WITHOUT_ARG("--verbose-work-updates|--verbose-work-update",
  2155. opt_set_invbool, &opt_quiet_work_updates,
  2156. opt_hidden),
  2157. OPT_WITHOUT_ARG("--weighed-stats",
  2158. opt_set_bool, &opt_weighed_stats,
  2159. "Display statistics weighed to difficulty 1"),
  2160. #ifdef HAVE_OPENCL
  2161. OPT_WITH_ARG("--worksize|-w",
  2162. set_worksize, NULL, NULL,
  2163. opt_hidden),
  2164. #endif
  2165. OPT_WITHOUT_ARG("--unittest",
  2166. opt_set_bool, &opt_unittest, opt_hidden),
  2167. OPT_WITH_ARG("--userpass|-O",
  2168. set_userpass, NULL, NULL,
  2169. "Username:Password pair for bitcoin JSON-RPC server"),
  2170. OPT_WITHOUT_ARG("--worktime",
  2171. opt_set_bool, &opt_worktime,
  2172. "Display extra work time debug information"),
  2173. OPT_WITH_ARG("--pools",
  2174. opt_set_bool, NULL, NULL, opt_hidden),
  2175. OPT_ENDTABLE
  2176. };
static char *load_config(const char *arg, void __maybe_unused *unused);

static char *parse_config(json_t *config, bool fileconf, int * const fileconf_load_p)
{
	static char err_buf[200];
	struct opt_table *opt;
	json_t *val;

	if (fileconf && !*fileconf_load_p)
		*fileconf_load_p = 1;

	for (opt = opt_config_table; opt->type != OPT_END; opt++) {
		char *p, *name, *sp;

		/* We don't handle subtables. */
		assert(!(opt->type & OPT_SUBTABLE));

		if (!opt->names)
			continue;

		/* Pull apart the option name(s). */
		name = strdup(opt->names);
		for (p = strtok_r(name, "|", &sp); p; p = strtok_r(NULL, "|", &sp)) {
			char *err = "Invalid value";

			/* Ignore short options. */
			if (p[1] != '-')
				continue;

			val = json_object_get(config, p + 2);
			if (!val)
				continue;

			if (opt->type & OPT_HASARG) {
				if (json_is_string(val)) {
					err = opt->cb_arg(json_string_value(val),
							  opt->u.arg);
				} else if (json_is_number(val)) {
					char buf[256], *p, *q;

					snprintf(buf, 256, "%f", json_number_value(val));
					if ( (p = strchr(buf, '.')) ) {
						// Trim /\.0*$/ to work properly with integer-only arguments
						q = p;
						while (*(++q) == '0') {}
						if (*q == '\0')
							*p = '\0';
					}
					err = opt->cb_arg(buf, opt->u.arg);
				} else if (json_is_array(val)) {
					int n, size = json_array_size(val);

					err = NULL;
					for (n = 0; n < size && !err; n++) {
						if (json_is_string(json_array_get(val, n)))
							err = opt->cb_arg(json_string_value(json_array_get(val, n)), opt->u.arg);
						else if (json_is_object(json_array_get(val, n)))
							err = parse_config(json_array_get(val, n), false, fileconf_load_p);
					}
				}
			} else if (opt->type & OPT_NOARG) {
				if (json_is_true(val))
					err = opt->cb(opt->u.arg);
				else if (json_is_boolean(val)) {
					if (opt->cb == (void*)opt_set_bool)
						err = opt_set_invbool(opt->u.arg);
					else if (opt->cb == (void*)opt_set_invbool)
						err = opt_set_bool(opt->u.arg);
				}
			}

			if (err) {
				/* Allow invalid values to be in configuration
				 * file, just skipping over them provided the
				 * JSON is still valid after that. */
				if (fileconf) {
					applog(LOG_ERR, "Invalid config option %s: %s", p, err);
					*fileconf_load_p = -1;
				} else {
					snprintf(err_buf, sizeof(err_buf), "Parsing JSON option %s: %s",
						 p, err);
					return err_buf;
				}
			}
		}
		free(name);
	}

	val = json_object_get(config, JSON_INCLUDE_CONF);
	if (val && json_is_string(val))
		return load_config(json_string_value(val), NULL);

	return NULL;
}
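/* Example (illustrative, not from the original source): the number branch
 * above renders JSON numbers with "%f" and then trims the /\.0*$/ suffix so
 * integer-only option callbacks still accept them:
 *   10  -> "10.000000" -> "10"
 *   0.5 -> "0.500000"  -> "0.500000" (kept: the fraction is not all zeros)
 * so a config entry like {"queue": 2} behaves the same as --queue 2 on the
 * command line. */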
struct bfg_loaded_configfile *bfg_loaded_configfiles;

static char *load_config(const char *arg, void __maybe_unused *unused)
{
	json_error_t err;
	json_t *config;
	char *json_error;
	size_t siz;
	struct bfg_loaded_configfile *cfginfo;

	cfginfo = malloc(sizeof(*cfginfo));
	*cfginfo = (struct bfg_loaded_configfile){
		.filename = strdup(arg),
	};
	LL_APPEND(bfg_loaded_configfiles, cfginfo);

	if (++include_count > JSON_MAX_DEPTH)
		return JSON_MAX_DEPTH_ERR;

#if JANSSON_MAJOR_VERSION > 1
	config = json_load_file(arg, 0, &err);
#else
	config = json_load_file(arg, &err);
#endif
	if (!json_is_object(config)) {
		siz = JSON_LOAD_ERROR_LEN + strlen(arg) + strlen(err.text);
		json_error = malloc(siz);
		if (!json_error)
			quit(1, "Malloc failure in json error");
		snprintf(json_error, siz, JSON_LOAD_ERROR, arg, err.text);
		return json_error;
	}

	config_loaded = true;

	/* Parse the config now, so we can override it. That can keep pointers
	 * so don't free config object. */
	return parse_config(config, true, &cfginfo->fileconf_load);
}
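/* A minimal sketch of a config file this loader accepts (keys are the long
 * option names without the leading "--"; values may be strings, numbers,
 * booleans, or arrays, per parse_config above):
 *   {
 *       "url"  : "http://pool.example:8332",
 *       "user" : "worker1",
 *       "pass" : "x",
 *       "queue" : 1,
 *       "no-submit-stale" : true,
 *       "include" : "extra.conf"
 *   }
 * The chained-config key is whatever JSON_INCLUDE_CONF names; "include" here
 * is an assumption for illustration, and nesting is capped by
 * JSON_MAX_DEPTH via include_count. */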
static void load_default_config(void)
{
	char cnfbuf[PATH_MAX];

#if defined(unix)
	if (getenv("HOME") && *getenv("HOME")) {
		strcpy(cnfbuf, getenv("HOME"));
		strcat(cnfbuf, "/");
	} else
		strcpy(cnfbuf, "");
	char *dirp = cnfbuf + strlen(cnfbuf);
	strcpy(dirp, ".bfgminer/");
	strcat(dirp, def_conf);
	if (access(cnfbuf, R_OK))
		// No BFGMiner config, try Cgminer's...
		strcpy(dirp, ".cgminer/cgminer.conf");
#else
	strcpy(cnfbuf, "");
	strcat(cnfbuf, def_conf);
#endif
	if (!access(cnfbuf, R_OK))
		load_config(cnfbuf, NULL);
}

extern const char *opt_argv0;

static char *opt_verusage_and_exit(const char *extra)
{
	puts(packagename);
	printf(" Lowlevel:%s\n", BFG_LOWLLIST);
	printf(" Drivers:%s\n", BFG_DRIVERLIST);
	printf(" Algorithms:%s\n", BFG_ALGOLIST);
	printf(" Options:%s\n", BFG_OPTLIST);
	printf("%s", opt_usage(opt_argv0, extra));
	fflush(stdout);
	exit(0);
}

/* These options are parsed before anything else */
static struct opt_table opt_early_table[] = {
	OPT_EARLY_WITH_ARG("--config|-c",
		set_bool_ignore_arg, NULL, &config_loaded,
		opt_hidden),
	OPT_EARLY_WITHOUT_ARG("--no-config",
		opt_set_bool, &config_loaded,
		"Inhibit loading default config file"),
	OPT_ENDTABLE
};

/* These options are available from commandline only */
static struct opt_table opt_cmdline_table[] = {
	OPT_WITH_ARG("--config|-c",
		load_config, NULL, NULL,
		"Load a JSON-format configuration file\n"
		"See example.conf for an example configuration."),
	OPT_EARLY_WITHOUT_ARG("--no-config",
		opt_set_bool, &config_loaded,
		"Inhibit loading default config file"),
	OPT_WITHOUT_ARG("--help|-h",
		opt_verusage_and_exit, NULL,
		"Print this message"),
#ifdef HAVE_OPENCL
	OPT_WITHOUT_ARG("--ndevs|-n",
		print_ndevs_and_exit, &nDevs,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--version|-V",
		opt_version_and_exit, packagename,
		"Display version and exit"),
	OPT_ENDTABLE
};
static bool jobj_binary(const json_t *obj, const char *key,
			void *buf, size_t buflen, bool required)
{
	const char *hexstr;
	json_t *tmp;

	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		if (unlikely(required))
			applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;

	return true;
}
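/* Usage sketch (illustrative; mirrors the calls in work_decode below):
 * decode the mandatory 32-byte "target" of a getwork reply, failing if the
 * key is missing, non-string, or not valid hex of the requested length:
 *   unsigned char target[32];
 *   if (!jobj_binary(res_val, "target", target, sizeof(target), true))
 *       return false;
 */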
static void calc_midstate(struct work *work)
{
	union {
		unsigned char c[64];
		uint32_t i[16];
	} data;

	swap32yes(&data.i[0], work->data, 16);
	sha256_ctx ctx;
	sha256_init(&ctx);
	sha256_update(&ctx, data.c, 64);
	memcpy(work->midstate, ctx.h, sizeof(work->midstate));
	swap32tole(work->midstate, work->midstate, 8);
}
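/* Why a midstate: SHA-256 consumes the 80-byte header as two 64-byte blocks
 * (the second holding the final 16 header bytes, including the nonce, plus
 * padding). The eight 32-bit state words after the first block are constant
 * across the entire nonce range, so they are computed once here and handed
 * to devices that accept them. For Bitcoin's double SHA-256 this removes
 * one of the three compression calls per nonce tried (first block, second
 * block, and the outer hash of the 32-byte digest), a worthwhile saving
 * when multiplied by up to 2^32 nonces per work item. */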
static
struct bfg_tmpl_ref *tmpl_makeref(blktemplate_t * const tmpl)
{
	struct bfg_tmpl_ref * const tr = malloc(sizeof(*tr));
	*tr = (struct bfg_tmpl_ref){
		.tmpl = tmpl,
		.refcount = 1,
	};
	mutex_init(&tr->mutex);
	return tr;
}

static
void tmpl_incref(struct bfg_tmpl_ref * const tr)
{
	mutex_lock(&tr->mutex);
	++tr->refcount;
	mutex_unlock(&tr->mutex);
}

void tmpl_decref(struct bfg_tmpl_ref * const tr)
{
	mutex_lock(&tr->mutex);
	bool free_tmpl = !--tr->refcount;
	mutex_unlock(&tr->mutex);
	if (free_tmpl)
	{
		blktmpl_free(tr->tmpl);
		mutex_destroy(&tr->mutex);
		free(tr);
	}
}
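/* Lifecycle sketch (illustrative): a template starts at refcount 1 from
 * tmpl_makeref; everything that shares it takes a reference, and the last
 * tmpl_decref frees both the libblkmaker template and the wrapper:
 *   struct bfg_tmpl_ref *tr = tmpl_makeref(tmpl);  // refcount == 1
 *   tmpl_incref(tr);   // a second work struct now shares it (== 2)
 *   tmpl_decref(tr);   // first owner done (== 1)
 *   tmpl_decref(tr);   // last owner done: blktmpl_free + free(tr)
 * Note the decrement is mutex-protected but the free runs outside the lock,
 * which is safe because no references remain at that point. */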
static struct work *make_work(void)
{
	struct work *work = calloc(1, sizeof(struct work));

	if (unlikely(!work))
		quit(1, "Failed to calloc work in make_work");
	cg_wlock(&control_lock);
	work->id = total_work++;
	cg_wunlock(&control_lock);
	return work;
}

/* This is the central place all work that is about to be retired should be
 * cleaned to remove any dynamically allocated arrays within the struct */
void clean_work(struct work *work)
{
	free(work->job_id);
	bytes_free(&work->nonce2);
	free(work->nonce1);

	if (work->device_data_free_func)
		work->device_data_free_func(work);
	if (work->tr)
		tmpl_decref(work->tr);

	memset(work, 0, sizeof(struct work));
}

/* All dynamically allocated work structs should be freed here to not leak any
 * ram from arrays allocated within the work struct */
void free_work(struct work *work)
{
	clean_work(work);
	free(work);
}

static const char *workpadding_bin = "\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x02\0\0";

// Must only be called with ch_lock held!
static
void __update_block_title(const unsigned char *hash_swap)
{
	if (hash_swap) {
		char tmp[17];
		// Only provided when the block has actually changed
		free(current_hash);
		current_hash = malloc(3 /* ... */ + 16 /* block hash segment */ + 1);
		bin2hex(tmp, &hash_swap[24], 8);
		memset(current_hash, '.', 3);
		memcpy(&current_hash[3], tmp, 17);
		known_blkheight_current = false;
	} else if (likely(known_blkheight_current)) {
		return;
	}
	if (current_block_id == known_blkheight_blkid) {
		// FIXME: The block number will overflow this sometime around AD 2025-2027
		if (known_blkheight < 1000000) {
			memmove(&current_hash[3], &current_hash[11], 8);
			snprintf(&current_hash[11], 20-11, " #%6u", known_blkheight);
		}
		known_blkheight_current = true;
	}
}

static
void have_block_height(uint32_t block_id, uint32_t blkheight)
{
	if (known_blkheight == blkheight)
		return;
	applog(LOG_DEBUG, "Learned that block id %08" PRIx32 " is height %" PRIu32, (uint32_t)be32toh(block_id), blkheight);
	cg_wlock(&ch_lock);
	known_blkheight = blkheight;
	known_blkheight_blkid = block_id;
	block_subsidy = 5000000000LL >> (blkheight / 210000);
	if (block_id == current_block_id)
		__update_block_title(NULL);
	cg_wunlock(&ch_lock);
}
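/* Worked example for the subsidy line above (the shift implements Bitcoin's
 * halving schedule exactly): the subsidy starts at 5000000000 satoshi
 * (50 BTC) and halves every 210000 blocks:
 *   blkheight = 100000: 100000/210000 == 0 -> 50 BTC
 *   blkheight = 420000: 420000/210000 == 2 -> 5000000000 >> 2
 *                                          == 1250000000 (12.5 BTC)
 * Like any right-shift halving, it truncates once the amount is no longer
 * evenly divisible by two. */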
static
void pool_set_opaque(struct pool *pool, bool opaque)
{
	if (pool->swork.opaque == opaque)
		return;

	pool->swork.opaque = opaque;
	if (opaque)
		applog(LOG_WARNING, "Pool %u is hiding block contents from us",
		       pool->pool_no);
	else
		applog(LOG_NOTICE, "Pool %u now providing block contents to us",
		       pool->pool_no);
}

bool pool_may_redirect_to(struct pool * const pool, const char * const uri)
{
	if (uri_get_param_bool(pool->rpc_url, "redirect", false))
		return true;
	return match_domains(pool->rpc_url, strlen(pool->rpc_url), uri, strlen(uri));
}

void set_simple_ntime_roll_limit(struct ntime_roll_limits * const nrl, const uint32_t ntime_base, const int ntime_roll)
{
	*nrl = (struct ntime_roll_limits){
		.min = ntime_base,
		.max = ntime_base + ntime_roll,
		.minoff = -ntime_roll,
		.maxoff = ntime_roll,
	};
}
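/* Example (a sketch): for work whose header carries timestamp T and a
 * permitted ntime_roll of 2 seconds, this produces
 *   .min = T, .max = T + 2, .minoff = -2, .maxoff = +2
 * i.e. absolute bounds on the rolled timestamp together with the largest
 * offsets a device may apply to whatever base it was handed.
 * work_set_simple_ntime_roll_limit below reads T directly from the
 * big-endian word at offset 0x44 (byte 68) of the 80-byte header, which is
 * the header's ntime field. */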
void work_set_simple_ntime_roll_limit(struct work * const work, const int ntime_roll)
{
	set_simple_ntime_roll_limit(&work->ntime_roll_limits, upk_u32be(work->data, 0x44), ntime_roll);
}

static double target_diff(const unsigned char *);

#define GBT_XNONCESZ (sizeof(uint32_t))

#if BLKMAKER_VERSION > 4
#define blkmk_append_coinbase_safe(tmpl, append, appendsz) \
	blkmk_append_coinbase_safe2(tmpl, append, appendsz, GBT_XNONCESZ, false)
#endif

static bool work_decode(struct pool *pool, struct work *work, json_t *val)
{
	json_t *res_val = json_object_get(val, "result");
	json_t *tmp_val;
	bool ret = false;
	struct timeval tv_now;

	if (unlikely(detect_algo == 1)) {
		json_t *tmp = json_object_get(res_val, "algorithm");
		const char *v = tmp ? json_string_value(tmp) : "";
		if (strncasecmp(v, "scrypt", 6))
			detect_algo = 2;
	}
	timer_set_now(&tv_now);

	if (work->tr)
	{
		blktemplate_t * const tmpl = work->tr->tmpl;
		const char *err = blktmpl_add_jansson(tmpl, res_val, tv_now.tv_sec);
		if (err) {
			applog(LOG_ERR, "blktmpl error: %s", err);
			return false;
		}
		work->rolltime = blkmk_time_left(tmpl, tv_now.tv_sec);
#if BLKMAKER_VERSION > 1
		if (opt_coinbase_script.sz)
		{
			bool newcb;
#if BLKMAKER_VERSION > 2
			blkmk_init_generation2(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb);
#else
			newcb = !tmpl->cbtxn;
			blkmk_init_generation(tmpl, opt_coinbase_script.data, opt_coinbase_script.sz);
#endif
			if (newcb)
			{
				ssize_t ae = blkmk_append_coinbase_safe(tmpl, &template_nonce, sizeof(template_nonce));
				if (ae < (ssize_t)sizeof(template_nonce))
					applog(LOG_WARNING, "Cannot append template-nonce to coinbase on pool %u (%"PRId64") - you might be wasting hashing!", work->pool->pool_no, (int64_t)ae);
				++template_nonce;
			}
		}
#endif
#if BLKMAKER_VERSION > 0
		{
			ssize_t ae = blkmk_append_coinbase_safe(tmpl, opt_coinbase_sig, 101);
			static bool appenderr = false;
			if (ae <= 0) {
				if (opt_coinbase_sig) {
					applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Cannot append coinbase signature at all on pool %u (%"PRId64")", pool->pool_no, (int64_t)ae);
					appenderr = true;
				}
			} else if (ae >= 3 || opt_coinbase_sig) {
				const char *cbappend = opt_coinbase_sig;
				const char full[] = PACKAGE " " VERSION;
				if (!cbappend) {
					if ((size_t)ae >= sizeof(full) - 1)
						cbappend = full;
					else if ((size_t)ae >= sizeof(PACKAGE) - 1)
						cbappend = PACKAGE;
					else
						cbappend = "BFG";
				}
				size_t cbappendsz = strlen(cbappend);
				static bool truncatewarning = false;
				if (cbappendsz <= (size_t)ae) {
					if (cbappendsz < (size_t)ae)
						// If we have space, include the trailing \0
						++cbappendsz;
					ae = cbappendsz;
					truncatewarning = false;
				} else {
					char *tmp = malloc(ae + 1);
					memcpy(tmp, opt_coinbase_sig, ae);
					tmp[ae] = '\0';
					applog((truncatewarning ? LOG_DEBUG : LOG_WARNING),
					       "Pool %u truncating appended coinbase signature at %"PRId64" bytes: %s(%s)",
					       pool->pool_no, (int64_t)ae, tmp, &opt_coinbase_sig[ae]);
					free(tmp);
					truncatewarning = true;
				}
				ae = blkmk_append_coinbase_safe(tmpl, cbappend, ae);
				if (ae <= 0) {
					applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Error appending coinbase signature (%"PRId64")", (int64_t)ae);
					appenderr = true;
				} else
					appenderr = false;
			}
		}
#endif
		if (blkmk_get_data(tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
			return false;
		swap32yes(work->data, work->data, 80 / 4);
		memcpy(&work->data[80], workpadding_bin, 48);

		work->ntime_roll_limits = (struct ntime_roll_limits){
			.min = tmpl->mintime,
			.max = tmpl->maxtime,
			.minoff = tmpl->mintimeoff,
			.maxoff = tmpl->maxtimeoff,
		};

		const struct blktmpl_longpoll_req *lp;
		if ((lp = blktmpl_get_longpoll(tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
			free(pool->lp_id);
			pool->lp_id = strdup(lp->id);

#if 0 /* This just doesn't work :( */
			curl_socket_t sock = pool->lp_socket;
			if (sock != CURL_SOCKET_BAD) {
				pool->lp_socket = CURL_SOCKET_BAD;
				applog(LOG_WARNING, "Pool %u long poll request hanging, reconnecting", pool->pool_no);
				shutdown(sock, SHUT_RDWR);
			}
#endif
		}
	}
	else
	if (unlikely(!jobj_binary(res_val, "data", work->data, sizeof(work->data), true))) {
		applog(LOG_ERR, "JSON inval data");
		return false;
	}
	else
		work_set_simple_ntime_roll_limit(work, 0);

	if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) {
		// Calculate it ourselves
		applog(LOG_DEBUG, "Calculating midstate locally");
		calc_midstate(work);
	}

	if (unlikely(!jobj_binary(res_val, "target", work->target, sizeof(work->target), true))) {
		applog(LOG_ERR, "JSON inval target");
		return false;
	}
	if (work->tr)
	{
		for (size_t i = 0; i < sizeof(work->target) / 2; ++i)
		{
			int p = (sizeof(work->target) - 1) - i;
			unsigned char c = work->target[i];
			work->target[i] = work->target[p];
			work->target[p] = c;
		}
	}

	if ( (tmp_val = json_object_get(res_val, "height")) ) {
		uint32_t blkheight = json_number_value(tmp_val);
		uint32_t block_id = ((uint32_t*)work->data)[1];
		have_block_height(block_id, blkheight);
	}

	memset(work->hash, 0, sizeof(work->hash));

	work->tv_staged = tv_now;

#if BLKMAKER_VERSION > 4
	if (work->tr)
	{
		blktemplate_t * const tmpl = work->tr->tmpl;
		uint8_t buf[80];
		int16_t expire;
		uint8_t *cbtxn;
		size_t cbtxnsz;
		size_t cbextranonceoffset;
		int branchcount;
		libblkmaker_hash_t *branches;
		if (blkmk_get_mdata(tmpl, buf, sizeof(buf), tv_now.tv_sec, &expire, &cbtxn, &cbtxnsz, &cbextranonceoffset, &branchcount, &branches, GBT_XNONCESZ, false))
		{
			struct stratum_work * const swork = &pool->swork;
			const size_t branchdatasz = branchcount * 0x20;
			cg_wlock(&pool->data_lock);
			swork->tr = work->tr;
			bytes_assimilate_raw(&swork->coinbase, cbtxn, cbtxnsz, cbtxnsz);
			swork->nonce2_offset = cbextranonceoffset;
			bytes_assimilate_raw(&swork->merkle_bin, branches, branchdatasz, branchdatasz);
			swork->merkles = branchcount;
			memcpy(swork->header1, &buf[0], 36);
			swork->ntime = le32toh(*(uint32_t *)(&buf[68]));
			swork->tv_received = tv_now;
			memcpy(swork->diffbits, &buf[72], 4);
			memcpy(swork->target, work->target, sizeof(swork->target));
			free(swork->job_id);
			swork->job_id = NULL;
			swork->clean = true;
			swork->work_restart_id = pool->work_restart_id;
			// FIXME: Do something with expire
			pool->nonce2sz = swork->n2size = GBT_XNONCESZ;
			pool->nonce2 = 0;
			cg_wunlock(&pool->data_lock);
		}
		else
			applog(LOG_DEBUG, "blkmk_get_mdata failed for pool %u", pool->pool_no);
	}
#endif // BLKMAKER_VERSION > 4

	pool_set_opaque(pool, !work->tr);

	ret = true;
	return ret;
}
/* Returns whether the pool supports local work generation or not. */
static bool pool_localgen(struct pool *pool)
{
	return (pool->last_work_copy || pool->has_stratum);
}

int dev_from_id(int thr_id)
{
	struct cgpu_info *cgpu = get_thr_cgpu(thr_id);

	return cgpu->device_id;
}

/* Create an exponentially decaying average over the opt_log_interval */
void decay_time(double *f, double fadd, double fsecs)
{
	double ftotal, fprop;

	fprop = 1.0 - 1 / (exp(fsecs / (double)opt_log_interval));
	ftotal = 1.0 + fprop;
	*f += (fadd * fprop);
	*f /= ftotal;
}
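/* Worked example (illustrative): with opt_log_interval = 20 and a new
 * sample spanning fsecs = 20 seconds, fprop = 1 - 1/e ~= 0.632 and
 * ftotal ~= 1.632, so
 *   *f = (*f + 0.632 * fadd) / 1.632
 * i.e. the new sample carries ~39% of the result. Shorter samples get
 * proportionally less weight: at fsecs = 2, fprop ~= 0.095, so a burst of
 * quick updates cannot swing the rolling average abruptly. */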
static int __total_staged(void)
{
	return HASH_COUNT(staged_work);
}

static int total_staged(void)
{
	int ret;

	mutex_lock(stgd_lock);
	ret = __total_staged();
	mutex_unlock(stgd_lock);
	return ret;
}

#ifdef HAVE_CURSES
WINDOW *mainwin, *statuswin, *logwin;
#endif
double total_secs = 1.0;
static char statusline[256];
/* logstart is where the log window should start */
static int devcursor, logstart, logcursor;
#ifdef HAVE_CURSES
/* statusy is where the status window goes up to in cases where it won't fit at startup */
static int statusy;
static int devsummaryYOffset;
static int total_lines;
#endif
#ifdef HAVE_OPENCL
struct cgpu_info gpus[MAX_GPUDEVICES]; /* Maximum number apparently possible */
#endif
struct cgpu_info *cpus;

bool _bfg_console_cancel_disabled;
int _bfg_console_prev_cancelstate;

#ifdef HAVE_CURSES
#define lock_curses()  bfg_console_lock()
#define unlock_curses()  bfg_console_unlock()

static bool curses_active_locked(void)
{
	bool ret;

	lock_curses();
	ret = curses_active;
	if (!ret)
		unlock_curses();
	return ret;
}

// Cancellable getch
int my_cancellable_getch(void)
{
	// This only works because the macro only hits direct getch() calls
	typedef int (*real_getch_t)(void);
	const real_getch_t real_getch = __real_getch;

	int type, rv;
	bool sct;

	sct = !pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &type);
	rv = real_getch();
	if (sct)
		pthread_setcanceltype(type, &type);

	return rv;
}

#ifdef PDCURSES
static
int bfg_wresize(WINDOW *win, int lines, int columns)
{
	int rv = wresize(win, lines, columns);
	int x, y;
	getyx(win, y, x);
	if (unlikely(y >= lines || x >= columns))
	{
		if (y >= lines)
			y = lines - 1;
		if (x >= columns)
			x = columns - 1;
		wmove(win, y, x);
	}
	return rv;
}
#else
#	define bfg_wresize wresize
#endif

#endif

void tailsprintf(char *buf, size_t bufsz, const char *fmt, ...)
{
	va_list ap;
	size_t presz = strlen(buf);

	va_start(ap, fmt);
	vsnprintf(&buf[presz], bufsz - presz, fmt, ap);
	va_end(ap);
}

double stats_elapsed(struct cgminer_stats *stats)
{
	struct timeval now;
	double elapsed;

	if (stats->start_tv.tv_sec == 0)
		elapsed = total_secs;
	else {
		cgtime(&now);
		elapsed = tdiff(&now, &stats->start_tv);
	}

	if (elapsed < 1.0)
		elapsed = 1.0;

	return elapsed;
}

bool drv_ready(struct cgpu_info *cgpu)
{
	switch (cgpu->status) {
		case LIFE_INIT:
		case LIFE_DEAD2:
			return false;
		default:
			return true;
	}
}

double cgpu_utility(struct cgpu_info *cgpu)
{
	double dev_runtime = cgpu_runtime(cgpu);

	return cgpu->utility = cgpu->accepted / dev_runtime * 60;
}

#define suffix_string(val, buf, bufsiz, sigdigits)  do{  \
	_Static_assert(sigdigits == 0, "suffix_string only supported with sigdigits==0");  \
	format_unit3(buf, bufsiz, FUP_DIFF, "", H2B_SHORTV, val, -1);  \
}while(0)

static float
utility_to_hashrate(double utility)
{
	return utility * 0x4444444;
}
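/* Where 0x4444444 comes from: utility is measured in difficulty-1 shares
 * per minute, and one difficulty-1 share represents 2^32 hashes on average,
 * so the equivalent hashrate is
 *   utility * 2^32 / 60 ~= utility * 71582788.27
 * and 0x4444444 == 71582788 is that constant rounded to an integer. */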
static const char *_unitchar = "pn\xb5m kMGTPEZY?";
static const int _unitbase = 4;

static
void pick_unit(float hashrate, unsigned char *unit)
{
	unsigned char i;

	if (hashrate == 0 || !isfinite(hashrate))
	{
		if (*unit < _unitbase)
			*unit = _unitbase;
		return;
	}

	hashrate *= 1e12;
	for (i = 0; i < *unit; ++i)
		hashrate /= 1e3;

	// 1000 but with tolerance for floating-point rounding, avoid showing "1000.0"
	while (hashrate >= 999.95)
	{
		hashrate /= 1e3;
		if (likely(_unitchar[*unit] != '?'))
			++*unit;
	}
}
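/* Worked example (illustrative): callers pass values in the base unit (H/s
 * for hashrates); the * 1e12 rescales to the pico entry at index 0 of
 * _unitchar ("pn\xb5m kMGTPEZY?", where index 4 is the bare unit), so
 * fractional quantities can pick n/u/m prefixes too. For 12.3 GH/s:
 *   input 1.23e10 -> * 1e12 = 1.23e22, then seven /1e3 steps reach 12.3
 *   with *unit == 7, which _unitchar maps to 'G'.
 * Because *unit is read and only ever raised, passing the same unit across
 * several calls (as _multi_format_unit below does) yields one shared
 * scale for all the values. */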
#define hashrate_pick_unit(hashrate, unit)  pick_unit(hashrate, unit)

enum h2bs_fmt {
	H2B_NOUNIT,  // "xxx.x"
	H2B_SHORT,   // "xxx.xMH/s"
	H2B_SPACED,  // "xxx.x MH/s"
	H2B_SHORTV,  // Like H2B_SHORT, but omit space for base unit
};

enum bfu_floatprec {
	FUP_INTEGER,
	FUP_HASHES,
	FUP_BTC,
	FUP_DIFF,
};

static
int format_unit3(char *buf, size_t sz, enum bfu_floatprec fprec, const char *measurement, enum h2bs_fmt fmt, float hashrate, signed char unitin)
{
	char *s = buf;
	unsigned char prec, i, unit;
	int rv = 0;

	if (unitin == -1)
	{
		unit = 0;
		hashrate_pick_unit(hashrate, &unit);
	}
	else
		unit = unitin;

	hashrate *= 1e12;
	for (i = 0; i < unit; ++i)
		hashrate /= 1000;

	switch (fprec)
	{
		case FUP_HASHES:
			// 100 but with tolerance for floating-point rounding, max "99.99" then "100.0"
			if (hashrate >= 99.995 || unit < 6)
				prec = 1;
			else
				prec = 2;
			_SNP("%5.*f", prec, hashrate);
			break;
		case FUP_INTEGER:
			_SNP("%3d", (int)hashrate);
			break;
		case FUP_BTC:
			if (hashrate >= 99.995)
				prec = 0;
			else
				prec = 2;
			_SNP("%5.*f", prec, hashrate);
			break;
		case FUP_DIFF:
			if (unit > _unitbase)
				_SNP("%.3g", hashrate);
			else
				_SNP("%u", (unsigned int)hashrate);
	}

	if (fmt != H2B_NOUNIT)
	{
		char uc[3] = {_unitchar[unit], '\0'};
		switch (fmt) {
			case H2B_SPACED:
				_SNP(" ");
			default:
				break;
			case H2B_SHORTV:
				if (isspace(uc[0]))
					uc[0] = '\0';
		}
		if (uc[0] == '\xb5')
			// Convert to UTF-8
			snprintf(uc, sizeof(uc), "%s", U8_MICRO);
		_SNP("%s%s", uc, measurement);
	}

	return rv;
}
#define format_unit2(buf, sz, floatprec, measurement, fmt, n, unit)  \
	format_unit3(buf, sz, floatprec ? FUP_HASHES : FUP_INTEGER, measurement, fmt, n, unit)

static
char *_multi_format_unit(char **buflist, size_t *bufszlist, bool floatprec, const char *measurement, enum h2bs_fmt fmt, const char *delim, int count, const float *numbers, bool isarray)
{
	unsigned char unit = 0;
	bool allzero = true;
	int i;
	size_t delimsz = 0;
	char *buf = buflist[0];
	size_t bufsz = bufszlist[0];
	size_t itemwidth = (floatprec ? 5 : 3);

	if (!isarray)
		delimsz = strlen(delim);

	for (i = 0; i < count; ++i)
		if (numbers[i] != 0)
		{
			pick_unit(numbers[i], &unit);
			allzero = false;
		}
	if (allzero)
		unit = _unitbase;

	--count;
	for (i = 0; i < count; ++i)
	{
		format_unit2(buf, bufsz, floatprec, NULL, H2B_NOUNIT, numbers[i], unit);
		if (isarray)
		{
			buf = buflist[i + 1];
			bufsz = bufszlist[i + 1];
		}
		else
		{
			buf += itemwidth;
			bufsz -= itemwidth;
			if (delimsz > bufsz)
				delimsz = bufsz;
			memcpy(buf, delim, delimsz);
			buf += delimsz;
			bufsz -= delimsz;
		}
	}
	// Last entry has the unit
	format_unit2(buf, bufsz, floatprec, measurement, fmt, numbers[count], unit);

	return buflist[0];
}
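/* Usage sketch (illustrative): format related rates with one shared SI
 * prefix, chosen by the largest nonzero value, so columns stay comparable;
 * via the convenience macro just below:
 *   char buf[64] = "";
 *   multi_format_unit2(buf, sizeof(buf), true, "h/s", H2B_SHORT,
 *                      "/", 2, 1.23e9, 4.56e7);
 *   // buf now reads roughly " 1.23/ 0.05Gh/s": both values share 'G'.
 * get_statline3 below uses the array form to fill cHr/aHr/uHr with a
 * common unit. */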
#define multi_format_unit2(buf, bufsz, floatprec, measurement, fmt, delim, count, ...)  _multi_format_unit((char *[]){buf}, (size_t[]){bufsz}, floatprec, measurement, fmt, delim, count, (float[]){ __VA_ARGS__ }, false)
#define multi_format_unit_array2(buflist, bufszlist, floatprec, measurement, fmt, count, ...)  (void)_multi_format_unit(buflist, bufszlist, floatprec, measurement, fmt, NULL, count, (float[]){ __VA_ARGS__ }, true)

static
int percentf3(char * const buf, size_t sz, double p, const double t)
{
	char *s = buf;
	int rv = 0;

	if (!p)
		_SNP("none");
	else
	if (t <= p)
		_SNP("100%%");
	else
	{
		p /= t;
		if (p < 0.00995)  // 0.01 but with tolerance for floating-point rounding, max ".99%"
			_SNP(".%02.0f%%", p * 10000);  // ".01%"
		else
		if (p < 0.0995)  // 0.1 but with tolerance for floating-point rounding, max "9.9%"
			_SNP("%.1f%%", p * 100);  // "9.1%"
		else
			_SNP("%3.0f%%", p * 100);  // " 99%"
	}

	return rv;
}
#define percentf4(buf, bufsz, p, t)  percentf3(buf, bufsz, p, p + t)
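/* Worked examples (illustrative): percentf3 keeps its output at four
 * characters so status columns line up:
 *   percentf3(buf, sz, 0,   100) -> "none"
 *   percentf3(buf, sz, 0.5, 100) -> ".50%"   (ratio 0.005)
 *   percentf3(buf, sz, 5,   100) -> "5.0%"
 *   percentf3(buf, sz, 50,  100) -> " 50%"
 *   percentf3(buf, sz, 120, 100) -> "100%"   (capped when t <= p)
 * percentf4 takes part and remainder instead of part and total, as with the
 * rejected-versus-accepted share weights in format_statline below. */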
static
void test_decimal_width()
{
	// The pipe character at end of each line should perfectly line up
	char printbuf[512];
	char testbuf1[64];
	char testbuf2[64];
	char testbuf3[64];
	char testbuf4[64];
	double testn;
	int width;
	int saved;

	// Hotspots around 0.1 and 0.01
	saved = -1;
	for (testn = 0.09; testn <= 0.11; testn += 0.000001) {
		percentf3(testbuf1, sizeof(testbuf1), testn, 1.0);
		percentf3(testbuf2, sizeof(testbuf2), testn, 10.0);
		width = snprintf(printbuf, sizeof(printbuf), "%10g %s %s |", testn, testbuf1, testbuf2);
		if (unlikely((saved != -1) && (width != saved))) {
			applog(LOG_ERR, "Test width mismatch in percentf3! %d not %d at %10g", width, saved, testn);
			applog(LOG_ERR, "%s", printbuf);
		}
		saved = width;
	}

	// Hotspot around 100 (but test this in several units because format_unit2 also has unit<2 check)
	saved = -1;
	for (testn = 99.0; testn <= 101.0; testn += 0.0001) {
		format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn      , -1);
		format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1);
		format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1);
		snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s |", testn, testbuf1, testbuf2, testbuf3);
		width = utf8_strlen(printbuf);
		if (unlikely((saved != -1) && (width != saved))) {
			applog(LOG_ERR, "Test width mismatch in format_unit2! %d not %d at %10g", width, saved, testn);
			applog(LOG_ERR, "%s", printbuf);
		}
		saved = width;
	}

	// Hotspot around unit transition boundary in pick_unit
	saved = -1;
	for (testn = 999.0; testn <= 1001.0; testn += 0.0001) {
		format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn      , -1);
		format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1);
		format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1);
		format_unit2(testbuf4, sizeof(testbuf4), true, "x", H2B_SHORT, testn * 1e9, -1);
		snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s %s |", testn, testbuf1, testbuf2, testbuf3, testbuf4);
		width = utf8_strlen(printbuf);
		if (unlikely((saved != -1) && (width != saved))) {
			applog(LOG_ERR, "Test width mismatch in pick_unit! %d not %d at %10g", width, saved, testn);
			applog(LOG_ERR, "%s", printbuf);
		}
		saved = width;
	}
}

#ifdef HAVE_CURSES
static void adj_width(int var, int *length);
#endif

#ifdef HAVE_CURSES
static int awidth = 1, rwidth = 1, swidth = 1, hwwidth = 1;

static
void format_statline(char *buf, size_t bufsz, const char *cHr, const char *aHr, const char *uHr, int accepted, int rejected, int stale, int wnotaccepted, int waccepted, int hwerrs, int bad_diff1, int allnonces)
{
	char rejpcbuf[6];
	char bnbuf[6];

	adj_width(accepted, &awidth);
	adj_width(rejected, &rwidth);
	adj_width(stale, &swidth);
	adj_width(hwerrs, &hwwidth);
	percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted);
	percentf3(bnbuf, sizeof(bnbuf), bad_diff1, allnonces);
	tailsprintf(buf, bufsz, "%s/%s/%s | A:%*d R:%*d+%*d(%s) HW:%*d/%s",
	            cHr, aHr, uHr,
	            awidth, accepted,
	            rwidth, rejected,
	            swidth, stale,
	            rejpcbuf,
	            hwwidth, hwerrs,
	            bnbuf
	);
}

static
const char *pool_proto_str(const struct pool * const pool)
{
	if (pool->idle)
		return "Dead ";
	if (pool->has_stratum)
		return "Strtm";
	if (pool->lp_url && pool->proto != pool->lp_proto)
		return "Mixed";
	switch (pool->proto)
	{
		case PLP_GETBLOCKTEMPLATE:
			return " GBT ";
		case PLP_GETWORK:
			return "GWork";
		default:
			return "Alive";
	}
}
#endif

static inline
void temperature_column(char *buf, size_t bufsz, bool maybe_unicode, const float * const temp)
{
	if (!(use_unicode && have_unicode_degrees))
		maybe_unicode = false;
	if (temp && *temp > 0.)
		if (maybe_unicode)
			snprintf(buf, bufsz, "%4.1f"U8_DEGREE"C", *temp);
		else
			snprintf(buf, bufsz, "%4.1fC", *temp);
	else
	{
		if (temp)
			snprintf(buf, bufsz, "     ");
		if (maybe_unicode)
			tailsprintf(buf, bufsz, " ");
	}
	tailsprintf(buf, bufsz, " | ");
}
void get_statline3(char *buf, size_t bufsz, struct cgpu_info *cgpu, bool for_curses, bool opt_show_procs)
{
#ifndef HAVE_CURSES
	assert(for_curses == false);
#endif
	struct device_drv *drv = cgpu->drv;
	enum h2bs_fmt hashrate_style = for_curses ? H2B_SHORT : H2B_SPACED;
	char cHr[ALLOC_H2B_NOUNIT+1], aHr[ALLOC_H2B_NOUNIT+1], uHr[max(ALLOC_H2B_SHORT, ALLOC_H2B_SPACED)+3+1];
	char rejpcbuf[6];
	char bnbuf[6];
	double dev_runtime;

	if (!opt_show_procs)
		cgpu = cgpu->device;

	dev_runtime = cgpu_runtime(cgpu);

	double rolling, mhashes;
	int accepted, rejected, stale;
	double waccepted;
	double wnotaccepted;
	int hwerrs;
	double bad_diff1, good_diff1;

	rolling = mhashes = waccepted = wnotaccepted = 0;
	accepted = rejected = stale = hwerrs = bad_diff1 = good_diff1 = 0;

	{
		struct cgpu_info *slave = cgpu;
		for (int i = 0; i < cgpu->procs; ++i, (slave = slave->next_proc))
		{
			slave->utility = slave->accepted / dev_runtime * 60;
			slave->utility_diff1 = slave->diff_accepted / dev_runtime * 60;

			rolling += slave->rolling;
			mhashes += slave->total_mhashes;
			if (opt_weighed_stats)
			{
				accepted += slave->diff_accepted;
				rejected += slave->diff_rejected;
				stale += slave->diff_stale;
			}
			else
			{
				accepted += slave->accepted;
				rejected += slave->rejected;
				stale += slave->stale;
			}
			waccepted += slave->diff_accepted;
			wnotaccepted += slave->diff_rejected + slave->diff_stale;
			hwerrs += slave->hw_errors;
			bad_diff1 += slave->bad_diff1;
			good_diff1 += slave->diff1;

			if (opt_show_procs)
				break;
		}
	}

	double wtotal = (waccepted + wnotaccepted);

	multi_format_unit_array2(
		((char*[]){cHr, aHr, uHr}),
		((size_t[]){sizeof(cHr), sizeof(aHr), sizeof(uHr)}),
		true, "h/s", hashrate_style,
		3,
		1e6*rolling,
		1e6*mhashes / dev_runtime,
		utility_to_hashrate(good_diff1 * (wtotal ? (waccepted / wtotal) : 1) * 60 / dev_runtime));

	// Processor representation
#ifdef HAVE_CURSES
	if (for_curses)
	{
		if (opt_show_procs)
			snprintf(buf, bufsz, " %"PRIprepr": ", cgpu->proc_repr);
		else
			snprintf(buf, bufsz, " %s: ", cgpu->dev_repr);
	}
	else
#endif
		snprintf(buf, bufsz, "%s ", opt_show_procs ? cgpu->proc_repr_ns : cgpu->dev_repr_ns);

	if (include_serial_in_statline && cgpu->dev_serial)
		tailsprintf(buf, bufsz, "[serial=%s] ", cgpu->dev_serial);

	if (unlikely(cgpu->status == LIFE_INIT))
	{
		tailsprintf(buf, bufsz, "Initializing...");
		return;
	}

	{
		const size_t bufln = strlen(buf);
		const size_t abufsz = (bufln >= bufsz) ? 0 : (bufsz - bufln);
		if (likely(cgpu->status != LIFE_DEAD2) && drv->override_statline_temp2 && drv->override_statline_temp2(buf, bufsz, cgpu, opt_show_procs))
			temperature_column(&buf[bufln], abufsz, for_curses, NULL);
		else
		{
			float temp = cgpu->temp;
			if (!opt_show_procs)
			{
				// Find the highest temperature of all processors
				struct cgpu_info *proc = cgpu;
				for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc))
					if (proc->temp > temp)
						temp = proc->temp;
			}
			temperature_column(&buf[bufln], abufsz, for_curses, &temp);
		}
	}

#ifdef HAVE_CURSES
	if (for_curses)
	{
		const char *cHrStatsOpt[] = {AS_BAD("DEAD "), AS_BAD("SICK "), "OFF  ", AS_BAD("REST "), AS_BAD(" ERR "), AS_BAD("WAIT "), cHr};
		const char *cHrStats;
		int cHrStatsI = (sizeof(cHrStatsOpt) / sizeof(*cHrStatsOpt)) - 1;
		bool all_dead = true, all_off = true, all_rdrv = true;
		struct cgpu_info *proc = cgpu;
		for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc))
		{
			switch (cHrStatsI) {
				default:
					if (proc->status == LIFE_WAIT)
						cHrStatsI = 5;
				case 5:
					if (proc->deven == DEV_RECOVER_ERR)
						cHrStatsI = 4;
				case 4:
					if (proc->deven == DEV_RECOVER)
						cHrStatsI = 3;
				case 3:
					if (proc->status == LIFE_SICK || proc->status == LIFE_DEAD || proc->status == LIFE_DEAD2)
					{
						cHrStatsI = 1;
						all_off = false;
					}
					else
					{
						if (likely(proc->deven == DEV_ENABLED))
							all_off = false;
						if (proc->deven != DEV_RECOVER_DRV)
							all_rdrv = false;
					}
				case 1:
					break;
			}
			if (likely(proc->status != LIFE_DEAD && proc->status != LIFE_DEAD2))
				all_dead = false;
			if (opt_show_procs)
				break;
		}
		if (unlikely(all_dead))
			cHrStatsI = 0;
		else
		if (unlikely(all_off))
			cHrStatsI = 2;
		cHrStats = cHrStatsOpt[cHrStatsI];
		if (cHrStatsI == 2 && all_rdrv)
			cHrStats = " RST ";

		format_statline(buf, bufsz,
		                cHrStats,
		                aHr, uHr,
		                accepted, rejected, stale,
		                wnotaccepted, waccepted,
		                hwerrs,
		                bad_diff1, bad_diff1 + good_diff1);
	}
	else
#endif
	{
		percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted);
		percentf4(bnbuf, sizeof(bnbuf), bad_diff1, good_diff1);
		tailsprintf(buf, bufsz, "%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s",
		            opt_log_interval,
		            cHr, aHr, uHr,
		            accepted,
		            rejected,
		            stale,
		            rejpcbuf,
		            hwerrs,
		            bnbuf
		);
	}
}
#define get_statline(buf, bufsz, cgpu)  get_statline3(buf, bufsz, cgpu, false, opt_show_procs)
#define get_statline2(buf, bufsz, cgpu, for_curses)  get_statline3(buf, bufsz, cgpu, for_curses, opt_show_procs)

static void text_print_status(int thr_id)
{
	struct cgpu_info *cgpu;
	char logline[256];

	cgpu = get_thr_cgpu(thr_id);
	if (cgpu) {
		get_statline(logline, sizeof(logline), cgpu);
		printf("%s\n", logline);
	}
}
#ifdef HAVE_CURSES
static int attr_bad = A_BOLD;

#ifdef WIN32
#define swprintf snwprintf
#endif

static
void bfg_waddstr(WINDOW *win, const char *s)
{
	const char *p = s;
	int32_t w;
	int wlen;
	unsigned char stop_ascii = (use_unicode ? '|' : 0x80);

	while (true)
	{
		while (likely(p[0] == '\n' || (p[0] >= 0x20 && p[0] < stop_ascii)))
		{
			// Printable ASCII
			++p;
		}
		if (p != s)
			waddnstr(win, s, p - s);

		w = utf8_decode(p, &wlen);
		s = p += wlen;
		switch(w)
		{
			// NOTE: U+F000-U+F7FF are reserved for font hacks
			case '\0':
				return;
			case 0xb5:  // micro symbol
				w = unicode_micro;
				goto default_addch;
			case 0xf000:  // "bad" off
				wattroff(win, attr_bad);
				break;
			case 0xf001:  // "bad" on
				wattron(win, attr_bad);
				break;
#ifdef USE_UNICODE
			case '|':
				wadd_wch(win, WACS_VLINE);
				break;
#endif
			case 0x2500:  // BOX DRAWINGS LIGHT HORIZONTAL
			case 0x2534:  // BOX DRAWINGS LIGHT UP AND HORIZONTAL
				if (!use_unicode)
				{
					waddch(win, '-');
					break;
				}
#ifdef USE_UNICODE
				wadd_wch(win, (w == 0x2500) ? WACS_HLINE : WACS_BTEE);
				break;
#endif
			case 0x2022:
				if (w > WCHAR_MAX || !iswprint(w))
					w = '*';
			default:
default_addch:
				if (w > WCHAR_MAX || !(iswprint(w) || w == '\n'))
				{
#if REPLACEMENT_CHAR <= WCHAR_MAX
					if (iswprint(REPLACEMENT_CHAR))
						w = REPLACEMENT_CHAR;
					else
#endif
						w = '?';
				}
				{
#ifdef USE_UNICODE
					wchar_t wbuf[0x10];
					int wbuflen = sizeof(wbuf) / sizeof(*wbuf);
					wbuflen = swprintf(wbuf, wbuflen, L"%lc", (wint_t)w);
					waddnwstr(win, wbuf, wbuflen);
#else
					wprintw(win, "%lc", (wint_t)w);
#endif
				}
		}
	}
}
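/* Attribute-escape sketch (illustrative): the private-use codepoints
 * U+F000/U+F001 handled above let plain C strings carry the "bad" (bold)
 * highlight through ordinary formatting calls; the AS_BAD markup used by
 * get_statline3's status strings wraps text in exactly this on/off pair
 * (assuming AS_BAD is defined that way elsewhere in this file), e.g.:
 *   bfg_waddstr(win, "\xef\x80\x81" "DEAD " "\xef\x80\x80");
 * where \xef\x80\x81 is UTF-8 for U+F001 (attribute on) and \xef\x80\x80
 * is U+F000 (attribute off). */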
  3410. static inline
  3411. void bfg_hline(WINDOW *win, int y)
  3412. {
  3413. int maxx, __maybe_unused maxy;
  3414. getmaxyx(win, maxy, maxx);
  3415. #ifdef USE_UNICODE
  3416. if (use_unicode)
  3417. mvwhline_set(win, y, 0, WACS_HLINE, maxx);
  3418. else
  3419. #endif
  3420. mvwhline(win, y, 0, '-', maxx);
  3421. }
  3422. // Spaces until end of line, using current attributes (ie, not completely clear)
  3423. static
  3424. void bfg_wspctoeol(WINDOW * const win, const int offset)
  3425. {
  3426. int x, maxx;
  3427. int __maybe_unused y;
  3428. getmaxyx(win, y, maxx);
  3429. getyx(win, y, x);
  3430. const int space_count = (maxx - x) - offset;
  3431. // Check for negative - terminal too narrow
  3432. if (space_count <= 0)
  3433. return;
  3434. char buf[space_count];
  3435. memset(buf, ' ', space_count);
  3436. waddnstr(win, buf, space_count);
  3437. }
  3438. static int menu_attr = A_REVERSE;
  3439. #define CURBUFSIZ 256
  3440. #define cg_mvwprintw(win, y, x, fmt, ...) do { \
  3441. char tmp42[CURBUFSIZ]; \
  3442. snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \
  3443. wmove(win, y, x); \
  3444. bfg_waddstr(win, tmp42); \
  3445. } while (0)
  3446. #define cg_wprintw(win, fmt, ...) do { \
  3447. char tmp42[CURBUFSIZ]; \
  3448. snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \
  3449. bfg_waddstr(win, tmp42); \
  3450. } while (0)
static bool pool_unworkable(const struct pool *);

/* Must be called with curses mutex lock held and curses_active */
static void curses_print_status(const int ts)
{
    struct pool *pool = currentpool;
    struct timeval now, tv;
    float efficiency;
    double income;
    int logdiv;

    efficiency = total_bytes_xfer ? total_diff_accepted * 2048. / total_bytes_xfer : 0.0;

    wattron(statuswin, attr_title);
    cg_mvwprintw(statuswin, 0, 0, " " PACKAGE " version " VERSION " - Started: %s", datestamp);
    timer_set_now(&now);
    {
        unsigned int days, hours;
        div_t d;

        timersub(&now, &miner_started, &tv);
        d = div(tv.tv_sec, 86400);
        days = d.quot;
        d = div(d.rem, 3600);
        hours = d.quot;
        d = div(d.rem, 60);
        cg_wprintw(statuswin, " - [%3u day%c %02d:%02d:%02d]"
            , days
            , (days == 1) ? ' ' : 's'
            , hours
            , d.quot
            , d.rem
        );
    }
    bfg_wspctoeol(statuswin, 0);
    wattroff(statuswin, attr_title);

    wattron(statuswin, menu_attr);
    wmove(statuswin, 1, 0);
    bfg_waddstr(statuswin, " [M]anage devices [P]ool management [S]ettings [D]isplay options ");
    bfg_wspctoeol(statuswin, 14);
    bfg_waddstr(statuswin, "[H]elp [Q]uit ");
    wattroff(statuswin, menu_attr);

    if ((pool_strategy == POOL_LOADBALANCE || pool_strategy == POOL_BALANCE) && enabled_pools > 1) {
        char poolinfo[20], poolinfo2[20];
        int poolinfooff = 0, poolinfo2off, workable_pools = 0;
        double lowdiff = DBL_MAX, highdiff = -1;
        struct pool *lowdiff_pool = pools[0], *highdiff_pool = pools[0];
        time_t oldest_work_restart = time(NULL) + 1;
        struct pool *oldest_work_restart_pool = pools[0];

        for (int i = 0; i < total_pools; ++i)
        {
            if (pool_unworkable(pools[i]))
                continue;
            // NOTE: Only set pool var when it's workable; if only one is, it gets used by single-pool code
            pool = pools[i];
            ++workable_pools;
            if (poolinfooff < sizeof(poolinfo))
                poolinfooff += snprintf(&poolinfo[poolinfooff], sizeof(poolinfo) - poolinfooff, "%u,", pool->pool_no);

            struct cgminer_pool_stats * const pool_stats = &pool->cgminer_pool_stats;
            if (pool_stats->last_diff < lowdiff)
            {
                lowdiff = pool_stats->last_diff;
                lowdiff_pool = pool;
            }
            if (pool_stats->last_diff > highdiff)
            {
                highdiff = pool_stats->last_diff;
                highdiff_pool = pool;
            }

            if (oldest_work_restart >= pool->work_restart_time)
            {
                oldest_work_restart = pool->work_restart_time;
                oldest_work_restart_pool = pool;
            }
        }

        if (unlikely(!workable_pools))
            goto no_workable_pools;
        if (workable_pools == 1)
            goto one_workable_pool;

        poolinfo2off = snprintf(poolinfo2, sizeof(poolinfo2), "%u (", workable_pools);
        if (poolinfooff > sizeof(poolinfo2) - poolinfo2off - 1)
            snprintf(&poolinfo2[poolinfo2off], sizeof(poolinfo2) - poolinfo2off, "%.*s...)", (int)(sizeof(poolinfo2) - poolinfo2off - 5), poolinfo);
        else
            snprintf(&poolinfo2[poolinfo2off], sizeof(poolinfo2) - poolinfo2off, "%.*s)%*s", (int)(poolinfooff - 1), poolinfo, (int)(sizeof(poolinfo2)), "");

        cg_mvwprintw(statuswin, 2, 0, " Pools: %s Diff:%s%s%s %c LU:%s",
                     poolinfo2,
                     lowdiff_pool->diff,
                     (lowdiff == highdiff) ? "" : "-",
                     (lowdiff == highdiff) ? "" : highdiff_pool->diff,
                     have_longpoll ? '+' : '-',
                     oldest_work_restart_pool->work_restart_timestamp);
    }
    else
    if (pool_unworkable(pool))
    {
no_workable_pools: ;
        wattron(statuswin, attr_bad);
        cg_mvwprintw(statuswin, 2, 0, " (all pools are dead) ");
        wattroff(statuswin, attr_bad);
    }
    else
    {
one_workable_pool: ;
        char pooladdr[19];
        {
            const char *rawaddr = pool->sockaddr_url;
            BFGINIT(rawaddr, pool->rpc_url);
            size_t pooladdrlen = strlen(rawaddr);
            if (pooladdrlen > 20)
                snprintf(pooladdr, sizeof(pooladdr), "...%s", &rawaddr[pooladdrlen - (sizeof(pooladdr) - 4)]);
            else
                snprintf(pooladdr, sizeof(pooladdr), "%*s", -(int)(sizeof(pooladdr) - 1), rawaddr);
        }
        cg_mvwprintw(statuswin, 2, 0, " Pool%2u: %s Diff:%s %c%s LU:%s User:%s",
                     pool->pool_no, pooladdr, pool->diff,
                     have_longpoll ? '+' : '-', pool_proto_str(pool),
                     pool->work_restart_timestamp,
                     pool->rpc_user);
    }
    wclrtoeol(statuswin);

    cg_mvwprintw(statuswin, 3, 0, " Block: %s Diff:%s (%s) Started: %s",
                 current_hash, block_diff, net_hashrate, blocktime);

    income = total_diff_accepted * 3600 * block_subsidy / total_secs / current_diff;
    char bwstr[(ALLOC_H2B_SHORT*2)+3+1], incomestr[ALLOC_H2B_SHORT+6+1];
    format_unit3(incomestr, sizeof(incomestr), FUP_BTC, "BTC/hr", H2B_SHORT, income/1e8, -1);
    cg_mvwprintw(statuswin, 4, 0, " ST:%d F:%d NB:%d AS:%d BW:[%s] E:%.2f I:%s BS:%s",
                 ts,
                 total_go + total_ro,
                 new_blocks,
                 total_submitting,
                 multi_format_unit2(bwstr, sizeof(bwstr),
                                    false, "B/s", H2B_SHORT, "/", 2,
                                    (float)(total_bytes_rcvd / total_secs),
                                    (float)(total_bytes_sent / total_secs)),
                 efficiency,
                 incomestr,
                 best_share);
    wclrtoeol(statuswin);

    mvwaddstr(statuswin, 5, 0, " ");
    bfg_waddstr(statuswin, statusline);
    wclrtoeol(statuswin);

    logdiv = statusy - 1;
    bfg_hline(statuswin, 6);
    bfg_hline(statuswin, logdiv);
#ifdef USE_UNICODE
    if (use_unicode)
    {
        int offset = 8 /* device */ + 5 /* temperature */ + 1 /* padding space */;
        if (opt_show_procs && !opt_compact)
            ++offset;  // proc letter
        if (have_unicode_degrees)
            ++offset;  // degrees symbol
        mvwadd_wch(statuswin, 6, offset, WACS_PLUS);
        mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE);
        offset += 24;  // hashrates etc
        mvwadd_wch(statuswin, 6, offset, WACS_PLUS);
        mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE);
    }
#endif
}
static void adj_width(int var, int *length)
{
    if ((int)(log10(var) + 1) > *length)
        (*length)++;
}
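/* Example: with *length == 3, adj_width(1234, &length) evaluates
 * (int)(log10(1234) + 1) == 4 > 3 and widens the column to 4.  Widths only
 * ever grow (by one step per call) and are never shrunk back. */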
static int dev_width;

static void curses_print_devstatus(struct cgpu_info *cgpu)
{
    char logline[256];
    int ypos;

    if (opt_compact)
        return;

    /* Check this isn't out of the window size */
    if (opt_show_procs)
        ypos = cgpu->cgminer_id;
    else
    {
        if (cgpu->proc_id)
            return;
        ypos = cgpu->device_line_id;
    }
    ypos += devsummaryYOffset;
    if (ypos < 0)
        return;
    ypos += devcursor - 1;
    if (ypos >= statusy - 1)
        return;

    if (wmove(statuswin, ypos, 0) == ERR)
        return;
    get_statline2(logline, sizeof(logline), cgpu, true);
    if (selecting_device && (opt_show_procs ? (selected_device == cgpu->cgminer_id) : (devices[selected_device]->device == cgpu)))
        wattron(statuswin, A_REVERSE);
    bfg_waddstr(statuswin, logline);
    wattroff(statuswin, A_REVERSE);

    wclrtoeol(statuswin);
}

static
void _refresh_devstatus(const bool already_have_lock)
{
    if ((!opt_compact) && (already_have_lock || curses_active_locked())) {
        int i;
        if (unlikely(!total_devices))
        {
            const int ypos = devcursor - 1;
            if (ypos < statusy - 1 && wmove(statuswin, ypos, 0) != ERR)
            {
                wattron(statuswin, attr_bad);
                bfg_waddstr(statuswin, "NO DEVICES FOUND: Press 'M' and '+' to add");
                wclrtoeol(statuswin);
                wattroff(statuswin, attr_bad);
            }
        }
        for (i = 0; i < total_devices; i++)
            curses_print_devstatus(get_devices(i));
        touchwin(statuswin);
        wrefresh(statuswin);
        if (!already_have_lock)
            unlock_curses();
    }
}
#define refresh_devstatus()  _refresh_devstatus(false)
#endif

static void print_status(int thr_id)
{
    if (!curses_active)
        text_print_status(thr_id);
}

#ifdef HAVE_CURSES
static
bool set_statusy(int maxy)
{
    if (loginput_size)
    {
        maxy -= loginput_size;
        if (maxy < 0)
            maxy = 0;
    }
    if (logstart < maxy)
        maxy = logstart;
    if (statusy == maxy)
        return false;
    statusy = maxy;
    logcursor = statusy;
    return true;
}

/* Check for window resize. Called with curses mutex locked */
static inline void change_logwinsize(void)
{
    int x, y, logx, logy;

    getmaxyx(mainwin, y, x);
    if (x < 80 || y < 25)
        return;

    if (y > statusy + 2 && statusy < logstart) {
        set_statusy(y - 2);
        mvwin(logwin, logcursor, 0);
        bfg_wresize(statuswin, statusy, x);
    }

    y -= logcursor;
    getmaxyx(logwin, logy, logx);
    /* Detect screen size change */
    if (x != logx || y != logy)
        bfg_wresize(logwin, y, x);
}

static void check_winsizes(void)
{
    if (!use_curses)
        return;
    if (curses_active_locked()) {
        int y, x;

        x = getmaxx(statuswin);
        if (set_statusy(LINES - 2))
        {
            erase();
            bfg_wresize(statuswin, statusy, x);
            getmaxyx(mainwin, y, x);
            y -= logcursor;
            bfg_wresize(logwin, y, x);
            mvwin(logwin, logcursor, 0);
        }
        unlock_curses();
    }
}

static int device_line_id_count;

static void switch_logsize(void)
{
    if (curses_active_locked()) {
        if (opt_compact) {
            logstart = devcursor - 1;
            logcursor = logstart + 1;
        } else {
            total_lines = (opt_show_procs ? total_devices : device_line_id_count) ?: 1;
            logstart = devcursor + total_lines;
            logcursor = logstart;
        }
        unlock_curses();
    }
    check_winsizes();
}
/* For mandatory printing when mutex is already locked */
void _wlog(const char *str)
{
    static bool newline;
    // NOTE: Assumes str is non-empty; an empty string would underflow end
    size_t end = strlen(str) - 1;

    if (newline)
        bfg_waddstr(logwin, "\n");
    // Strip a trailing newline here and emit it before the next message instead
    if (str[end] == '\n')
    {
        char *s;

        newline = true;
        s = alloca(end + 1);
        memcpy(s, str, end);
        s[end] = '\0';
        str = s;
    }
    else
        newline = false;

    bfg_waddstr(logwin, str);
}
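/* Example: because the trailing newline is deferred, two successive calls
 *     _wlog("first\n");
 *     _wlog("second\n");
 * paint "first" and "second" on separate rows, but the cursor stays at the
 * end of "second"; its newline is only emitted when a third message arrives,
 * keeping the newest line on the bottom row of logwin. */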
/* Mandatory printing */
void _wlogprint(const char *str)
{
    if (curses_active_locked()) {
        _wlog(str);
        unlock_curses();
    }
}
#endif

#ifdef HAVE_CURSES
bool _log_curses_only(int prio, const char *datetime, const char *str)
{
    bool high_prio;

    high_prio = (prio == LOG_WARNING || prio == LOG_ERR);

    if (curses_active)
    {
        if (!loginput_size || high_prio) {
            wlog(" %s %s\n", datetime, str);
            if (high_prio) {
                touchwin(logwin);
                wrefresh(logwin);
            }
        }
        return true;
    }
    return false;
}

void clear_logwin(void)
{
    if (curses_active_locked()) {
        wclear(logwin);
        unlock_curses();
    }
}

void logwin_update(void)
{
    if (curses_active_locked()) {
        touchwin(logwin);
        wrefresh(logwin);
        unlock_curses();
    }
}
#endif

static void enable_pool(struct pool *pool)
{
    if (pool->enabled != POOL_ENABLED) {
        enabled_pools++;
        pool->enabled = POOL_ENABLED;
    }
}

#ifdef HAVE_CURSES
static void disable_pool(struct pool *pool)
{
    if (pool->enabled == POOL_ENABLED)
        enabled_pools--;
    pool->enabled = POOL_DISABLED;
}
#endif

static void reject_pool(struct pool *pool)
{
    if (pool->enabled == POOL_ENABLED)
        enabled_pools--;
    pool->enabled = POOL_REJECTING;
}
static double share_diff(const struct work *);

static
void share_result_msg(const struct work *work, const char *disp, const char *reason, bool resubmit, const char *worktime)
{
    struct cgpu_info *cgpu;
    const unsigned char *hashpart = &work->hash[opt_scrypt ? 26 : 24];
    char shrdiffdisp[ALLOC_H2B_SHORTV];
    const double tgtdiff = work->work_difficulty;
    char tgtdiffdisp[ALLOC_H2B_SHORTV];
    char where[20];

    cgpu = get_thr_cgpu(work->thr_id);

    suffix_string(work->share_diff, shrdiffdisp, sizeof(shrdiffdisp), 0);
    suffix_string(tgtdiff, tgtdiffdisp, sizeof(tgtdiffdisp), 0);

    if (total_pools > 1)
        snprintf(where, sizeof(where), " pool %d", work->pool->pool_no);
    else
        where[0] = '\0';

    applog(LOG_NOTICE, "%s %02x%02x%02x%02x %"PRIprepr"%s Diff %s/%s%s %s%s",
           disp,
           (unsigned)hashpart[3], (unsigned)hashpart[2], (unsigned)hashpart[1], (unsigned)hashpart[0],
           cgpu->proc_repr,
           where,
           shrdiffdisp, tgtdiffdisp,
           reason,
           resubmit ? "(resubmit)" : "",
           worktime
    );
}

static bool test_work_current(struct work *);
static void _submit_work_async(struct work *);

static
void maybe_local_submit(const struct work *work)
{
#if BLKMAKER_VERSION > 3
    if (unlikely(work->block && work->tr))
    {
        // This is a block with a full template (GBT)
        // Regardless of the result, submit to local bitcoind(s) as well
        struct work *work_cp;
        for (int i = 0; i < total_pools; ++i)
        {
            if (!uri_get_param_bool(pools[i]->rpc_url, "allblocks", false))
                continue;

            applog(LOG_DEBUG, "Attempting submission of full block to pool %d", pools[i]->pool_no);
            work_cp = copy_work(work);
            work_cp->pool = pools[i];
            work_cp->do_foreign_submit = true;
            _submit_work_async(work_cp);
        }
    }
#endif
}

/* Theoretically, threads could race when modifying the accepted and rejected
 * values, but the chance of two submits completing at the same time is
 * negligible, so there is no point adding extra locking */
static void
share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
             /*char *hashshow,*/ bool resubmit, char *worktime)
{
    struct pool *pool = work->pool;
    struct cgpu_info *cgpu;

    cgpu = get_thr_cgpu(work->thr_id);

    if ((json_is_null(err) || !err) && (json_is_null(res) || json_is_true(res))) {
        mutex_lock(&stats_lock);
        cgpu->accepted++;
        total_accepted++;
        pool->accepted++;
        cgpu->diff_accepted += work->work_difficulty;
        total_diff_accepted += work->work_difficulty;
        pool->diff_accepted += work->work_difficulty;
        mutex_unlock(&stats_lock);

        pool->seq_rejects = 0;
        cgpu->last_share_pool = pool->pool_no;
        cgpu->last_share_pool_time = time(NULL);
        cgpu->last_share_diff = work->work_difficulty;
        pool->last_share_time = cgpu->last_share_pool_time;
        pool->last_share_diff = work->work_difficulty;
        applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
        if (!QUIET) {
            share_result_msg(work, "Accepted", "", resubmit, worktime);
        }
        sharelog("accept", work);
        if (opt_shares && total_diff_accepted >= opt_shares) {
            applog(LOG_WARNING, "Successfully mined %g accepted shares as requested and exiting.", opt_shares);
            kill_work();
            return;
        }

        /* Detect if a pool that has been temporarily disabled for
         * continually rejecting shares has started accepting shares.
         * This will only happen with the work returned from a
         * longpoll */
        if (unlikely(pool->enabled == POOL_REJECTING)) {
            applog(LOG_WARNING, "Rejecting pool %d now accepting shares, re-enabling!", pool->pool_no);
            enable_pool(pool);
            switch_pools(NULL);
        }

        if (unlikely(work->block)) {
            // Force moving on to this new block :)
            struct work fakework;
            memset(&fakework, 0, sizeof(fakework));
            fakework.pool = work->pool;

            // Copy block version, bits, and time from share
            memcpy(&fakework.data[ 0], &work->data[ 0], 4);
            memcpy(&fakework.data[68], &work->data[68], 8);

            // Set prevblock to winning hash (swap32'd)
            swap32yes(&fakework.data[4], &work->hash[0], 32 / 4);

            test_work_current(&fakework);
        }
    } else {
        mutex_lock(&stats_lock);
        cgpu->rejected++;
        total_rejected++;
        pool->rejected++;
        cgpu->diff_rejected += work->work_difficulty;
        total_diff_rejected += work->work_difficulty;
        pool->diff_rejected += work->work_difficulty;
        pool->seq_rejects++;
        mutex_unlock(&stats_lock);

        applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
        if (!QUIET) {
            char where[20];
            char disposition[36] = "reject";
            char reason[32];

            strcpy(reason, "");
            if (total_pools > 1)
                snprintf(where, sizeof(where), "pool %d", work->pool->pool_no);
            else
                strcpy(where, "");

            if (!json_is_string(res))
                res = json_object_get(val, "reject-reason");
            if (res) {
                const char *reasontmp = json_string_value(res);
                size_t reasonLen = strlen(reasontmp);
                if (reasonLen > 28)
                    reasonLen = 28;
                reason[0] = ' '; reason[1] = '(';
                memcpy(2 + reason, reasontmp, reasonLen);
                reason[reasonLen + 2] = ')'; reason[reasonLen + 3] = '\0';
                memcpy(disposition + 7, reasontmp, reasonLen);
                disposition[6] = ':'; disposition[reasonLen + 7] = '\0';
            } else if (work->stratum && err && json_is_array(err)) {
                json_t *reason_val = json_array_get(err, 1);
                char *reason_str;

                if (reason_val && json_is_string(reason_val)) {
                    reason_str = (char *)json_string_value(reason_val);
                    snprintf(reason, 31, " (%s)", reason_str);
                }
            }

            share_result_msg(work, "Rejected", reason, resubmit, worktime);
            sharelog(disposition, work);
        }

        /* Once we have more than a nominal amount of sequential rejects,
         * at least 10 and more than 3 mins at the current utility,
         * disable the pool because some pool error is likely to have
         * ensued. Do not do this if we know the share just happened to
         * be stale due to networking delays.
         */
        if (pool->seq_rejects > 10 && !work->stale && opt_disable_pool && enabled_pools > 1) {
            double utility = total_accepted / total_secs * 60;

            if (pool->seq_rejects > utility * 3) {
                applog(LOG_WARNING, "Pool %d rejected %d sequential shares, disabling!",
                       pool->pool_no, pool->seq_rejects);
                reject_pool(pool);
                if (pool == current_pool())
                    switch_pools(NULL);
                pool->seq_rejects = 0;
            }
        }
    }

    maybe_local_submit(work);
}
static char *submit_upstream_work_request(struct work *work)
{
    char *hexstr = NULL;
    char *s, *sd;
    struct pool *pool = work->pool;

    if (work->tr)
    {
        blktemplate_t * const tmpl = work->tr->tmpl;
        json_t *req;
        unsigned char data[80];

        swap32yes(data, work->data, 80 / 4);
#if BLKMAKER_VERSION > 3
        if (work->do_foreign_submit)
            req = blkmk_submit_foreign_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
        else
#endif
            req = blkmk_submit_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
        s = json_dumps(req, 0);
        json_decref(req);
        // For GBT, log only a hex dump of the 80-byte header rather than the (possibly huge) full block submission
        sd = malloc(161);
        bin2hex(sd, data, 80);
    } else {
        /* build hex string */
        hexstr = malloc((sizeof(work->data) * 2) + 1);
        bin2hex(hexstr, work->data, sizeof(work->data));

        /* build JSON-RPC request */
        s = strdup("{\"method\": \"getwork\", \"params\": [ \"");
        s = realloc_strcat(s, hexstr);
        s = realloc_strcat(s, "\" ], \"id\":1}");

        free(hexstr);
        sd = s;  // for getwork, the debug string is the request itself
    }

    applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
    if (work->tr)
        free(sd);
    else
        s = realloc_strcat(s, "\n");

    return s;
}
static bool submit_upstream_work_completed(struct work *work, bool resubmit, struct timeval *ptv_submit, json_t *val)
{
    json_t *res, *err;
    bool rc = false;
    int thr_id = work->thr_id;
    struct pool *pool = work->pool;
    struct timeval tv_submit_reply;
    time_t ts_submit_reply;
    char worktime[200] = "";

    cgtime(&tv_submit_reply);
    ts_submit_reply = time(NULL);

    if (unlikely(!val)) {
        applog(LOG_INFO, "submit_upstream_work json_rpc_call failed");
        if (!pool_tset(pool, &pool->submit_fail)) {
            total_ro++;
            pool->remotefail_occasions++;
            applog(LOG_WARNING, "Pool %d communication failure, caching submissions", pool->pool_no);
        }
        goto out;
    } else if (pool_tclear(pool, &pool->submit_fail))
        applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);

    res = json_object_get(val, "result");
    err = json_object_get(val, "error");

    if (!QUIET) {
        if (opt_worktime) {
            char workclone[20];
            struct tm _tm;
            struct tm *tm, tm_getwork, tm_submit_reply;
            tm = &_tm;
            double getwork_time = tdiff((struct timeval *)&(work->tv_getwork_reply),
                                        (struct timeval *)&(work->tv_getwork));
            double getwork_to_work = tdiff((struct timeval *)&(work->tv_work_start),
                                           (struct timeval *)&(work->tv_getwork_reply));
            double work_time = tdiff((struct timeval *)&(work->tv_work_found),
                                     (struct timeval *)&(work->tv_work_start));
            double work_to_submit = tdiff(ptv_submit,
                                          (struct timeval *)&(work->tv_work_found));
            double submit_time = tdiff(&tv_submit_reply, ptv_submit);
            int diffplaces = 3;

            localtime_r(&work->ts_getwork, tm);
            memcpy(&tm_getwork, tm, sizeof(struct tm));
            localtime_r(&ts_submit_reply, tm);
            memcpy(&tm_submit_reply, tm, sizeof(struct tm));

            if (work->clone) {
                snprintf(workclone, sizeof(workclone), "C:%1.3f",
                         tdiff((struct timeval *)&(work->tv_cloned),
                               (struct timeval *)&(work->tv_getwork_reply)));
            }
            else
                strcpy(workclone, "O");

            if (work->work_difficulty < 1)
                diffplaces = 6;

            snprintf(worktime, sizeof(worktime),
                     " <-%08lx.%08lx M:%c D:%1.*f G:%02d:%02d:%02d:%1.3f %s (%1.3f) W:%1.3f (%1.3f) S:%1.3f R:%02d:%02d:%02d",
                     (unsigned long)be32toh(*(uint32_t *)&(work->data[opt_scrypt ? 32 : 28])),
                     (unsigned long)be32toh(*(uint32_t *)&(work->data[opt_scrypt ? 28 : 24])),
                     work->getwork_mode, diffplaces, work->work_difficulty,
                     tm_getwork.tm_hour, tm_getwork.tm_min,
                     tm_getwork.tm_sec, getwork_time, workclone,
                     getwork_to_work, work_time, work_to_submit, submit_time,
                     tm_submit_reply.tm_hour, tm_submit_reply.tm_min,
                     tm_submit_reply.tm_sec);
        }
    }

    share_result(val, res, err, work, resubmit, worktime);

    if (!opt_realquiet)
        print_status(thr_id);
    if (!want_per_device_stats) {
        char logline[256];
        struct cgpu_info *cgpu;

        cgpu = get_thr_cgpu(thr_id);
        get_statline(logline, sizeof(logline), cgpu);
        applog(LOG_INFO, "%s", logline);
    }

    json_decref(val);

    rc = true;
out:
    return rc;
}
/* Specifies whether we can use this pool for work or not. */
static bool pool_unworkable(const struct pool * const pool)
{
    if (pool->idle)
        return true;
    if (pool->enabled != POOL_ENABLED)
        return true;
    if (pool->has_stratum && !pool->stratum_active)
        return true;
    return false;
}

static
bool pool_actively_desired(const struct pool * const pool, const struct pool *cp)
{
    if (pool->enabled != POOL_ENABLED)
        return false;
    if (pool_strategy == POOL_LOADBALANCE || pool_strategy == POOL_BALANCE)
        return true;
    if (!cp)
        cp = current_pool();
    return (pool == cp);
}

static
bool pool_actively_in_use(const struct pool * const pool, const struct pool *cp)
{
    return (!pool_unworkable(pool)) && pool_actively_desired(pool, cp);
}

static
bool pool_supports_block_change_notification(struct pool * const pool)
{
    return pool->has_stratum || pool->lp_url;
}

static
bool pool_has_active_block_change_notification(struct pool * const pool)
{
    return pool->stratum_active || pool->lp_active;
}

static struct pool *_select_longpoll_pool(struct pool *, bool(*)(struct pool *));
#define select_longpoll_pool(pool)  _select_longpoll_pool(pool, pool_supports_block_change_notification)
#define pool_active_lp_pool(pool)   _select_longpoll_pool(pool, pool_has_active_block_change_notification)

/* In balanced mode, the amount of diff1 solutions per pool is monitored as a
 * rolling average per 10 minutes and if pools start getting more, it biases
 * away from them to distribute work evenly. The share count is reset to the
 * rolling average every 10 minutes to not send all work to one pool after it
 * has been disabled/out for an extended period. */
static struct pool *select_balanced(struct pool *cp)
{
    int i, lowest = cp->shares;
    struct pool *ret = cp;

    for (i = 0; i < total_pools; i++) {
        struct pool *pool = pools[i];

        if (pool_unworkable(pool))
            continue;
        if (pool->shares < lowest) {
            lowest = pool->shares;
            ret = pool;
        }
    }

    ret->shares++;
    return ret;
}
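/* Example: with three workable pools holding share counts {10, 4, 7} and cp
 * being the first, the loop settles on the second (lowest == 4), whose count
 * is then bumped to 5; repeated calls thus steer work toward whichever pool
 * has produced the fewest recent diff1 shares. */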
static bool pool_active(struct pool *, bool pinging);
static void pool_died(struct pool *);
static struct pool *priority_pool(int choice);
static bool pool_unusable(struct pool *pool);

/* Select any active pool in a rotating fashion when loadbalance is chosen if
 * it has any quota left. */
static inline struct pool *select_pool(bool lagging)
{
    static int rotating_pool = 0;
    struct pool *pool, *cp;
    bool avail = false;
    int tested, i;

    cp = current_pool();

retry:
    if (pool_strategy == POOL_BALANCE) {
        pool = select_balanced(cp);
        goto out;
    }

    if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only)) {
        pool = cp;
        goto out;
    } else
        pool = NULL;

    for (i = 0; i < total_pools; i++) {
        struct pool *tp = pools[i];

        if (tp->quota_used < tp->quota_gcd) {
            avail = true;
            break;
        }
    }

    /* There are no pools with quota, so reset them. */
    if (!avail) {
        for (i = 0; i < total_pools; i++)
            pools[i]->quota_used = 0;
        if (++rotating_pool >= total_pools)
            rotating_pool = 0;
    }

    /* Try to find the first pool in the rotation that is usable */
    tested = 0;
    while (!pool && tested++ < total_pools) {
        pool = pools[rotating_pool];
        if (pool->quota_used++ < pool->quota_gcd) {
            if (!pool_unworkable(pool))
                break;
            /* Failover-only flag for load-balance means distribute
             * unused quota to priority pool 0. */
            if (opt_fail_only)
                priority_pool(0)->quota_used--;
        }
        pool = NULL;
        if (++rotating_pool >= total_pools)
            rotating_pool = 0;
    }

    /* If there are no alive pools with quota, choose according to
     * priority. */
    if (!pool) {
        for (i = 0; i < total_pools; i++) {
            struct pool *tp = priority_pool(i);

            if (!pool_unusable(tp)) {
                pool = tp;
                break;
            }
        }
    }

    /* If still nothing is usable, use the current pool */
    if (!pool)
        pool = cp;

out:
    if (!pool_actively_in_use(pool, cp))
    {
        if (!pool_active(pool, false))
        {
            pool_died(pool);
            goto retry;
        }
        pool_tclear(pool, &pool->idle);
    }
    applog(LOG_DEBUG, "Selecting pool %d for work", pool->pool_no);
    return pool;
}
static double DIFFEXACTONE = 26959946667150639794667015087019630673637144422540572481103610249215.0;

static double target_diff(const unsigned char *target)
{
    double targ = 0;
    signed int i;

    // The 32-byte target is little-endian: byte 31 is most significant
    for (i = 31; i >= 0; --i)
        targ = (targ * 0x100) + target[i];

    return DIFFEXACTONE / (targ ?: 1);
}
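/* Worked example: DIFFEXACTONE is 2^224 - 1.  For the classic pdiff-1 target
 * 0x00000000FFFF0000...0000 the loop accumulates targ == 0xFFFF * 2^208, so
 * the result is (2^224 - 1) / (0xFFFF * 2^208) ~= 1.000015; halving the
 * target doubles the reported difficulty. */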
/*
 * Calculate the work share difficulty
 */
static void calc_diff(struct work *work, int known)
{
    struct cgminer_pool_stats *pool_stats = &(work->pool->cgminer_pool_stats);
    double difficulty;

    if (!known) {
        work->work_difficulty = target_diff(work->target);
    } else
        work->work_difficulty = known;

    difficulty = work->work_difficulty;

    pool_stats->last_diff = difficulty;
    suffix_string(difficulty, work->pool->diff, sizeof(work->pool->diff), 0);

    if (difficulty == pool_stats->min_diff)
        pool_stats->min_diff_count++;
    else if (difficulty < pool_stats->min_diff || pool_stats->min_diff == 0) {
        pool_stats->min_diff = difficulty;
        pool_stats->min_diff_count = 1;
    }

    if (difficulty == pool_stats->max_diff)
        pool_stats->max_diff_count++;
    else if (difficulty > pool_stats->max_diff) {
        pool_stats->max_diff = difficulty;
        pool_stats->max_diff_count = 1;
    }
}
static uint32_t benchmark_blkhdr[20];

static
void setup_benchmark_pool()
{
    struct pool *pool;
    want_longpoll = false;

    // Temporarily disable opt_benchmark to avoid auto-removal
    opt_benchmark = false;
    pool = add_pool();
    opt_benchmark = true;

    pool->rpc_url = malloc(255);
    strcpy(pool->rpc_url, "Benchmark");
    pool->rpc_user = pool->rpc_url;
    pool->rpc_pass = pool->rpc_url;
    enable_pool(pool);
    pool->idle = false;
    successful_connect = true;

    {
        uint32_t * const blkhdr = benchmark_blkhdr;
        blkhdr[2] = htobe32(1);
        blkhdr[17] = htobe32(0x7fffffff);  // timestamp
        blkhdr[18] = htobe32(0x1700ffff);  // "bits"
    }

    {
        struct stratum_work * const swork = &pool->swork;
        const int branchcount = 15;  // 1 MB block
        const size_t branchdatasz = branchcount * 0x20;
        const size_t coinbase_sz = 6 * 1024;
        bytes_resize(&swork->coinbase, coinbase_sz);
        memset(bytes_buf(&swork->coinbase), '\xff', coinbase_sz);
        swork->nonce2_offset = 0;
        bytes_resize(&swork->merkle_bin, branchdatasz);
        memset(bytes_buf(&swork->merkle_bin), '\xff', branchdatasz);
        swork->merkles = branchcount;
        memset(swork->header1, '\xff', 36);
        swork->ntime = 0x7fffffff;
        timer_unset(&swork->tv_received);
        memcpy(swork->diffbits, "\x17\0\xff\xff", 4);
        set_target_to_pdiff(swork->target, opt_scrypt ? (1./0x10000) : 1.);
        pool->nonce2sz = swork->n2size = GBT_XNONCESZ;
        pool->nonce2 = 0;
    }
}

void get_benchmark_work(struct work *work)
{
    struct pool * const pool = pools[0];
    uint32_t * const blkhdr = benchmark_blkhdr;

    for (int i = 16; i >= 0; --i)
        if (++blkhdr[i])
            break;

    memcpy(&work->data[ 0], blkhdr, 80);
    memcpy(&work->data[80], workpadding_bin, 48);
    calc_midstate(work);
    memcpy(work->target, pool->swork.target, sizeof(work->target));
    work->mandatory = true;
    work->pool = pools[0];
    cgtime(&work->tv_getwork);
    copy_time(&work->tv_getwork_reply, &work->tv_getwork);
    copy_time(&work->tv_staged, &work->tv_getwork);
    work->getwork_mode = GETWORK_MODE_BENCHMARK;
    calc_diff(work, 0);
    work_set_simple_ntime_roll_limit(work, 60);
}
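/* Note: the loop above treats header words 0..16 (the version, prevblock and
 * merkle-root fields) as one big counter with word 16 least significant, so
 * each benchmark work item hashes differently while the timestamp and "bits"
 * set up in setup_benchmark_pool() stay fixed. */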
static void wake_gws(void);

static void update_last_work(struct work *work)
{
    if (!work->tr)
        // Only save GBT jobs, since rollntime isn't coordinated well yet
        return;

    struct pool *pool = work->pool;
    mutex_lock(&pool->last_work_lock);
    if (pool->last_work_copy)
        free_work(pool->last_work_copy);
    pool->last_work_copy = copy_work(work);
    pool->last_work_copy->work_restart_id = pool->work_restart_id;
    mutex_unlock(&pool->last_work_lock);
}

static
void gbt_req_target(json_t *req)
{
    json_t *j;
    json_t *n;

    if (!request_target_str)
        return;

    j = json_object_get(req, "params");
    if (!j)
    {
        n = json_array();
        if (!n)
            return;
        if (json_object_set_new(req, "params", n))
            goto erradd;
        j = n;
    }

    n = json_array_get(j, 0);
    if (!n)
    {
        n = json_object();
        if (!n)
            return;
        if (json_array_append_new(j, n))
            goto erradd;
    }
    j = n;

    n = json_string(request_target_str);
    if (!n)
        return;
    if (json_object_set_new(j, "target", n))
        goto erradd;

    return;

erradd:
    json_decref(n);
}
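/* Sketch of the effect (values illustrative only): when request_target_str
 * is set, the outgoing getblocktemplate request gains a "target" key in its
 * first params object, roughly
 *
 *     {"method": "getblocktemplate", "params": [{..., "target": "00000000ffff..."}]}
 *
 * reusing the params array and object when they already exist and creating
 * them only when absent. */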
static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const char *lpid, bool probe)
{
    char *rpc_req;

    clean_work(work);
    switch (proto) {
        case PLP_GETWORK:
            work->getwork_mode = GETWORK_MODE_POOL;
            return strdup(getwork_req);
        case PLP_GETBLOCKTEMPLATE:
            work->getwork_mode = GETWORK_MODE_GBT;
            blktemplate_t * const tmpl = blktmpl_create();
            if (!tmpl)
                goto gbtfail2;
            work->tr = tmpl_makeref(tmpl);
            gbt_capabilities_t caps = blktmpl_addcaps(tmpl);
            if (!caps)
                goto gbtfail;
            caps |= GBT_LONGPOLL;
#if BLKMAKER_VERSION > 1
            if (opt_coinbase_script.sz)
                caps |= GBT_CBVALUE;
#endif
            json_t *req = blktmpl_request_jansson(caps, lpid);
            if (!req)
                goto gbtfail;
            if (probe)
                gbt_req_target(req);
            rpc_req = json_dumps(req, 0);
            if (!rpc_req)
                goto gbtfail;
            json_decref(req);
            return rpc_req;
        default:
            return NULL;
    }
    return NULL;

gbtfail:
    tmpl_decref(work->tr);
    work->tr = NULL;
gbtfail2:
    return NULL;
}

#define prepare_rpc_req(work, proto, lpid)        prepare_rpc_req2(work, proto, lpid, false)
#define prepare_rpc_req_probe(work, proto, lpid)  prepare_rpc_req2(work, proto, lpid, true)

static const char *pool_protocol_name(enum pool_protocol proto)
{
    switch (proto) {
        case PLP_GETBLOCKTEMPLATE:
            return "getblocktemplate";
        case PLP_GETWORK:
            return "getwork";
        default:
            return "UNKNOWN";
    }
}

static enum pool_protocol pool_protocol_fallback(enum pool_protocol proto)
{
    switch (proto) {
        case PLP_GETBLOCKTEMPLATE:
            if (want_getwork)
                return PLP_GETWORK;
            // fallthrough
        default:
            return PLP_NONE;
    }
}
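/* Example: a pool whose getblocktemplate call fails is retried with getwork
 * (when want_getwork allows it); a failed getwork has no further fallback,
 * so PLP_NONE is returned and the caller gives up on the request. */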
static bool get_upstream_work(struct work *work, CURL *curl)
{
    struct pool *pool = work->pool;
    struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats);
    struct timeval tv_elapsed;
    json_t *val = NULL;
    bool rc = false;
    char *url;
    enum pool_protocol proto;
    char *rpc_req;

    if (pool->proto == PLP_NONE)
        pool->proto = PLP_GETBLOCKTEMPLATE;

tryagain:
    rpc_req = prepare_rpc_req(work, pool->proto, NULL);
    work->pool = pool;
    if (!rpc_req)
        return false;

    applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);
    url = pool->rpc_url;

    cgtime(&work->tv_getwork);

    val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req, false,
                        false, &work->rolltime, pool, false);
    pool_stats->getwork_attempts++;
    free(rpc_req);

    if (likely(val)) {
        rc = work_decode(pool, work, val);
        if (unlikely(!rc))
            applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
    } else if (PLP_NONE != (proto = pool_protocol_fallback(pool->proto))) {
        applog(LOG_WARNING, "Pool %u failed getblocktemplate request; falling back to getwork protocol", pool->pool_no);
        pool->proto = proto;
        goto tryagain;
    } else
        applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");

    cgtime(&work->tv_getwork_reply);
    timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed);
    pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
    pool_stats->getwork_wait_rolling /= 1.63;

    timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
    if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
        pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
        pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec;
    }
    if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) {
        pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec;
        pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec;
    }
    pool_stats->getwork_calls++;

    work->pool = pool;
    work->longpoll = false;
    calc_diff(work, 0);
    total_getworks++;
    pool->getwork_requested++;

    if (rc)
        update_last_work(work);

    if (likely(val))
        json_decref(val);

    return rc;
}
#ifdef HAVE_CURSES
static void disable_curses(void)
{
    if (curses_active_locked()) {
        use_curses = false;
        curses_active = false;
        leaveok(logwin, false);
        leaveok(statuswin, false);
        leaveok(mainwin, false);
        nocbreak();
        echo();
        delwin(logwin);
        delwin(statuswin);
        delwin(mainwin);
        endwin();
#ifdef WIN32
        // Move the cursor to after curses output.
        HANDLE hout = GetStdHandle(STD_OUTPUT_HANDLE);
        CONSOLE_SCREEN_BUFFER_INFO csbi;
        COORD coord;

        if (GetConsoleScreenBufferInfo(hout, &csbi)) {
            coord.X = 0;
            coord.Y = csbi.dwSize.Y - 1;
            SetConsoleCursorPosition(hout, coord);
        }
#endif
        unlock_curses();
    }
}
#endif
static void __kill_work(void)
{
    struct cgpu_info *cgpu;
    struct thr_info *thr;
    int i;

    if (!successful_connect)
        return;

    applog(LOG_INFO, "Received kill message");

    shutting_down = true;

    applog(LOG_DEBUG, "Prompting submit_work thread to finish");
    notifier_wake(submit_waiting_notifier);

#ifdef USE_LIBMICROHTTPD
    httpsrv_stop();
#endif

    applog(LOG_DEBUG, "Killing off watchpool thread");
    /* Kill the watchpool thread */
    thr = &control_thr[watchpool_thr_id];
    thr_info_cancel(thr);

    applog(LOG_DEBUG, "Killing off watchdog thread");
    /* Kill the watchdog thread */
    thr = &control_thr[watchdog_thr_id];
    thr_info_cancel(thr);

    applog(LOG_DEBUG, "Shutting down mining threads");
    for (i = 0; i < mining_threads; i++) {
        thr = get_thread(i);
        if (!thr)
            continue;
        cgpu = thr->cgpu;
        if (!cgpu)
            continue;
        if (!cgpu->threads)
            continue;

        cgpu->shutdown = true;
        thr->work_restart = true;
        notifier_wake(thr->notifier);
        notifier_wake(thr->work_restart_notifier);
    }

    sleep(1);

    applog(LOG_DEBUG, "Killing off mining threads");
    /* Kill the mining threads */
    for (i = 0; i < mining_threads; i++) {
        thr = get_thread(i);
        if (!thr)
            continue;
        cgpu = thr->cgpu;
        if (cgpu->threads)
        {
            applog(LOG_WARNING, "Killing %"PRIpreprv, thr->cgpu->proc_repr);
            thr_info_cancel(thr);
        }
        cgpu->status = LIFE_DEAD2;
    }

    /* Stop the others */
    applog(LOG_DEBUG, "Killing off API thread");
    thr = &control_thr[api_thr_id];
    thr_info_cancel(thr);
}

/* This should be the common exit path */
void kill_work(void)
{
    __kill_work();

    quit(0, "Shutdown signal received.");
}

static
#ifdef WIN32
#ifndef _WIN64
const
#endif
#endif
char **initial_args;

void _bfg_clean_up(bool);

void app_restart(void)
{
    applog(LOG_WARNING, "Attempting to restart %s", packagename);

    __kill_work();
    _bfg_clean_up(true);

#if defined(unix) || defined(__APPLE__)
    if (forkpid > 0) {
        kill(forkpid, SIGTERM);
        forkpid = 0;
    }
#endif

    execv(initial_args[0], initial_args);
    applog(LOG_WARNING, "Failed to restart application");
}

static void sighandler(int __maybe_unused sig)
{
    /* Restore signal handlers so we can still quit if kill_work fails */
    sigaction(SIGTERM, &termhandler, NULL);
    sigaction(SIGINT, &inthandler, NULL);
    kill_work();
}
static void start_longpoll(void);
static void stop_longpoll(void);

/* Called with pool_lock held. Recruit an extra curl if none are available for
 * this pool. */
static void recruit_curl(struct pool *pool)
{
    struct curl_ent *ce = calloc(1, sizeof(struct curl_ent));

    if (unlikely(!ce))
        quit(1, "Failed to calloc in recruit_curl");

    ce->curl = curl_easy_init();
    if (unlikely(!ce->curl))
        quit(1, "Failed to init in recruit_curl");

    LL_PREPEND(pool->curllist, ce);
    pool->curls++;
}

/* Grab an available curl if there is one. If not, then recruit extra curls
 * unless we are in a submit_fail situation, or we have opt_delaynet enabled
 * and there are already 5 curls in circulation. Limit total number to the
 * number of mining threads per pool as well to prevent blasting a pool during
 * network delays/outages. */
static struct curl_ent *pop_curl_entry3(struct pool *pool, int blocking)
{
    int curl_limit = opt_delaynet ? 5 : (mining_threads + opt_queue) * 2;
    bool recruited = false;
    struct curl_ent *ce;

    mutex_lock(&pool->pool_lock);
retry:
    if (!pool->curls) {
        recruit_curl(pool);
        recruited = true;
    } else if (!pool->curllist) {
        if (blocking < 2 && pool->curls >= curl_limit && (blocking || pool->curls >= opt_submit_threads)) {
            if (!blocking) {
                mutex_unlock(&pool->pool_lock);
                return NULL;
            }
            pthread_cond_wait(&pool->cr_cond, &pool->pool_lock);
            goto retry;
        } else {
            recruit_curl(pool);
            recruited = true;
        }
    }
    ce = pool->curllist;
    LL_DELETE(pool->curllist, ce);
    mutex_unlock(&pool->pool_lock);

    if (recruited)
        applog(LOG_DEBUG, "Recruited curl for pool %d", pool->pool_no);
    return ce;
}

static struct curl_ent *pop_curl_entry2(struct pool *pool, bool blocking)
{
    return pop_curl_entry3(pool, blocking ? 1 : 0);
}

__maybe_unused
static struct curl_ent *pop_curl_entry(struct pool *pool)
{
    return pop_curl_entry3(pool, 1);
}

static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
{
    mutex_lock(&pool->pool_lock);
    if (!ce || !ce->curl)
        quithere(1, "Attempted to add NULL");
    LL_PREPEND(pool->curllist, ce);
    cgtime(&ce->tv);
    pthread_cond_broadcast(&pool->cr_cond);
    mutex_unlock(&pool->pool_lock);
}
bool stale_work(struct work *work, bool share);

static inline bool should_roll(struct work *work)
{
    struct timeval now;
    time_t expiry;

    if (!pool_actively_in_use(work->pool, NULL))
        return false;

    if (stale_work(work, false))
        return false;

    if (work->rolltime > opt_scantime)
        expiry = work->rolltime;
    else
        expiry = opt_scantime;
    expiry = expiry * 2 / 3;

    /* We shouldn't roll if we're unlikely to get one share's duration
     * of work out of doing so */
    cgtime(&now);
    if (now.tv_sec - work->tv_staged.tv_sec > expiry)
        return false;

    return true;
}
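/* Example: with opt_scantime 60 and work->rolltime 120, expiry becomes
 * 120 * 2 / 3 == 80, so work staged more than 80 seconds ago is not rolled:
 * it would likely go stale before yielding a full share's worth of work. */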
/* Limit rolls to 7000 so that the rolled ntime does not go beyond 2 hours
 * into the future, where bitcoind will reject blocks as invalid. */
static inline bool can_roll(struct work *work)
{
    if (work->stratum)
        return false;
    if (!(work->pool && !work->clone))
        return false;
    if (work->tr)
    {
        if (stale_work(work, false))
            return false;
        return blkmk_work_left(work->tr->tmpl);
    }
    return (work->rolltime &&
            work->rolls < 7000 && !stale_work(work, false));
}

static void roll_work(struct work *work)
{
    if (work->tr)
    {
        struct timeval tv_now;
        cgtime(&tv_now);
        if (blkmk_get_data(work->tr->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
            applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
        swap32yes(work->data, work->data, 80 / 4);
        calc_midstate(work);
        applog(LOG_DEBUG, "Successfully rolled extranonce to dataid %u", work->dataid);
    } else {
        uint32_t *work_ntime;
        uint32_t ntime;

        work_ntime = (uint32_t *)(work->data + 68);
        ntime = be32toh(*work_ntime);
        ntime++;
        *work_ntime = htobe32(ntime);
        work_set_simple_ntime_roll_limit(work, 0);
        applog(LOG_DEBUG, "Successfully rolled time header in work");
    }

    local_work++;
    work->rolls++;
    work->blk.nonce = 0;

    /* This is now a different work item so it needs a different ID for the
     * hashtable */
    work->id = total_work++;
}
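/* Example (non-GBT path): the four bytes at data+68 hold ntime big-endian,
 * so a roll rewrites {0x53, 0x05, 0x8e, 0xa7} to {0x53, 0x05, 0x8e, 0xa8},
 * advancing the header timestamp by exactly one second. */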
/* Duplicates any dynamically allocated arrays within the work struct to
 * prevent a copied work struct from freeing RAM belonging to another struct */
static void _copy_work(struct work *work, const struct work *base_work, int noffset)
{
    int id = work->id;

    clean_work(work);
    memcpy(work, base_work, sizeof(struct work));
    /* Keep the unique new id assigned during make_work to prevent copied
     * work from having the same id. */
    work->id = id;
    if (base_work->job_id)
        work->job_id = strdup(base_work->job_id);
    if (base_work->nonce1)
        work->nonce1 = strdup(base_work->nonce1);
    bytes_cpy(&work->nonce2, &base_work->nonce2);
    if (base_work->tr)
        tmpl_incref(base_work->tr);

    if (noffset)
    {
        uint32_t *work_ntime = (uint32_t *)(work->data + 68);
        uint32_t ntime = be32toh(*work_ntime);

        ntime += noffset;
        *work_ntime = htobe32(ntime);
    }

    if (work->device_data_dup_func)
        work->device_data = work->device_data_dup_func(work);
}

/* Generates a copy of an existing work struct, creating fresh heap allocations
 * for all dynamically allocated arrays within the struct */
struct work *copy_work(const struct work *base_work)
{
    struct work *work = make_work();

    _copy_work(work, base_work, 0);

    return work;
}

void __copy_work(struct work *work, const struct work *base_work)
{
    _copy_work(work, base_work, 0);
}

static struct work *make_clone(struct work *work)
{
    struct work *work_clone = copy_work(work);

    work_clone->clone = true;
    cgtime((struct timeval *)&(work_clone->tv_cloned));
    work_clone->longpoll = false;
    work_clone->mandatory = false;
    /* Make cloned work appear slightly older to bias towards keeping the
     * master work item which can be further rolled */
    work_clone->tv_staged.tv_sec -= 1;

    return work_clone;
}

static void stage_work(struct work *work);

static bool clone_available(void)
{
    struct work *work_clone = NULL, *work, *tmp;
    bool cloned = false;

    mutex_lock(stgd_lock);
    if (!staged_rollable)
        goto out_unlock;

    HASH_ITER(hh, staged_work, work, tmp) {
        if (can_roll(work) && should_roll(work)) {
            roll_work(work);
            work_clone = make_clone(work);
            applog(LOG_DEBUG, "%s: Rolling work %d to %d", __func__, work->id, work_clone->id);
            // Roll the master again so it stays distinct from the clone just made
            roll_work(work);
            cloned = true;
            break;
        }
    }

out_unlock:
    mutex_unlock(stgd_lock);

    if (cloned) {
        applog(LOG_DEBUG, "Pushing cloned available work to stage thread");
        stage_work(work_clone);
    }

    return cloned;
}
static void pool_died(struct pool *pool)
{
    if (!pool_tset(pool, &pool->idle)) {
        cgtime(&pool->tv_idle);
        if (pool == current_pool()) {
            applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
            switch_pools(NULL);
        } else
            applog(LOG_INFO, "Pool %d %s failed to return work", pool->pool_no, pool->rpc_url);
    }
}

bool stale_work(struct work *work, bool share)
{
    unsigned work_expiry;
    struct pool *pool;
    uint32_t block_id;
    unsigned getwork_delay;

    if (opt_benchmark)
        return false;

    block_id = ((uint32_t*)work->data)[1];
    pool = work->pool;

    /* Technically the rolltime should be correct but some pools
     * advertise a broken expire= that is lower than a meaningful
     * scantime */
    if (work->rolltime >= opt_scantime || work->tr)
        work_expiry = work->rolltime;
    else
        work_expiry = opt_expiry;

    unsigned max_expiry = (have_longpoll ? opt_expiry_lp : opt_expiry);
    if (work_expiry > max_expiry)
        work_expiry = max_expiry;

    if (share) {
        /* If the share isn't on this pool's latest block, it's stale */
        if (pool->block_id && pool->block_id != block_id)
        {
            applog(LOG_DEBUG, "Share stale due to block mismatch (%08lx != %08lx)", (long)block_id, (long)pool->block_id);
            return true;
        }

        /* If the pool doesn't want old shares, then any found in work before
         * the most recent longpoll is stale */
        if ((!pool->submit_old) && work->work_restart_id != pool->work_restart_id)
        {
            applog(LOG_DEBUG, "Share stale due to mandatory work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id);
            return true;
        }
    } else {
        /* If this work isn't for the latest Bitcoin block, it's stale */
        /* But only care about the current pool if failover-only */
        if (enabled_pools <= 1 || opt_fail_only) {
            if (pool->block_id && block_id != pool->block_id)
            {
                applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 1 ? %08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)current_block_id);
                return true;
            }
        } else {
            if (block_id != current_block_id)
            {
                applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 0 ? %08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)current_block_id);
                return true;
            }
        }

        /* If the pool has asked us to restart since this work, it's stale */
        if (work->work_restart_id != pool->work_restart_id)
        {
            applog(LOG_DEBUG, "Work stale due to work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id);
            return true;
        }

        if (pool->has_stratum && work->job_id) {
            bool same_job;

            if (!pool->stratum_active || !pool->stratum_notify) {
                applog(LOG_DEBUG, "Work stale due to stratum inactive");
                return true;
            }

            same_job = true;
            cg_rlock(&pool->data_lock);
            if (strcmp(work->job_id, pool->swork.job_id))
                same_job = false;
            cg_runlock(&pool->data_lock);
            if (!same_job) {
                applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
                return true;
            }
        }

        /* Factor in the average getwork delay of this pool, rounding it up to
         * the nearest second */
        getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1;
        if (unlikely(work_expiry <= getwork_delay + 5))
            work_expiry = 5;
        else
            work_expiry -= getwork_delay;
    }

    int elapsed_since_staged = timer_elapsed(&work->tv_staged, NULL);
    if (elapsed_since_staged > work_expiry) {
        applog(LOG_DEBUG, "%s stale due to expiry (%d >= %u)", share ? "Share" : "Work", elapsed_since_staged, work_expiry);
        return true;
    }

    /* If the user only wants strict failover, any work from a pool other than
     * the current one is always considered stale */
    if (opt_fail_only && !share && !work->mandatory && !pool_actively_in_use(pool, NULL))
    {
        applog(LOG_DEBUG, "Work stale due to fail only pool mismatch (pool %u vs %u)", pool->pool_no, current_pool()->pool_no);
        return true;
    }

    return false;
}
static double share_diff(const struct work *work)
{
    double ret;
    bool new_best = false;

    ret = target_diff(work->hash);

    cg_wlock(&control_lock);
    if (unlikely(ret > best_diff)) {
        new_best = true;
        best_diff = ret;
        suffix_string(best_diff, best_share, sizeof(best_share), 0);
    }
    if (unlikely(ret > work->pool->best_diff))
        work->pool->best_diff = ret;
    cg_wunlock(&control_lock);

    if (unlikely(new_best))
        applog(LOG_INFO, "New best share: %s", best_share);

    return ret;
}

static
void work_check_for_block(struct work * const work)
{
    work->share_diff = share_diff(work);
    if (unlikely(work->share_diff >= current_diff)) {
        work->block = true;
        work->pool->solved++;
        found_blocks++;
        work->mandatory = true;
        applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no);
    }
}
static void submit_discard_share2(const char *reason, struct work *work)
{
	struct cgpu_info *cgpu = get_thr_cgpu(work->thr_id);

	sharelog(reason, work);

	mutex_lock(&stats_lock);
	++total_stale;
	++cgpu->stale;
	++(work->pool->stale_shares);
	total_diff_stale += work->work_difficulty;
	cgpu->diff_stale += work->work_difficulty;
	work->pool->diff_stale += work->work_difficulty;
	mutex_unlock(&stats_lock);
}

static void submit_discard_share(struct work *work)
{
	submit_discard_share2("discard", work);
}

struct submit_work_state {
	struct work *work;
	bool resubmit;
	struct curl_ent *ce;
	int failures;
	struct timeval tv_staleexpire;
	char *s;
	struct timeval tv_submit;
	struct submit_work_state *next;
};

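/* libcurl multi timer callback (CURLMOPT_TIMERFUNCTION): libcurl reports the
 * longest time we may wait before calling curl_multi_perform again, in
 * milliseconds (or -1 for none).  We stash it, converted to microseconds for
 * the select() loop below, clamping first so the multiplication cannot
 * overflow a long. */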
static int my_curl_timer_set(__maybe_unused CURLM *curlm, long timeout_ms, void *userp)
{
	long *p_timeout_us = userp;
	const long max_ms = LONG_MAX / 1000;
	if (max_ms < timeout_ms)
		timeout_ms = max_ms;
	*p_timeout_us = timeout_ms * 1000;
	return 0;
}

static void sws_has_ce(struct submit_work_state *sws)
{
	struct pool *pool = sws->work->pool;
	sws->s = submit_upstream_work_request(sws->work);
	cgtime(&sws->tv_submit);
	json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws);
}

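/* Prepare a work item for submission.  Three outcomes are possible: stratum
 * work just gets a buffer for the eventual mining.submit line (written once
 * the pool socket is writable); getwork-style work either starts an
 * asynchronous JSON-RPC call immediately (curl handle available) or is
 * queued on the pool's sws_waiting_on_curl list until one frees up.
 * Returns NULL if the share was discarded as stale. */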
static struct submit_work_state *begin_submission(struct work *work)
{
	struct pool *pool;
	struct submit_work_state *sws = NULL;

	pool = work->pool;
	sws = malloc(sizeof(*sws));
	*sws = (struct submit_work_state){
		.work = work,
	};

	work_check_for_block(work);

	if (stale_work(work, true)) {
		work->stale = true;
		if (opt_submit_stale)
			applog(LOG_NOTICE, "Pool %d stale share detected, submitting as user requested", pool->pool_no);
		else if (pool->submit_old)
			applog(LOG_NOTICE, "Pool %d stale share detected, submitting as pool requested", pool->pool_no);
		else {
			applog(LOG_NOTICE, "Pool %d stale share detected, discarding", pool->pool_no);
			submit_discard_share(work);
			goto out;
		}
		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
	}

	if (work->stratum) {
		char *s;
		s = malloc(1024);
		sws->s = s;
	} else {
		/* submit solution to bitcoin via JSON-RPC */
		sws->ce = pop_curl_entry2(pool, false);
		if (sws->ce) {
			sws_has_ce(sws);
		} else {
			sws->next = pool->sws_waiting_on_curl;
			pool->sws_waiting_on_curl = sws;
			if (sws->next)
				applog(LOG_DEBUG, "submit_thread queuing submission");
			else
				applog(LOG_WARNING, "submit_thread queuing submissions (see --submit-threads)");
		}
	}

	return sws;

out:
	free(sws);
	return NULL;
}

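/* Called when an asynchronous submission attempt fails.  Re-checks staleness
 * (a share may go stale while retrying), honours opt_submit_stale and
 * pool->submit_old, enforces the opt_retries limit, gives stale shares at
 * most five minutes of retrying, and otherwise restarts the JSON-RPC call. */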
static bool retry_submission(struct submit_work_state *sws)
{
	struct work *work = sws->work;
	struct pool *pool = work->pool;

	sws->resubmit = true;
	if ((!work->stale) && stale_work(work, true)) {
		work->stale = true;
		if (opt_submit_stale)
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as user requested", pool->pool_no);
		else if (pool->submit_old)
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as pool requested", pool->pool_no);
		else {
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, discarding", pool->pool_no);
			submit_discard_share(work);
			return false;
		}
		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
	}

	if (unlikely((opt_retries >= 0) && (++sws->failures > opt_retries))) {
		applog(LOG_ERR, "Pool %d failed %d submission retries, discarding", pool->pool_no, opt_retries);
		submit_discard_share(work);
		return false;
	}
	else if (work->stale) {
		if (unlikely(opt_retries < 0 && timer_passed(&sws->tv_staleexpire, NULL)))
		{
			applog(LOG_NOTICE, "Pool %d stale share failed to submit for 5 minutes, discarding", pool->pool_no);
			submit_discard_share(work);
			return false;
		}
	}

	/* pause, then restart work-request loop */
	applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying");

	cgtime(&sws->tv_submit);
	json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws);

	return true;
}

static void free_sws(struct submit_work_state *sws)
{
	free(sws->s);
	free_work(sws->work);
	free(sws);
}

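/* Dedicated share-submission thread.  Each pass of the loop: (1) drains the
 * submit_waiting list into per-submission state, (2) select()s on the curl
 * multi fd set, the sockets of pools with pending stratum submissions, and
 * the submit_waiting notifier, using the timeout libcurl requested via
 * my_curl_timer_set, (3) writes any ready stratum submissions, and (4) reaps
 * completed JSON-RPC calls, retrying failures or recycling their curl
 * handles to submissions queued on sws_waiting_on_curl. */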
static void *submit_work_thread(__maybe_unused void *userdata)
{
	int wip = 0;
	CURLM *curlm;
	long curlm_timeout_us = -1;
	struct timeval curlm_timer;
	struct submit_work_state *sws, **swsp;
	struct submit_work_state *write_sws = NULL;
	unsigned tsreduce = 0;

	pthread_detach(pthread_self());

	RenameThread("submit_work");

	applog(LOG_DEBUG, "Creating extra submit work thread");

	curlm = curl_multi_init();
	curlm_timeout_us = -1;
	curl_multi_setopt(curlm, CURLMOPT_TIMERDATA, &curlm_timeout_us);
	curl_multi_setopt(curlm, CURLMOPT_TIMERFUNCTION, my_curl_timer_set);

	fd_set rfds, wfds, efds;
	int maxfd;
	struct timeval tv_timeout, tv_now;
	int n;
	CURLMsg *cm;
	FD_ZERO(&rfds);
	while (1) {
		mutex_lock(&submitting_lock);
		total_submitting -= tsreduce;
		tsreduce = 0;
		if (FD_ISSET(submit_waiting_notifier[0], &rfds)) {
			notifier_read(submit_waiting_notifier);
		}

		// Receive any new submissions
		while (submit_waiting) {
			struct work *work = submit_waiting;
			DL_DELETE(submit_waiting, work);
			if ( (sws = begin_submission(work)) ) {
				if (sws->ce)
					curl_multi_add_handle(curlm, sws->ce->curl);
				else if (sws->s) {
					sws->next = write_sws;
					write_sws = sws;
				}
				++wip;
			}
			else {
				--total_submitting;
				free_work(work);
			}
		}

		if (unlikely(shutting_down && !wip))
			break;
		mutex_unlock(&submitting_lock);

		FD_ZERO(&rfds);
		FD_ZERO(&wfds);
		FD_ZERO(&efds);
		tv_timeout.tv_sec = -1;

		// Setup cURL with select
		// Need to call perform to ensure the timeout gets updated
		curl_multi_perform(curlm, &n);
		curl_multi_fdset(curlm, &rfds, &wfds, &efds, &maxfd);
		if (curlm_timeout_us >= 0)
		{
			timer_set_delay_from_now(&curlm_timer, curlm_timeout_us);
			reduce_timeout_to(&tv_timeout, &curlm_timer);
		}

		// Setup waiting stratum submissions with select
		for (sws = write_sws; sws; sws = sws->next)
		{
			struct pool *pool = sws->work->pool;
			int fd = pool->sock;
			if (fd == INVSOCK || (!pool->stratum_init) || !pool->stratum_notify)
				continue;
			FD_SET(fd, &wfds);
			set_maxfd(&maxfd, fd);
		}

		// Setup "submit waiting" notifier with select
		FD_SET(submit_waiting_notifier[0], &rfds);
		set_maxfd(&maxfd, submit_waiting_notifier[0]);

		// Wait for something interesting to happen :)
		cgtime(&tv_now);
		if (select(maxfd+1, &rfds, &wfds, &efds, select_timeout(&tv_timeout, &tv_now)) < 0) {
			FD_ZERO(&rfds);
			continue;
		}

		// Handle any stratum ready-to-write results
		for (swsp = &write_sws; (sws = *swsp); ) {
			struct work *work = sws->work;
			struct pool *pool = work->pool;
			int fd = pool->sock;
			bool sessionid_match;

			if (fd == INVSOCK || (!pool->stratum_init) || (!pool->stratum_notify) || !FD_ISSET(fd, &wfds)) {
next_write_sws:
				// TODO: Check if stale, possibly discard etc
				swsp = &sws->next;
				continue;
			}

			cg_rlock(&pool->data_lock);
			// NOTE: cgminer only does this check on retries, but BFGMiner does it for even the first/normal submit; therefore, it needs to be such that it always is true on the same connection regardless of session management
			// NOTE: Worst case scenario for a false positive: the pool rejects it as H-not-zero
			sessionid_match = (!pool->swork.nonce1) || !strcmp(work->nonce1, pool->swork.nonce1);
			cg_runlock(&pool->data_lock);
			if (!sessionid_match)
			{
				applog(LOG_DEBUG, "No matching session id for resubmitting stratum share");
				submit_discard_share2("disconnect", work);
				++tsreduce;
next_write_sws_del:
				// Clear the fd from wfds, to avoid potentially blocking on other submissions to the same socket
				FD_CLR(fd, &wfds);
				// Delete sws for this submission, since we're done with it
				*swsp = sws->next;
				free_sws(sws);
				--wip;
				continue;
			}

			char *s = sws->s;
			struct stratum_share *sshare = calloc(sizeof(struct stratum_share), 1);
			int sshare_id;
			uint32_t nonce;
			char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1];
			char noncehex[9];
			char ntimehex[9];

			sshare->work = copy_work(work);
			bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
			nonce = *((uint32_t *)(work->data + 76));
			bin2hex(noncehex, (const unsigned char *)&nonce, 4);
			bin2hex(ntimehex, (void *)&work->data[68], 4);

			mutex_lock(&sshare_lock);
			/* Give the stratum share a unique id */
			sshare_id =
			sshare->id = swork_id++;
			HASH_ADD_INT(stratum_shares, id, sshare);
			snprintf(s, 1024, "{\"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\": %d, \"method\": \"mining.submit\"}",
			         pool->rpc_user, work->job_id, nonce2hex, ntimehex, noncehex, sshare->id);
			mutex_unlock(&sshare_lock);

			applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->stratum_url, s);

			if (likely(stratum_send(pool, s, strlen(s)))) {
				if (pool_tclear(pool, &pool->submit_fail))
					applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
				applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
				goto next_write_sws_del;
			} else if (!pool_tset(pool, &pool->submit_fail)) {
				// Undo stuff
				mutex_lock(&sshare_lock);
				// NOTE: Need to find it again in case something else has consumed it already (like the stratum-disconnect resubmitter...)
				HASH_FIND_INT(stratum_shares, &sshare_id, sshare);
				if (sshare)
					HASH_DEL(stratum_shares, sshare);
				mutex_unlock(&sshare_lock);
				if (sshare)
				{
					free_work(sshare->work);
					free(sshare);
				}

				applog(LOG_WARNING, "Pool %d stratum share submission failure", pool->pool_no);
				total_ro++;
				pool->remotefail_occasions++;

				if (!sshare)
					goto next_write_sws_del;
				goto next_write_sws;
			}
		}

		// Handle any cURL activities
		curl_multi_perform(curlm, &n);
		while ((cm = curl_multi_info_read(curlm, &n))) {
			if (cm->msg == CURLMSG_DONE)
			{
				bool finished;
				json_t *val = json_rpc_call_completed(cm->easy_handle, cm->data.result, false, NULL, &sws);
				curl_multi_remove_handle(curlm, cm->easy_handle);
				finished = submit_upstream_work_completed(sws->work, sws->resubmit, &sws->tv_submit, val);
				if (!finished) {
					if (retry_submission(sws))
						curl_multi_add_handle(curlm, sws->ce->curl);
					else
						finished = true;
				}
				if (finished) {
					--wip;
					++tsreduce;
					struct pool *pool = sws->work->pool;
					if (pool->sws_waiting_on_curl) {
						pool->sws_waiting_on_curl->ce = sws->ce;
						sws_has_ce(pool->sws_waiting_on_curl);
						pool->sws_waiting_on_curl = pool->sws_waiting_on_curl->next;
						curl_multi_add_handle(curlm, sws->ce->curl);
					} else {
						push_curl_entry(sws->ce, sws->work->pool);
					}
					free_sws(sws);
				}
			}
		}
	}
	assert(!write_sws);
	mutex_unlock(&submitting_lock);

	curl_multi_cleanup(curlm);

	applog(LOG_DEBUG, "submit_work thread exiting");

	return NULL;
}

/* Find the pool that currently has the highest priority */
static struct pool *priority_pool(int choice)
{
	struct pool *ret = NULL;
	int i;

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (pool->prio == choice) {
			ret = pool;
			break;
		}
	}

	if (unlikely(!ret)) {
		applog(LOG_ERR, "WTF No pool %d found!", choice);
		return pools[choice];
	}
	return ret;
}

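/* Reassign pool priorities from a comma-separated list of pool ids; e.g.
 * "2,0,1" makes pool 2 the highest priority.  Pools not mentioned keep their
 * relative order and are appended after the listed ones.  Errors (no pools,
 * missing/invalid/duplicate id) are reported without changing anything. */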
int prioritize_pools(char *param, int *pid)
{
	char *ptr, *next;
	int i, pr, prio = 0;

	if (total_pools == 0) {
		return MSG_NOPOOL;
	}

	if (param == NULL || *param == '\0') {
		return MSG_MISPID;
	}

	bool pools_changed[total_pools];
	int new_prio[total_pools];
	for (i = 0; i < total_pools; ++i)
		pools_changed[i] = false;

	next = param;
	while (next && *next) {
		ptr = next;
		next = strchr(ptr, ',');
		if (next)
			*(next++) = '\0';

		i = atoi(ptr);
		if (i < 0 || i >= total_pools) {
			*pid = i;
			return MSG_INVPID;
		}

		if (pools_changed[i]) {
			*pid = i;
			return MSG_DUPPID;
		}

		pools_changed[i] = true;
		new_prio[i] = prio++;
	}

	// Only change them if no errors
	for (i = 0; i < total_pools; i++) {
		if (pools_changed[i])
			pools[i]->prio = new_prio[i];
	}

	// In priority order, cycle through the unchanged pools and append them
	for (pr = 0; pr < total_pools; pr++)
		for (i = 0; i < total_pools; i++) {
			if (!pools_changed[i] && pools[i]->prio == pr) {
				pools[i]->prio = prio++;
				pools_changed[i] = true;
				break;
			}
		}

	if (current_pool()->prio)
		switch_pools(NULL);

	return MSG_POOLPRIO;
}

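/* Ensure pool priorities form a permutation of 0..total_pools-1: the first
 * pool seen holding a given in-range priority keeps it; every other pool is
 * reassigned the lowest unused priority, with a warning. */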
void validate_pool_priorities(void)
{
	// TODO: this should probably do some sort of logging
	int i, j;
	bool used[total_pools];
	bool valid[total_pools];

	for (i = 0; i < total_pools; i++)
		used[i] = valid[i] = false;

	for (i = 0; i < total_pools; i++) {
		if (pools[i]->prio >= 0 && pools[i]->prio < total_pools) {
			if (!used[pools[i]->prio]) {
				valid[i] = true;
				used[pools[i]->prio] = true;
			}
		}
	}

	for (i = 0; i < total_pools; i++) {
		if (!valid[i]) {
			for (j = 0; j < total_pools; j++) {
				if (!used[j]) {
					applog(LOG_WARNING, "Pool %d priority changed from %d to %d", i, pools[i]->prio, j);
					pools[i]->prio = j;
					used[j] = true;
					break;
				}
			}
		}
	}
}

static void clear_pool_work(struct pool *pool);

/* Specifies whether we can switch to this pool or not. */
static bool pool_unusable(struct pool *pool)
{
	if (pool->idle)
		return true;
	if (pool->enabled != POOL_ENABLED)
		return true;
	return false;
}

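/* Switch the current pool, under control_lock.  If a pool was explicitly
 * selected it is promoted to priority 0 (pools above it shuffle down one);
 * otherwise the strategy decides: balance/failover/load-balance pick the
 * highest-priority usable pool, round-robin/rotate pick the next alive one.
 * Waiters on lp_cond are woken afterwards so longpolls re-evaluate. */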
void switch_pools(struct pool *selected)
{
	struct pool *pool, *last_pool;
	int i, pool_no, next_pool;

	cg_wlock(&control_lock);
	last_pool = currentpool;
	pool_no = currentpool->pool_no;

	/* Switch selected to pool number 0 and move the rest down */
	if (selected) {
		if (selected->prio != 0) {
			for (i = 0; i < total_pools; i++) {
				pool = pools[i];
				if (pool->prio < selected->prio)
					pool->prio++;
			}
			selected->prio = 0;
		}
	}

	switch (pool_strategy) {
		/* All of these set to the master pool */
		case POOL_BALANCE:
		case POOL_FAILOVER:
		case POOL_LOADBALANCE:
			for (i = 0; i < total_pools; i++) {
				pool = priority_pool(i);
				if (pool_unusable(pool))
					continue;
				pool_no = pool->pool_no;
				break;
			}
			break;
		/* Both of these simply increment and cycle */
		case POOL_ROUNDROBIN:
		case POOL_ROTATE:
			if (selected && !selected->idle) {
				pool_no = selected->pool_no;
				break;
			}
			next_pool = pool_no;
			/* Select the next alive pool */
			for (i = 1; i < total_pools; i++) {
				next_pool++;
				if (next_pool >= total_pools)
					next_pool = 0;
				pool = pools[next_pool];
				if (pool_unusable(pool))
					continue;
				pool_no = next_pool;
				break;
			}
			break;
		default:
			break;
	}

	currentpool = pools[pool_no];
	pool = currentpool;
	cg_wunlock(&control_lock);

	/* Set the lagging flag to avoid pool not providing work fast enough
	 * messages in failover only mode since we have to get all fresh work
	 * as in restart_threads */
	if (opt_fail_only)
		pool_tset(pool, &pool->lagging);

	if (pool != last_pool)
	{
		pool->block_id = 0;
		if (pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) {
			applog(LOG_WARNING, "Switching to pool %d %s", pool->pool_no, pool->rpc_url);
			if (pool_localgen(pool) || opt_fail_only)
				clear_pool_work(last_pool);
		}
	}

	mutex_lock(&lp_lock);
	pthread_cond_broadcast(&lp_cond);
	mutex_unlock(&lp_lock);
}

static void discard_work(struct work *work)
{
	if (!work->clone && !work->rolls && !work->mined) {
		if (work->pool) {
			work->pool->discarded_work++;
			work->pool->quota_used--;
			work->pool->works--;
		}
		total_discarded++;
		applog(LOG_DEBUG, "Discarded work");
	} else
		applog(LOG_DEBUG, "Discarded cloned or rolled work");
	free_work(work);
}

static void wake_gws(void)
{
	mutex_lock(stgd_lock);
	pthread_cond_signal(&gws_cond);
	mutex_unlock(stgd_lock);
}

static void discard_stale(void)
{
	struct work *work, *tmp;
	int stale = 0;

	mutex_lock(stgd_lock);
	HASH_ITER(hh, staged_work, work, tmp) {
		if (stale_work(work, false)) {
			HASH_DEL(staged_work, work);
			discard_work(work);
			stale++;
			staged_full = false;
		}
	}
	pthread_cond_signal(&gws_cond);
	mutex_unlock(stgd_lock);

	if (stale)
		applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
}

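/* Answer "would this work be stale ustime microseconds from now?" by
 * temporarily backdating its staging time and re-running stale_work. */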
bool stale_work_future(struct work *work, bool share, unsigned long ustime)
{
	bool rv;
	struct timeval tv, orig;
	ldiv_t d;

	d = ldiv(ustime, 1000000);
	tv = (struct timeval){
		.tv_sec = d.quot,
		.tv_usec = d.rem,
	};
	orig = work->tv_staged;
	timersub(&orig, &tv, &work->tv_staged);
	rv = stale_work(work, share);
	work->tv_staged = orig;

	return rv;
}

static
void pool_update_work_restart_time(struct pool * const pool)
{
	pool->work_restart_time = time(NULL);
	get_timestamp(pool->work_restart_timestamp, sizeof(pool->work_restart_timestamp), pool->work_restart_time);
}

static void restart_threads(void)
{
	struct pool *cp = current_pool();
	int i;
	struct thr_info *thr;

	/* Artificially set the lagging flag to avoid pool not providing work
	 * fast enough messages after every long poll */
	pool_tset(cp, &cp->lagging);

	/* Discard staged work that is now stale */
	discard_stale();

	rd_lock(&mining_thr_lock);
	for (i = 0; i < mining_threads; i++)
	{
		thr = mining_thr[i];
		thr->work_restart = true;
	}
	for (i = 0; i < mining_threads; i++)
	{
		thr = mining_thr[i];
		notifier_wake(thr->work_restart_notifier);
	}
	rd_unlock(&mining_thr_lock);
}

static
void blkhashstr(char *rv, const unsigned char *hash)
{
	unsigned char hash_swap[32];
	swap256(hash_swap, hash);
	swap32tole(hash_swap, hash_swap, 32 / 4);
	bin2hex(rv, hash_swap, 32);
}

static void set_curblock(char *hexstr, unsigned char *hash)
{
	unsigned char hash_swap[32];

	current_block_id = ((uint32_t*)hash)[0];
	strcpy(current_block, hexstr);
	swap256(hash_swap, hash);
	swap32tole(hash_swap, hash_swap, 32 / 4);

	cg_wlock(&ch_lock);
	block_time = time(NULL);
	__update_block_title(hash_swap);
	free(current_fullhash);
	current_fullhash = malloc(65);
	bin2hex(current_fullhash, hash_swap, 32);
	get_timestamp(blocktime, sizeof(blocktime), block_time);
	cg_wunlock(&ch_lock);

	applog(LOG_INFO, "New block: %s diff %s (%s)", current_hash, block_diff, net_hashrate);
}

/* Search to see if this string is from a block that has been seen before */
static bool block_exists(char *hexstr)
{
	struct block *s;

	rd_lock(&blk_lock);
	HASH_FIND_STR(blocks, hexstr, s);
	rd_unlock(&blk_lock);

	if (s)
		return true;
	return false;
}

#if 0
/* Tests if this work is from a block that has been seen before */
static inline bool from_existing_block(struct work *work)
{
	char hexstr[37];
	bool ret;

	bin2hex(hexstr, work->data + 8, 18);
	ret = block_exists(hexstr);
	return ret;
}
#endif

static int block_sort(struct block *blocka, struct block *blockb)
{
	return blocka->block_no - blockb->block_no;
}

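/* Derive the network difficulty from the block target in the work header and
 * format it for display.  The factor 7158278 is approximately 2^32 / 600:
 * at difficulty D the network performs ~D * 2^32 hashes per block, so over a
 * 600-second block interval the implied hashrate is ~D * 7158278.8 h/s. */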
static void set_blockdiff(const struct work *work)
{
	unsigned char target[32];
	double diff;
	uint64_t diff64;

	real_block_target(target, work->data);
	diff = target_diff(target);
	diff64 = diff;

	suffix_string(diff64, block_diff, sizeof(block_diff), 0);
	format_unit2(net_hashrate, sizeof(net_hashrate),
	             true, "h/s", H2B_SHORT, diff * 7158278, -1);
	if (unlikely(current_diff != diff))
		applog(LOG_NOTICE, "Network difficulty changed to %s (%s)", block_diff, net_hashrate);
	current_diff = diff;
}

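/* Decide whether this work is for the block we are already mining.  The
 * block is identified by 18 bytes of the previous-block hash (header bytes
 * 8..25, enough to be unique in practice).  Unseen hashes start a new block
 * entry, trigger a thread restart, and roll the small in-memory block
 * database; for known blocks, pool block_id changes and longpoll work
 * updates are handled instead. */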
static bool test_work_current(struct work *work)
{
	bool ret = true;
	char hexstr[65];

	if (work->mandatory)
		return ret;

	uint32_t block_id = ((uint32_t*)(work->data))[1];

	/* Hack to work around dud work sneaking into test */
	bin2hex(hexstr, work->data + 8, 18);
	if (!strncmp(hexstr, "000000000000000000000000000000000000", 36))
		goto out_free;

	struct pool * const pool = work->pool;

	/* Search to see if this block exists yet and if not, consider it a
	 * new block and set the current block details to this one */
	if (!block_exists(hexstr))
	{
		struct block *s = calloc(sizeof(struct block), 1);
		int deleted_block = 0;
		ret = false;

		if (unlikely(!s))
			quit(1, "test_work_current OOM");
		strcpy(s->hash, hexstr);
		s->block_no = new_blocks++;

		wr_lock(&blk_lock);
		/* Only keep the last hour's worth of blocks in memory since
		 * work from blocks before this is virtually impossible and we
		 * want to prevent memory usage from continually rising */
		if (HASH_COUNT(blocks) > 6)
		{
			struct block *oldblock;

			HASH_SORT(blocks, block_sort);
			oldblock = blocks;
			deleted_block = oldblock->block_no;
			HASH_DEL(blocks, oldblock);
			free(oldblock);
		}
		HASH_ADD_STR(blocks, hash, s);
		set_blockdiff(work);
		wr_unlock(&blk_lock);

		pool->block_id = block_id;
		pool_update_work_restart_time(pool);
		if (deleted_block)
			applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
#if BLKMAKER_VERSION > 1
		template_nonce = 0;
#endif
		set_curblock(hexstr, &work->data[4]);
		if (unlikely(new_blocks == 1))
			goto out_free;

		if (!work->stratum)
		{
			if (work->longpoll)
			{
				applog(LOG_NOTICE, "Longpoll from pool %d detected new block",
				       pool->pool_no);
			}
			else
			if (have_longpoll)
				applog(LOG_NOTICE, "New block detected on network before longpoll");
			else
				applog(LOG_NOTICE, "New block detected on network");
		}
		restart_threads();
	}
	else
	{
		bool restart = false;

		if (unlikely(pool->block_id != block_id))
		{
			bool was_active = pool->block_id != 0;
			pool->block_id = block_id;
			pool_update_work_restart_time(pool);
			if (!work->longpoll)
				update_last_work(work);
			if (was_active)
			{
				// Pool actively changed block
				if (pool == current_pool())
					restart = true;
				if (block_id == current_block_id)
				{
					// Caught up, only announce if this pool is the one in use
					if (restart)
						applog(LOG_NOTICE, "%s %d caught up to new block",
						       work->longpoll ? "Longpoll from pool" : "Pool",
						       pool->pool_no);
				}
				else
				{
					// Switched to a block we know, but not the latest... why?
					// This might detect pools trying to double-spend or 51%,
					// but let's not make any accusations until it's had time
					// in the real world.
					blkhashstr(hexstr, &work->data[4]);
					applog(LOG_WARNING, "%s %d is issuing work for an old block: %s",
					       work->longpoll ? "Longpoll from pool" : "Pool",
					       pool->pool_no,
					       hexstr);
				}
			}
		}

		if (work->longpoll)
		{
			struct pool * const cp = current_pool();
			++pool->work_restart_id;
			if (work->tr && work->tr == pool->swork.tr)
				pool->swork.work_restart_id = pool->work_restart_id;
			update_last_work(work);
			pool_update_work_restart_time(pool);
			applog(
			       ((!opt_quiet_work_updates) && pool_actively_in_use(pool, cp) ? LOG_NOTICE : LOG_DEBUG),
			       "Longpoll from pool %d requested work update",
			       pool->pool_no);
			if ((!restart) && pool == cp)
				restart = true;
		}

		if (restart)
			restart_threads();
	}
	work->longpoll = false;
out_free:
	return ret;
}

static int tv_sort(struct work *worka, struct work *workb)
{
	return worka->tv_staged.tv_sec - workb->tv_staged.tv_sec;
}

static bool work_rollable(struct work *work)
{
	return (!work->clone && work->rolltime);
}

static bool hash_push(struct work *work)
{
	bool rc = true;

	mutex_lock(stgd_lock);
	if (work_rollable(work))
		staged_rollable++;
	if (likely(!getq->frozen)) {
		HASH_ADD_INT(staged_work, id, work);
		HASH_SORT(staged_work, tv_sort);
	} else
		rc = false;
	pthread_cond_broadcast(&getq->cond);
	mutex_unlock(stgd_lock);

	return rc;
}

static void stage_work(struct work *work)
{
	applog(LOG_DEBUG, "Pushing work %d from pool %d to hash queue",
	       work->id, work->pool->pool_no);
	work->work_restart_id = work->pool->work_restart_id;
	work->pool->last_work_time = time(NULL);
	cgtime(&work->pool->tv_last_work_time);
	test_work_current(work);
	work->pool->works++;
	hash_push(work);
}

#ifdef HAVE_CURSES
int curses_int(const char *query)
{
	int ret;
	char *cvar;

	cvar = curses_input(query);
	if (unlikely(!cvar))
		return -1;
	ret = atoi(cvar);
	free(cvar);
	return ret;
}
#endif

#ifdef HAVE_CURSES
static bool input_pool(bool live);
#endif

#ifdef HAVE_CURSES
static void display_pool_summary(struct pool *pool)
{
	double efficiency = 0.0;
	char xfer[ALLOC_H2B_NOUNIT+ALLOC_H2B_SPACED+4+1], bw[ALLOC_H2B_NOUNIT+ALLOC_H2B_SPACED+6+1];
	int pool_secs;

	if (curses_active_locked()) {
		wlog("Pool: %s\n", pool->rpc_url);
		if (pool->solved)
			wlog("SOLVED %d BLOCK%s!\n", pool->solved, pool->solved > 1 ? "S" : "");
		if (!pool->has_stratum)
			wlog("%s own long-poll support\n", pool->lp_url ? "Has" : "Does not have");
		wlog(" Queued work requests: %d\n", pool->getwork_requested);
		wlog(" Share submissions: %d\n", pool->accepted + pool->rejected);
		wlog(" Accepted shares: %d\n", pool->accepted);
		wlog(" Rejected shares: %d + %d stale (%.2f%%)\n",
		     pool->rejected, pool->stale_shares,
		     (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted)
		);
		wlog(" Accepted difficulty shares: %1.f\n", pool->diff_accepted);
		wlog(" Rejected difficulty shares: %1.f\n", pool->diff_rejected);
		pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL);
		wlog(" Network transfer: %s (%s)\n",
		     multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
		                        (float)pool->cgminer_pool_stats.net_bytes_received,
		                        (float)pool->cgminer_pool_stats.net_bytes_sent),
		     multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
		                        (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs),
		                        (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs)));
		uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent;
		efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. / pool_bytes_xfer : 0.0;
		wlog(" Efficiency (accepted * difficulty / 2 KB): %.2f\n", efficiency);
		wlog(" Items worked on: %d\n", pool->works);
		wlog(" Stale submissions discarded due to new blocks: %d\n", pool->stale_shares);
		wlog(" Unable to get work from server occasions: %d\n", pool->getfail_occasions);
		wlog(" Submitting work remotely delay occasions: %d\n\n", pool->remotefail_occasions);
		unlock_curses();
	}
}
#endif

/* We can't remove the memory used for this struct pool because there may
 * still be work referencing it. We just remove it from the pools list */
void remove_pool(struct pool *pool)
{
	int i, last_pool = total_pools - 1;
	struct pool *other;

	/* Boost priority of any lower prio than this one */
	for (i = 0; i < total_pools; i++) {
		other = pools[i];
		if (other->prio > pool->prio)
			other->prio--;
	}

	if (pool->pool_no < last_pool) {
		/* Swap the last pool for this one */
		(pools[last_pool])->pool_no = pool->pool_no;
		pools[pool->pool_no] = pools[last_pool];
	}

	/* Give it an invalid number */
	pool->pool_no = total_pools;
	pool->removed = true;
	pool->has_stratum = false;
	total_pools--;
}

/* add a mutex if this needs to be thread safe in the future */
static struct JE {
	char *buf;
	struct JE *next;
} *jedata = NULL;

static void json_escape_free()
{
	struct JE *jeptr = jedata;
	struct JE *jenext;

	jedata = NULL;

	while (jeptr) {
		jenext = jeptr->next;
		free(jeptr->buf);
		free(jeptr);
		jeptr = jenext;
	}
}

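/* Escape backslashes and double quotes for embedding in JSON output.  Each
 * escape doubles one character, so 2x the input length (plus NUL) is a safe
 * upper bound.  Buffers are chained on jedata and released in one sweep by
 * json_escape_free() once the config file has been written. */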
static
char *json_escape(const char *str)
{
	struct JE *jeptr;
	char *buf, *ptr;

	/* 2x is the max, may as well just allocate that */
	ptr = buf = malloc(strlen(str) * 2 + 1);

	jeptr = malloc(sizeof(*jeptr));
	jeptr->buf = buf;
	jeptr->next = jedata;
	jedata = jeptr;

	while (*str) {
		if (*str == '\\' || *str == '"')
			*(ptr++) = '\\';
		*(ptr++) = *(str++);
	}
	*ptr = '\0';

	return buf;
}

static
void _write_config_string_elist(FILE *fcfg, const char *configname, struct string_elist * const elist)
{
	if (!elist)
		return;

	static struct string_elist *entry;
	fprintf(fcfg, ",\n\"%s\" : [", configname);
	bool first = true;
	DL_FOREACH(elist, entry)
	{
		const char * const s = entry->string;
		fprintf(fcfg, "%s\n\t\"%s\"", first ? "" : ",", json_escape(s));
		first = false;
	}
	fprintf(fcfg, "\n]");
}

void write_config(FILE *fcfg)
{
	int i;

	/* Write pool values */
	fputs("{\n\"pools\" : [", fcfg);
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (pool->quota != 1) {
			fprintf(fcfg, "%s\n\t{\n\t\t\"quota\" : \"%d;%s\",", i > 0 ? "," : "",
				pool->quota,
				json_escape(pool->rpc_url));
		} else {
			fprintf(fcfg, "%s\n\t{\n\t\t\"url\" : \"%s\",", i > 0 ? "," : "",
				json_escape(pool->rpc_url));
		}
		if (pool->rpc_proxy)
			fprintf(fcfg, "\n\t\t\"pool-proxy\" : \"%s\",", json_escape(pool->rpc_proxy));
		fprintf(fcfg, "\n\t\t\"user\" : \"%s\",", json_escape(pool->rpc_user));
		fprintf(fcfg, "\n\t\t\"pass\" : \"%s\",", json_escape(pool->rpc_pass));
		fprintf(fcfg, "\n\t\t\"pool-priority\" : \"%d\"", pool->prio);
		if (pool->force_rollntime)
			fprintf(fcfg, ",\n\t\t\"force-rollntime\" : %d", pool->force_rollntime);
		fprintf(fcfg, "\n\t}");
	}
	fputs("\n]\n", fcfg);

#ifdef HAVE_OPENCL
	write_config_opencl(fcfg);
#endif

#ifdef WANT_CPUMINE
	fprintf(fcfg, ",\n\"algo\" : \"%s\"", algo_names[opt_algo]);
#endif

	/* Simple bool and int options */
	struct opt_table *opt;
	for (opt = opt_config_table; opt->type != OPT_END; opt++) {
		char *p, *name = strdup(opt->names);
		for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
			if (p[1] != '-')
				continue;
			if (opt->type & OPT_NOARG &&
			    ((void *)opt->cb == (void *)opt_set_bool || (void *)opt->cb == (void *)opt_set_invbool) &&
			    (*(bool *)opt->u.arg == ((void *)opt->cb == (void *)opt_set_bool)))
				fprintf(fcfg, ",\n\"%s\" : true", p+2);

			if (opt->type & OPT_HASARG &&
			    ((void *)opt->cb_arg == (void *)set_int_0_to_9999 ||
			     (void *)opt->cb_arg == (void *)set_int_1_to_65535 ||
			     (void *)opt->cb_arg == (void *)set_int_0_to_10 ||
			     (void *)opt->cb_arg == (void *)set_int_1_to_10) &&
			    opt->desc != opt_hidden &&
			    0 <= *(int *)opt->u.arg)
				fprintf(fcfg, ",\n\"%s\" : \"%d\"", p+2, *(int *)opt->u.arg);
		}
	}

	/* Special case options */
	if (request_target_str)
	{
		if (request_pdiff == (long)request_pdiff)
			fprintf(fcfg, ",\n\"request-diff\" : %ld", (long)request_pdiff);
		else
			fprintf(fcfg, ",\n\"request-diff\" : %f", request_pdiff);
	}
	fprintf(fcfg, ",\n\"shares\" : %g", opt_shares);
	if (pool_strategy == POOL_BALANCE)
		fputs(",\n\"balance\" : true", fcfg);
	if (pool_strategy == POOL_LOADBALANCE)
		fputs(",\n\"load-balance\" : true", fcfg);
	if (pool_strategy == POOL_ROUNDROBIN)
		fputs(",\n\"round-robin\" : true", fcfg);
	if (pool_strategy == POOL_ROTATE)
		fprintf(fcfg, ",\n\"rotate\" : \"%d\"", opt_rotate_period);
#if defined(unix) || defined(__APPLE__)
	if (opt_stderr_cmd && *opt_stderr_cmd)
		fprintf(fcfg, ",\n\"monitor\" : \"%s\"", json_escape(opt_stderr_cmd));
#endif // defined(unix)
	if (opt_kernel_path && *opt_kernel_path) {
		char *kpath = strdup(opt_kernel_path);
		if (kpath[strlen(kpath)-1] == '/')
			kpath[strlen(kpath)-1] = 0;
		fprintf(fcfg, ",\n\"kernel-path\" : \"%s\"", json_escape(kpath));
		free(kpath);
	}
	if (schedstart.enable)
		fprintf(fcfg, ",\n\"sched-time\" : \"%d:%d\"", schedstart.tm.tm_hour, schedstart.tm.tm_min);
	if (schedstop.enable)
		fprintf(fcfg, ",\n\"stop-time\" : \"%d:%d\"", schedstop.tm.tm_hour, schedstop.tm.tm_min);
	if (opt_socks_proxy && *opt_socks_proxy)
		fprintf(fcfg, ",\n\"socks-proxy\" : \"%s\"", json_escape(opt_socks_proxy));
	_write_config_string_elist(fcfg, "scan", scan_devices);
#ifdef USE_LIBMICROHTTPD
	if (httpsrv_port != -1)
		fprintf(fcfg, ",\n\"http-port\" : %d", httpsrv_port);
#endif
#ifdef USE_LIBEVENT
	if (stratumsrv_port != -1)
		fprintf(fcfg, ",\n\"stratum-port\" : %d", stratumsrv_port);
#endif
	_write_config_string_elist(fcfg, "device", opt_devices_enabled_list);
	_write_config_string_elist(fcfg, "set-device", opt_set_device_list);
	if (opt_api_allow)
		fprintf(fcfg, ",\n\"api-allow\" : \"%s\"", json_escape(opt_api_allow));
	if (strcmp(opt_api_mcast_addr, API_MCAST_ADDR) != 0)
		fprintf(fcfg, ",\n\"api-mcast-addr\" : \"%s\"", json_escape(opt_api_mcast_addr));
	if (strcmp(opt_api_mcast_code, API_MCAST_CODE) != 0)
		fprintf(fcfg, ",\n\"api-mcast-code\" : \"%s\"", json_escape(opt_api_mcast_code));
	if (*opt_api_mcast_des)
		fprintf(fcfg, ",\n\"api-mcast-des\" : \"%s\"", json_escape(opt_api_mcast_des));
	if (strcmp(opt_api_description, PACKAGE_STRING) != 0)
		fprintf(fcfg, ",\n\"api-description\" : \"%s\"", json_escape(opt_api_description));
	if (opt_api_groups)
		fprintf(fcfg, ",\n\"api-groups\" : \"%s\"", json_escape(opt_api_groups));
	fputs("\n}\n", fcfg);
	json_escape_free();
}

void zero_bestshare(void)
{
	int i;

	best_diff = 0;
	suffix_string(best_diff, best_share, sizeof(best_share), 0);

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		pool->best_diff = 0;
	}
}

void zero_stats(void)
{
	int i;

	applog(LOG_DEBUG, "Zeroing stats");
	cgtime(&total_tv_start);
	miner_started = total_tv_start;
	total_rolling = 0;
	total_mhashes_done = 0;
	total_getworks = 0;
	total_accepted = 0;
	total_rejected = 0;
	hw_errors = 0;
	total_stale = 0;
	total_discarded = 0;
	total_bytes_rcvd = total_bytes_sent = 0;
	new_blocks = 0;
	local_work = 0;
	total_go = 0;
	total_ro = 0;
	total_secs = 1.0;
	total_diff1 = 0;
	total_bad_diff1 = 0;
	found_blocks = 0;
	total_diff_accepted = 0;
	total_diff_rejected = 0;
	total_diff_stale = 0;
#ifdef HAVE_CURSES
	awidth = rwidth = swidth = hwwidth = 1;
#endif

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		pool->getwork_requested = 0;
		pool->accepted = 0;
		pool->rejected = 0;
		pool->solved = 0;
		pool->stale_shares = 0;
		pool->discarded_work = 0;
		pool->getfail_occasions = 0;
		pool->remotefail_occasions = 0;
		pool->last_share_time = 0;
		pool->works = 0;
		pool->diff1 = 0;
		pool->diff_accepted = 0;
		pool->diff_rejected = 0;
		pool->diff_stale = 0;
		pool->last_share_diff = 0;
		pool->cgminer_stats.start_tv = total_tv_start;
		pool->cgminer_stats.getwork_calls = 0;
		pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
		pool->cgminer_stats.getwork_wait_max.tv_sec = 0;
		pool->cgminer_stats.getwork_wait_max.tv_usec = 0;
		pool->cgminer_pool_stats.getwork_calls = 0;
		pool->cgminer_pool_stats.getwork_attempts = 0;
		pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
		pool->cgminer_pool_stats.getwork_wait_max.tv_sec = 0;
		pool->cgminer_pool_stats.getwork_wait_max.tv_usec = 0;
		pool->cgminer_pool_stats.min_diff = 0;
		pool->cgminer_pool_stats.max_diff = 0;
		pool->cgminer_pool_stats.min_diff_count = 0;
		pool->cgminer_pool_stats.max_diff_count = 0;
		pool->cgminer_pool_stats.times_sent = 0;
		pool->cgminer_pool_stats.bytes_sent = 0;
		pool->cgminer_pool_stats.net_bytes_sent = 0;
		pool->cgminer_pool_stats.times_received = 0;
		pool->cgminer_pool_stats.bytes_received = 0;
		pool->cgminer_pool_stats.net_bytes_received = 0;
	}

	zero_bestshare();

	for (i = 0; i < total_devices; ++i) {
		struct cgpu_info *cgpu = get_devices(i);

		mutex_lock(&hash_lock);
		cgpu->total_mhashes = 0;
		cgpu->accepted = 0;
		cgpu->rejected = 0;
		cgpu->stale = 0;
		cgpu->hw_errors = 0;
		cgpu->utility = 0.0;
		cgpu->utility_diff1 = 0;
		cgpu->last_share_pool_time = 0;
		cgpu->bad_diff1 = 0;
		cgpu->diff1 = 0;
		cgpu->diff_accepted = 0;
		cgpu->diff_rejected = 0;
		cgpu->diff_stale = 0;
		cgpu->last_share_diff = 0;
		cgpu->thread_fail_init_count = 0;
		cgpu->thread_zero_hash_count = 0;
		cgpu->thread_fail_queue_count = 0;
		cgpu->dev_sick_idle_60_count = 0;
		cgpu->dev_dead_idle_600_count = 0;
		cgpu->dev_nostart_count = 0;
		cgpu->dev_over_heat_count = 0;
		cgpu->dev_thermal_cutoff_count = 0;
		cgpu->dev_comms_error_count = 0;
		cgpu->dev_throttle_count = 0;
		cgpu->cgminer_stats.start_tv = total_tv_start;
		cgpu->cgminer_stats.getwork_calls = 0;
		cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
		cgpu->cgminer_stats.getwork_wait_max.tv_sec = 0;
		cgpu->cgminer_stats.getwork_wait_max.tv_usec = 0;
		mutex_unlock(&hash_lock);
	}
}

#ifdef HAVE_CURSES
static
void loginput_mode(const int size)
{
	clear_logwin();
	loginput_size = size;
	check_winsizes();
}

static void display_pools(void)
{
	struct pool *pool;
	int selected, i, j;
	char input;

	loginput_mode(7 + total_pools);
	immedok(logwin, true);
updated:
	for (j = 0; j < total_pools; j++) {
		for (i = 0; i < total_pools; i++) {
			pool = pools[i];

			if (pool->prio != j)
				continue;

			if (pool == current_pool())
				wattron(logwin, A_BOLD);
			if (pool->enabled != POOL_ENABLED)
				wattron(logwin, A_DIM);
			wlogprint("%d: ", pool->prio);
			switch (pool->enabled) {
				case POOL_ENABLED:
					wlogprint("Enabled ");
					break;
				case POOL_DISABLED:
					wlogprint("Disabled ");
					break;
				case POOL_REJECTING:
					wlogprint("Rejecting ");
					break;
			}
			_wlogprint(pool_proto_str(pool));
			wlogprint(" Quota %d Pool %d: %s User:%s\n",
			          pool->quota,
			          pool->pool_no,
			          pool->rpc_url, pool->rpc_user);
			wattroff(logwin, A_BOLD | A_DIM);

			break;  // for (i = 0; i < total_pools; i++)
		}
	}
retry:
	wlogprint("\nCurrent pool management strategy: %s\n",
	          strategies[pool_strategy].s);
	if (pool_strategy == POOL_ROTATE)
		wlogprint("Set to rotate every %d minutes\n", opt_rotate_period);
	wlogprint("[F]ailover only %s\n", opt_fail_only ? "enabled" : "disabled");
	wlogprint("Pool [A]dd [R]emove [D]isable [E]nable [P]rioritize [Q]uota change\n");
	wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n");
	wlogprint("Or press any other key to continue\n");
	logwin_update();
	input = getch();

	if (!strncasecmp(&input, "a", 1)) {
		if (opt_benchmark)
		{
			wlogprint("Cannot add pools in benchmark mode");
			goto retry;
		}
		input_pool(true);
		goto updated;
	} else if (!strncasecmp(&input, "r", 1)) {
		if (total_pools <= 1) {
			wlogprint("Cannot remove last pool");
			goto retry;
		}
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		if (pool == current_pool())
			switch_pools(NULL);
		if (pool == current_pool()) {
			wlogprint("Unable to remove pool due to activity\n");
			goto retry;
		}
		disable_pool(pool);
		remove_pool(pool);
		goto updated;
	} else if (!strncasecmp(&input, "s", 1)) {
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		enable_pool(pool);
		switch_pools(pool);
		goto updated;
	} else if (!strncasecmp(&input, "d", 1)) {
		if (enabled_pools <= 1) {
			wlogprint("Cannot disable last pool");
			goto retry;
		}
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		disable_pool(pool);
		if (pool == current_pool())
			switch_pools(NULL);
		goto updated;
	} else if (!strncasecmp(&input, "e", 1)) {
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		enable_pool(pool);
		if (pool->prio < current_pool()->prio)
			switch_pools(pool);
		goto updated;
	} else if (!strncasecmp(&input, "c", 1)) {
		for (i = 0; i <= TOP_STRATEGY; i++)
			wlogprint("%d: %s\n", i, strategies[i].s);
		selected = curses_int("Select strategy number type");
		if (selected < 0 || selected > TOP_STRATEGY) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		if (selected == POOL_ROTATE) {
			opt_rotate_period = curses_int("Select interval in minutes");
			if (opt_rotate_period < 0 || opt_rotate_period > 9999) {
				opt_rotate_period = 0;
				wlogprint("Invalid selection\n");
				goto retry;
			}
		}
		pool_strategy = selected;
		switch_pools(NULL);
		goto updated;
	} else if (!strncasecmp(&input, "i", 1)) {
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		display_pool_summary(pool);
		goto retry;
	} else if (!strncasecmp(&input, "q", 1)) {
		selected = curses_int("Select pool number");
		if (selected < 0 || selected >= total_pools) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		pool = pools[selected];
		selected = curses_int("Set quota");
		if (selected < 0) {
			wlogprint("Invalid negative quota\n");
			goto retry;
		}
		pool->quota = selected;
		adjust_quota_gcd();
		goto updated;
	} else if (!strncasecmp(&input, "f", 1)) {
		opt_fail_only ^= true;
		goto updated;
	} else if (!strncasecmp(&input, "p", 1)) {
		char *prilist = curses_input("Enter new pool priority (comma separated list)");
		if (!prilist)
		{
			wlogprint("Not changing priorities\n");
			goto retry;
		}
		int res = prioritize_pools(prilist, &i);
		free(prilist);
		switch (res) {
			case MSG_NOPOOL:
				wlogprint("No pools\n");
				goto retry;
			case MSG_MISPID:
				wlogprint("Missing pool id parameter\n");
				goto retry;
			case MSG_INVPID:
				wlogprint("Invalid pool id %d - range is 0 - %d\n", i, total_pools - 1);
				goto retry;
			case MSG_DUPPID:
				wlogprint("Duplicate pool specified %d\n", i);
				goto retry;
			case MSG_POOLPRIO:
			default:
				goto updated;
		}
	}

	immedok(logwin, false);
	loginput_mode(0);
}

static const char *summary_detail_level_str(void)
{
	if (opt_compact)
		return "compact";
	if (opt_show_procs)
		return "processors";
	return "devices";
}

static void display_options(void)
{
	int selected;
	char input;

	immedok(logwin, true);
	loginput_mode(12);
retry:
	clear_logwin();
	wlogprint("[N]ormal [C]lear [S]ilent mode (disable all output)\n");
	wlogprint("[D]ebug:%s\n[P]er-device:%s\n[Q]uiet:%s\n[V]erbose:%s\n"
	          "[R]PC debug:%s\n[W]orkTime details:%s\nsu[M]mary detail level:%s\n"
	          "[L]og interval:%d\nS[T]atistical counts: %s\n[Z]ero statistics\n",
	          opt_debug_console ? "on" : "off",
	          want_per_device_stats ? "on" : "off",
	          opt_quiet ? "on" : "off",
	          opt_log_output ? "on" : "off",
	          opt_protocol ? "on" : "off",
	          opt_worktime ? "on" : "off",
	          summary_detail_level_str(),
	          opt_log_interval,
	          opt_weighed_stats ? "weighed" : "absolute");
	wlogprint("Select an option or any other key to return\n");
	logwin_update();
	input = getch();

	if (!strncasecmp(&input, "q", 1)) {
		opt_quiet ^= true;
		wlogprint("Quiet mode %s\n", opt_quiet ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "v", 1)) {
		opt_log_output ^= true;
		if (opt_log_output)
			opt_quiet = false;
		wlogprint("Verbose mode %s\n", opt_log_output ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "n", 1)) {
		opt_log_output = false;
		opt_debug_console = false;
		opt_quiet = false;
		opt_protocol = false;
		opt_compact = false;
		opt_show_procs = false;
		devsummaryYOffset = 0;
		want_per_device_stats = false;
		wlogprint("Output mode reset to normal\n");
		switch_logsize();
		goto retry;
	} else if (!strncasecmp(&input, "d", 1)) {
		opt_debug = true;
		opt_debug_console ^= true;
		opt_log_output = opt_debug_console;
		if (opt_debug_console)
			opt_quiet = false;
		wlogprint("Debug mode %s\n", opt_debug_console ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "m", 1)) {
		if (opt_compact)
			opt_compact = false;
		else
		if (!opt_show_procs)
			opt_show_procs = true;
		else
		{
			opt_compact = true;
			opt_show_procs = false;
			devsummaryYOffset = 0;
		}
		wlogprint("su[M]mary detail level changed to: %s\n", summary_detail_level_str());
		switch_logsize();
		goto retry;
	} else if (!strncasecmp(&input, "p", 1)) {
		want_per_device_stats ^= true;
		opt_log_output = want_per_device_stats;
		wlogprint("Per-device stats %s\n", want_per_device_stats ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "r", 1)) {
		opt_protocol ^= true;
		if (opt_protocol)
			opt_quiet = false;
		wlogprint("RPC protocol debugging %s\n", opt_protocol ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "c", 1))
		clear_logwin();
	else if (!strncasecmp(&input, "l", 1)) {
		selected = curses_int("Interval in seconds");
		if (selected < 0 || selected > 9999) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		opt_log_interval = selected;
		wlogprint("Log interval set to %d seconds\n", opt_log_interval);
		goto retry;
	} else if (!strncasecmp(&input, "s", 1)) {
		opt_realquiet = true;
	} else if (!strncasecmp(&input, "w", 1)) {
		opt_worktime ^= true;
		wlogprint("WorkTime details %s\n", opt_worktime ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "t", 1)) {
		opt_weighed_stats ^= true;
		wlogprint("Now displaying %s statistics\n", opt_weighed_stats ? "weighed" : "absolute");
		goto retry;
	} else if (!strncasecmp(&input, "z", 1)) {
		zero_stats();
		goto retry;
	}

	immedok(logwin, false);
	loginput_mode(0);
}
#endif

void default_save_file(char *filename)
{
#if defined(unix) || defined(__APPLE__)
	if (getenv("HOME") && *getenv("HOME")) {
		strcpy(filename, getenv("HOME"));
		strcat(filename, "/");
	}
	else
		strcpy(filename, "");
	strcat(filename, ".bfgminer/");
	mkdir(filename, 0777);
#else
	strcpy(filename, "");
#endif
	strcat(filename, def_conf);
}

#ifdef HAVE_CURSES
static void set_options(void)
{
	int selected;
	char input;

	immedok(logwin, true);
	loginput_mode(8);
retry:
	wlogprint("\n[L]ongpoll: %s\n", want_longpoll ? "On" : "Off");
	wlogprint("[Q]ueue: %d\n[S]cantime: %d\n[E]xpiry: %d\n[R]etries: %d\n"
	          "[W]rite config file\n[B]FGMiner restart\n",
	          opt_queue, opt_scantime, opt_expiry, opt_retries);
	wlogprint("Select an option or any other key to return\n");
	logwin_update();
	input = getch();

	if (!strncasecmp(&input, "q", 1)) {
		selected = curses_int("Extra work items to queue");
		if (selected < 0 || selected > 9999) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		opt_queue = selected;
		goto retry;
	} else if (!strncasecmp(&input, "l", 1)) {
		if (want_longpoll)
			stop_longpoll();
		else
			start_longpoll();
		applog(LOG_WARNING, "Longpoll %s", want_longpoll ? "enabled" : "disabled");
		goto retry;
	} else if (!strncasecmp(&input, "s", 1)) {
		selected = curses_int("Set scantime in seconds");
		if (selected < 0 || selected > 9999) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		opt_scantime = selected;
		goto retry;
	} else if (!strncasecmp(&input, "e", 1)) {
		selected = curses_int("Set expiry time in seconds");
		if (selected < 0 || selected > 9999) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		opt_expiry = selected;
		goto retry;
	} else if (!strncasecmp(&input, "r", 1)) {
		selected = curses_int("Retries before failing (-1 infinite)");
		if (selected < -1 || selected > 9999) {
			wlogprint("Invalid selection\n");
			goto retry;
		}
		opt_retries = selected;
		goto retry;
	} else if (!strncasecmp(&input, "w", 1)) {
		FILE *fcfg;
		char *str, filename[PATH_MAX], prompt[PATH_MAX + 50];

		default_save_file(filename);
		snprintf(prompt, sizeof(prompt), "Config filename to write (Enter for default) [%s]", filename);
		str = curses_input(prompt);
		if (str) {
			struct stat statbuf;

			strcpy(filename, str);
			free(str);
			if (!stat(filename, &statbuf)) {
				wlogprint("File exists, overwrite?\n");
				input = getch();
				if (strncasecmp(&input, "y", 1))
					goto retry;
			}
		}
		fcfg = fopen(filename, "w");
		if (!fcfg) {
			wlogprint("Cannot open or create file\n");
			goto retry;
		}
		write_config(fcfg);
		fclose(fcfg);
		goto retry;
	} else if (!strncasecmp(&input, "b", 1)) {
		wlogprint("Are you sure?\n");
		input = getch();
		if (!strncasecmp(&input, "y", 1))
			app_restart();
		else
			clear_logwin();
	} else
		clear_logwin();

	loginput_mode(0);
	immedok(logwin, false);
}

int scan_serial(const char *);

static
void _managetui_msg(const char *repr, const char **msg)
{
	if (*msg)
	{
		applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": %s", repr, *msg);
		wattron(logwin, A_BOLD);
		wlogprint("%s", *msg);
		wattroff(logwin, A_BOLD);
		*msg = NULL;
	}
	logwin_update();
}
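
/* Usage note (added): manage_device() below sets `msg` to a one-shot status
 * string and jumps back to its refresh label; _managetui_msg() prints it once
 * in bold and then clears it so it does not repeat on the next redraw. */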
void manage_device(void)
{
	char logline[256];
	const char *msg = NULL;
	struct cgpu_info *cgpu;
	const struct device_drv *drv;

	selecting_device = true;
	immedok(logwin, true);
	loginput_mode(12);

devchange:
	if (unlikely(!total_devices))
	{
		clear_logwin();
		wlogprint("(no devices)\n");
		wlogprint("[Plus] Add device(s) [Enter] Close device manager\n");
		_managetui_msg("(none)", &msg);
		int input = getch();
		switch (input)
		{
			case '+': case '=':  // add new device
				goto addnew;
			default:
				goto out;
		}
	}
	cgpu = devices[selected_device];
	drv = cgpu->drv;
	refresh_devstatus();

refresh:
	clear_logwin();
	wlogprint("Select processor to manage using up/down arrow keys\n");
	get_statline3(logline, sizeof(logline), cgpu, true, true);
	wattron(logwin, A_BOLD);
	wlogprint("%s", logline);
	wattroff(logwin, A_BOLD);
	wlogprint("\n");

	if (cgpu->dev_manufacturer)
		wlogprint(" %s from %s\n", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer);
	else
	if (cgpu->dev_product)
		wlogprint(" %s\n", cgpu->dev_product);

	if (cgpu->dev_serial)
		wlogprint("Serial: %s\n", cgpu->dev_serial);

	if (cgpu->kname)
		wlogprint("Kernel: %s\n", cgpu->kname);

	if (drv->proc_wlogprint_status && likely(cgpu->status != LIFE_INIT))
		drv->proc_wlogprint_status(cgpu);

	wlogprint("\n");

	// TODO: Last share at TIMESTAMP on pool N
	// TODO: Custom device info/commands

	if (cgpu->deven != DEV_ENABLED)
		wlogprint("[E]nable ");
	if (cgpu->deven != DEV_DISABLED)
		wlogprint("[D]isable ");
	if (drv->identify_device)
		wlogprint("[I]dentify ");
	if (drv->proc_tui_wlogprint_choices && likely(cgpu->status != LIFE_INIT))
		drv->proc_tui_wlogprint_choices(cgpu);
	wlogprint("\n");
	wlogprint("[Slash] Find processor [Plus] Add device(s) [Enter] Close device manager\n");
	_managetui_msg(cgpu->proc_repr, &msg);

	while (true)
	{
		int input = getch();
		applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": (choice %d)", cgpu->proc_repr, input);
		switch (input) {
			case 'd': case 'D':
				if (cgpu->deven == DEV_DISABLED)
					msg = "Processor already disabled\n";
				else
				{
					cgpu->deven = DEV_DISABLED;
					msg = "Processor being disabled\n";
				}
				goto refresh;
			case 'e': case 'E':
				if (cgpu->deven == DEV_ENABLED)
					msg = "Processor already enabled\n";
				else
				{
					proc_enable(cgpu);
					msg = "Processor being enabled\n";
				}
				goto refresh;
			case 'i': case 'I':
				if (drv->identify_device && drv->identify_device(cgpu))
					msg = "Identify command sent\n";
				else
					goto key_default;
				goto refresh;
			case KEY_DOWN:
				if (selected_device >= total_devices - 1)
					break;
				++selected_device;
				goto devchange;
			case KEY_UP:
				if (selected_device <= 0)
					break;
				--selected_device;
				goto devchange;
			case KEY_NPAGE:
			{
				if (selected_device >= total_devices - 1)
					break;
				struct cgpu_info *mdev = devices[selected_device]->device;
				do {
					++selected_device;
				} while (devices[selected_device]->device == mdev && selected_device < total_devices - 1);
				goto devchange;
			}
			case KEY_PPAGE:
			{
				if (selected_device <= 0)
					break;
				struct cgpu_info *mdev = devices[selected_device]->device;
				do {
					--selected_device;
				} while (devices[selected_device]->device == mdev && selected_device > 0);
				goto devchange;
			}
			case '/': case '?':  // find device
			{
				static char *pattern = NULL;
				char *newpattern = curses_input("Enter pattern");
				if (newpattern)
				{
					free(pattern);
					pattern = newpattern;
				}
				else
				if (!pattern)
					pattern = calloc(1, 1);
				int match = cgpu_search(pattern, selected_device + 1);
				if (match == -1)
				{
					msg = "Couldn't find device\n";
					goto refresh;
				}
				selected_device = match;
				goto devchange;
			}
			case '+': case '=':  // add new device
			{
addnew:
				clear_logwin();
				_wlogprint(
					"Enter \"auto\", \"all\", or a serial port to probe for mining devices.\n"
					"Prefix by a driver name and colon to only probe a specific driver.\n"
					"For example: erupter:"
#ifdef WIN32
					"\\\\.\\COM40"
#elif defined(__APPLE__)
					"/dev/cu.SLAB_USBtoUART"
#else
					"/dev/ttyUSB39"
#endif
					"\n"
				);
				char *scanser = curses_input("Enter target");
				if (scan_serial(scanser))
				{
					selected_device = total_devices - 1;
					msg = "Device scan succeeded\n";
				}
				else
					msg = "No new devices found\n";
				goto devchange;
			}
			case 'Q': case 'q':
			case KEY_BREAK: case KEY_BACKSPACE: case KEY_CANCEL: case KEY_CLOSE: case KEY_EXIT:
			case '\x1b':  // ESC
			case KEY_ENTER:
			case '\r':  // Ctrl-M on Windows, with nonl
#ifdef PADENTER
			case PADENTER:  // pdcurses, used by Enter key on Windows with nonl
#endif
			case '\n':
				goto out;
			default:
				;
key_default:
				if (drv->proc_tui_handle_choice && likely(drv_ready(cgpu)))
				{
					msg = drv->proc_tui_handle_choice(cgpu, input);
					if (msg)
						goto refresh;
				}
		}
	}

out:
	selecting_device = false;
	loginput_mode(0);
	immedok(logwin, false);
}
void show_help(void)
{
	loginput_mode(11);
	// NOTE: wlogprint is a macro with a buffer limit
	_wlogprint(
		"LU: oldest explicit work update currently being used for new work\n"
		"ST: work in queue | F: network fails | NB: new blocks detected\n"
		"AS: shares being submitted | BW: bandwidth (up/down)\n"
		"E: # shares * diff per 2kB bw | I: expected income | BS: best share ever found\n"
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
		"\n"
		"devices/processors hashing (only for totals line), hottest temperature\n"
	);
	wlogprint(
		"hashrates: %ds decaying / all-time average / all-time average (effective)\n"
		, opt_log_interval);
	_wlogprint(
		"A: accepted shares | R: rejected+discarded(% of total)\n"
		"HW: hardware errors / % nonces invalid\n"
		"\n"
		"Press any key to clear"
	);
	logwin_update();
	getch();
	loginput_mode(0);
}
static void *input_thread(void __maybe_unused *userdata)
{
	RenameThread("input");

	if (!curses_active)
		return NULL;

	while (1) {
		int input;

		input = getch();
		switch (input) {
			case 'h': case 'H': case '?':
			case KEY_F(1):
				show_help();
				break;
			case 'q': case 'Q':
				kill_work();
				return NULL;
			case 'd': case 'D':
				display_options();
				break;
			case 'm': case 'M':
				manage_device();
				break;
			case 'p': case 'P':
				display_pools();
				break;
			case 's': case 'S':
				set_options();
				break;
#ifdef HAVE_CURSES
			case KEY_DOWN:
			{
				const int visible_lines = logcursor - devcursor;
				const int invisible_lines = total_lines - visible_lines;
				if (devsummaryYOffset <= -invisible_lines)
					break;
				devsummaryYOffset -= 2;
			}
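			/* Note (added): the missing `break` here is intentional --
			 * KEY_DOWN subtracts 2 and then falls through to KEY_UP,
			 * which adds 1 back and calls refresh_devstatus(), for a
			 * net scroll of one line. */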
			case KEY_UP:
				if (devsummaryYOffset == 0)
					break;
				++devsummaryYOffset;
				refresh_devstatus();
				break;
			case KEY_NPAGE:
			{
				const int visible_lines = logcursor - devcursor;
				const int invisible_lines = total_lines - visible_lines;
				if (devsummaryYOffset - visible_lines <= -invisible_lines)
					devsummaryYOffset = -invisible_lines;
				else
					devsummaryYOffset -= visible_lines;
				refresh_devstatus();
				break;
			}
			case KEY_PPAGE:
			{
				const int visible_lines = logcursor - devcursor;
				if (devsummaryYOffset + visible_lines >= 0)
					devsummaryYOffset = 0;
				else
					devsummaryYOffset += visible_lines;
				refresh_devstatus();
				break;
			}
#endif
		}
		if (opt_realquiet) {
			disable_curses();
			break;
		}
	}
	return NULL;
}
#endif
static void *api_thread(void *userdata)
{
	struct thr_info *mythr = userdata;

	pthread_detach(pthread_self());
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	RenameThread("rpc");

	api(api_thr_id);

	mythr->has_pth = false;

	return NULL;
}

void thread_reportin(struct thr_info *thr)
{
	cgtime(&thr->last);
	thr->cgpu->status = LIFE_WELL;
	thr->getwork = 0;
	thr->cgpu->device_last_well = time(NULL);
}

void thread_reportout(struct thr_info *thr)
{
	thr->getwork = time(NULL);
}
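
/* Note (added): thread_reportin() marks a thread as actively hashing so the
 * watchdog treats it as healthy; thread_reportout() records that it is
 * blocked waiting for work (see request_work() below) and should not be
 * restarted for being unresponsive. */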
static void hashmeter(int thr_id, struct timeval *diff,
                      uint64_t hashes_done)
{
	char logstatusline[256];
	struct timeval temp_tv_end, total_diff;
	double secs;
	double local_secs;
	static double local_mhashes_done = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;
	bool showlog = false;
	char cHr[ALLOC_H2B_NOUNIT+1], aHr[ALLOC_H2B_NOUNIT+1], uHr[ALLOC_H2B_SPACED+3+1];
	char rejpcbuf[6];
	char bnbuf[6];
	struct thr_info *thr;

	/* Update the last time this thread reported in */
	if (thr_id >= 0) {
		thr = get_thread(thr_id);
		cgtime(&(thr->last));
		thr->cgpu->device_last_well = time(NULL);
	}

	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);

	/* So we can call hashmeter from a non worker thread */
	if (thr_id >= 0) {
		struct cgpu_info *cgpu = thr->cgpu;
		int threadobj = cgpu->threads ?: 1;
		double thread_rolling = 0.0;
		int i;

		applog(LOG_DEBUG, "[thread %d: %"PRIu64" hashes, %.1f khash/sec]",
		       thr_id, hashes_done, hashes_done / 1000 / secs);

		/* Rolling average for each thread and each device */
		decay_time(&thr->rolling, local_mhashes / secs, secs);
		for (i = 0; i < threadobj; i++)
			thread_rolling += cgpu->thr[i]->rolling;

		mutex_lock(&hash_lock);
		decay_time(&cgpu->rolling, thread_rolling, secs);
		cgpu->total_mhashes += local_mhashes;
		mutex_unlock(&hash_lock);

		// If needed, output detailed, per-device stats
		if (want_per_device_stats) {
			struct timeval now;
			struct timeval elapsed;
			struct timeval *last_msg_tv = opt_show_procs ? &thr->cgpu->last_message_tv : &thr->cgpu->device->last_message_tv;

			cgtime(&now);
			timersub(&now, last_msg_tv, &elapsed);
			if (opt_log_interval <= elapsed.tv_sec) {
				struct cgpu_info *cgpu = thr->cgpu;
				char logline[255];

				*last_msg_tv = now;
				get_statline(logline, sizeof(logline), cgpu);
				if (!curses_active) {
					printf("%s \r", logline);
					fflush(stdout);
				} else
					applog(LOG_INFO, "%s", logline);
			}
		}
	}

	/* Totals are updated by all threads so can race without locking */
	mutex_lock(&hash_lock);
	cgtime(&temp_tv_end);
	timersub(&temp_tv_end, &total_tv_end, &total_diff);

	total_mhashes_done += local_mhashes;
	local_mhashes_done += local_mhashes;
	/* Only update with opt_log_interval */
	if (total_diff.tv_sec < opt_log_interval)
		goto out_unlock;
	showlog = true;
	cgtime(&total_tv_end);

	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	decay_time(&total_rolling, local_mhashes_done / local_secs, local_secs);
	global_hashrate = ((unsigned long long)lround(total_rolling)) * 1000000;

	timersub(&total_tv_end, &total_tv_start, &total_diff);
	total_secs = (double)total_diff.tv_sec +
	             ((double)total_diff.tv_usec / 1000000.0);

	double wtotal = (total_diff_accepted + total_diff_rejected + total_diff_stale);
	multi_format_unit_array2(
		((char*[]){cHr, aHr, uHr}),
		((size_t[]){sizeof(cHr), sizeof(aHr), sizeof(uHr)}),
		true, "h/s", H2B_SHORT,
		3,
		1e6*total_rolling,
		1e6*total_mhashes_done / total_secs,
		utility_to_hashrate(total_diff1 * (wtotal ? (total_diff_accepted / wtotal) : 1) * 60 / total_secs));

	int ui_accepted, ui_rejected, ui_stale;
	if (opt_weighed_stats)
	{
		ui_accepted = total_diff_accepted;
		ui_rejected = total_diff_rejected;
		ui_stale = total_diff_stale;
	}
	else
	{
		ui_accepted = total_accepted;
		ui_rejected = total_rejected;
		ui_stale = total_stale;
	}

#ifdef HAVE_CURSES
	if (curses_active_locked()) {
		float temp = 0;
		struct cgpu_info *proc, *last_working_dev = NULL;
		int i, working_devs = 0, working_procs = 0;
		int divx;
		bool bad = false;

		// Find the highest temperature of all processors
		for (i = 0; i < total_devices; ++i)
		{
			proc = get_devices(i);

			if (proc->temp > temp)
				temp = proc->temp;

			if (unlikely(proc->deven == DEV_DISABLED))
				;  // Just need to block it off from both conditions
			else
			if (likely(proc->status == LIFE_WELL && proc->deven == DEV_ENABLED))
			{
				if (proc->rolling > .1)
				{
					++working_procs;
					if (proc->device != last_working_dev)
					{
						++working_devs;
						last_working_dev = proc->device;
					}
				}
			}
			else
				bad = true;
		}

		if (working_devs == working_procs)
			snprintf(statusline, sizeof(statusline), "%s%d ", bad ? U8_BAD_START : "", working_devs);
		else
			snprintf(statusline, sizeof(statusline), "%s%d/%d ", bad ? U8_BAD_START : "", working_devs, working_procs);

		divx = 7;
		if (opt_show_procs && !opt_compact)
			++divx;
		if (bad)
		{
			divx += sizeof(U8_BAD_START)-1;
			strcpy(&statusline[divx], U8_BAD_END);
			divx += sizeof(U8_BAD_END)-1;
		}

		temperature_column(&statusline[divx], sizeof(statusline)-divx, true, &temp);

		format_statline(statusline, sizeof(statusline),
		                cHr, aHr,
		                uHr,
		                ui_accepted,
		                ui_rejected,
		                ui_stale,
		                total_diff_rejected + total_diff_stale, total_diff_accepted,
		                hw_errors,
		                total_bad_diff1, total_bad_diff1 + total_diff1);
		unlock_curses();
	}
#endif

	// Add a space
	memmove(&uHr[6], &uHr[5], strlen(&uHr[5]) + 1);
	uHr[5] = ' ';

	percentf4(rejpcbuf, sizeof(rejpcbuf), total_diff_rejected + total_diff_stale, total_diff_accepted);
	percentf4(bnbuf, sizeof(bnbuf), total_bad_diff1, total_diff1);

	snprintf(logstatusline, sizeof(logstatusline),
	         "%s%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s",
	         want_per_device_stats ? "ALL " : "",
	         opt_log_interval,
	         cHr, aHr,
	         uHr,
	         ui_accepted,
	         ui_rejected,
	         ui_stale,
	         rejpcbuf,
	         hw_errors,
	         bnbuf
	);

	local_mhashes_done = 0;
out_unlock:
	mutex_unlock(&hash_lock);

	if (showlog) {
		if (!curses_active) {
			printf("%s \r", logstatusline);
			fflush(stdout);
		} else
			applog(LOG_INFO, "%s", logstatusline);
	}
}
void hashmeter2(struct thr_info *thr)
{
	struct timeval tv_now, tv_elapsed;

	timerclear(&thr->tv_hashes_done);

	cgtime(&tv_now);
	timersub(&tv_now, &thr->tv_lastupdate, &tv_elapsed);

	/* Update the hashmeter at most 5 times per second */
	if ((thr->hashes_done && (tv_elapsed.tv_sec > 0 || tv_elapsed.tv_usec > 200000)) ||
	    tv_elapsed.tv_sec >= opt_log_interval) {
		hashmeter(thr->id, &tv_elapsed, thr->hashes_done);
		thr->hashes_done = 0;
		thr->tv_lastupdate = tv_now;
	}
}
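
/* Worked example (added): with hashes pending, an elapsed time of 0.25 s
 * (tv_sec == 0, tv_usec == 250000 > 200000) triggers an update, capping the
 * rate at roughly 5 per second; with nothing pending, an update is still
 * forced every opt_log_interval seconds so idle threads keep reporting. */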
static void stratum_share_result(json_t *val, json_t *res_val, json_t *err_val,
                                 struct stratum_share *sshare)
{
	struct work *work = sshare->work;

	share_result(val, res_val, err_val, work, false, "");
}
/* Parses stratum json responses and tries to find the id that the request
 * matched to and treat it accordingly. */
bool parse_stratum_response(struct pool *pool, char *s)
{
	json_t *val = NULL, *err_val, *res_val, *id_val;
	struct stratum_share *sshare;
	json_error_t err;
	bool ret = false;
	int id;

	val = JSON_LOADS(s, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");
	id_val = json_object_get(val, "id");

	if (!id_val || json_is_null(id_val)) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC non method decode failed: %s", ss);
		free(ss);

		goto out;
	}

	if (!json_is_integer(id_val)) {
		if (json_is_string(id_val)
		 && !strncmp(json_string_value(id_val), "txlist", 6))
		{
			const bool is_array = json_is_array(res_val);
			applog(LOG_DEBUG, "Received %s for pool %u job %s",
			       is_array ? "transaction list" : "no-transaction-list response",
			       pool->pool_no, &json_string_value(id_val)[6]);
			if (!is_array)
			{
				// No need to wait for a timeout
				timer_unset(&pool->swork.tv_transparency);
				pool_set_opaque(pool, true);
				goto fishy;
			}
			if (strcmp(json_string_value(id_val) + 6, pool->swork.job_id))
				// We only care about a transaction list for the current job id
				goto fishy;

			// Check that the transactions actually hash to the merkle links
			{
				unsigned maxtx = 1 << pool->swork.merkles;
				unsigned mintx = maxtx >> 1;
				--maxtx;
				unsigned acttx = (unsigned)json_array_size(res_val);
				if (acttx < mintx || acttx > maxtx) {
					applog(LOG_WARNING, "Pool %u is sending mismatched block contents to us (%u is not %u-%u)",
					       pool->pool_no, acttx, mintx, maxtx);
					goto fishy;
				}
				// TODO: Check hashes match actual merkle links
			}

			pool_set_opaque(pool, false);
			timer_unset(&pool->swork.tv_transparency);

fishy:
			ret = true;
		}

		goto out;
	}

	id = json_integer_value(id_val);

	mutex_lock(&sshare_lock);
	HASH_FIND_INT(stratum_shares, &id, sshare);
	if (sshare)
		HASH_DEL(stratum_shares, sshare);
	mutex_unlock(&sshare_lock);

	if (!sshare) {
		double pool_diff;

		/* Since the share is untracked, we can only guess at what the
		 * work difficulty is based on the current pool diff. */
		cg_rlock(&pool->data_lock);
		pool_diff = target_diff(pool->swork.target);
		cg_runlock(&pool->data_lock);

		if (json_is_true(res_val)) {
			applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);

			/* We don't know what device this came from so we can't
			 * attribute the work to the relevant cgpu */
			mutex_lock(&stats_lock);
			total_accepted++;
			pool->accepted++;
			total_diff_accepted += pool_diff;
			pool->diff_accepted += pool_diff;
			mutex_unlock(&stats_lock);
		} else {
			applog(LOG_NOTICE, "Rejected untracked stratum share from pool %d", pool->pool_no);

			mutex_lock(&stats_lock);
			total_rejected++;
			pool->rejected++;
			total_diff_rejected += pool_diff;
			pool->diff_rejected += pool_diff;
			mutex_unlock(&stats_lock);
		}
		goto out;
	}
	else {
		mutex_lock(&submitting_lock);
		--total_submitting;
		mutex_unlock(&submitting_lock);
	}

	stratum_share_result(val, res_val, err_val, sshare);

	free_work(sshare->work);
	free(sshare);

	ret = true;
out:
	if (val)
		json_decref(val);

	return ret;
}
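
/* Illustrative message shapes handled above (not from the source): a share
 * result arrives as {"id": 4, "result": true, "error": null}, where the
 * integer id is looked up in the stratum_shares table, while transparency
 * probes come back with a string id of the form "txlist<job>" and an array
 * (or non-array) result. */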
static void shutdown_stratum(struct pool *pool)
{
	// Shut down Stratum as if we never had it
	pool->stratum_active = false;
	pool->stratum_init = false;
	pool->has_stratum = false;
	shutdown(pool->sock, SHUT_RDWR);
	// Compare before freeing so we never inspect a dangling pointer
	if (pool->sockaddr_url == pool->stratum_url)
		pool->sockaddr_url = NULL;
	free(pool->stratum_url);
	pool->stratum_url = NULL;
}
void clear_stratum_shares(struct pool *pool)
{
	int my_mining_threads = mining_threads;  // Cached outside of locking
	struct stratum_share *sshare, *tmpshare;
	struct work *work;
	struct cgpu_info *cgpu;
	double diff_cleared = 0;
	double thr_diff_cleared[my_mining_threads];
	int cleared = 0;
	int thr_cleared[my_mining_threads];

	// NOTE: This is per-thread rather than per-device to avoid getting devices lock in stratum_shares loop
	for (int i = 0; i < my_mining_threads; ++i)
	{
		thr_diff_cleared[i] = 0;
		thr_cleared[i] = 0;
	}

	mutex_lock(&sshare_lock);
	HASH_ITER(hh, stratum_shares, sshare, tmpshare) {
		work = sshare->work;
		if (sshare->work->pool == pool && work->thr_id < my_mining_threads) {
			HASH_DEL(stratum_shares, sshare);
			sharelog("disconnect", work);
			diff_cleared += sshare->work->work_difficulty;
			thr_diff_cleared[work->thr_id] += work->work_difficulty;
			++thr_cleared[work->thr_id];
			free_work(sshare->work);
			free(sshare);
			cleared++;
		}
	}
	mutex_unlock(&sshare_lock);

	if (cleared) {
		applog(LOG_WARNING, "Lost %d shares due to stratum disconnect on pool %d", cleared, pool->pool_no);

		mutex_lock(&stats_lock);
		pool->stale_shares += cleared;
		total_stale += cleared;
		pool->diff_stale += diff_cleared;
		total_diff_stale += diff_cleared;
		for (int i = 0; i < my_mining_threads; ++i)
			if (thr_cleared[i])
			{
				cgpu = get_thr_cgpu(i);
				cgpu->diff_stale += thr_diff_cleared[i];
				cgpu->stale += thr_cleared[i];
			}
		mutex_unlock(&stats_lock);

		mutex_lock(&submitting_lock);
		total_submitting -= cleared;
		mutex_unlock(&submitting_lock);
	}
}
static void resubmit_stratum_shares(struct pool *pool)
{
	struct stratum_share *sshare, *tmpshare;
	struct work *work;
	unsigned resubmitted = 0;

	mutex_lock(&sshare_lock);
	mutex_lock(&submitting_lock);
	HASH_ITER(hh, stratum_shares, sshare, tmpshare) {
		if (sshare->work->pool != pool)
			continue;

		HASH_DEL(stratum_shares, sshare);
		work = sshare->work;
		DL_APPEND(submit_waiting, work);

		free(sshare);
		++resubmitted;
	}
	mutex_unlock(&submitting_lock);
	mutex_unlock(&sshare_lock);

	if (resubmitted) {
		notifier_wake(submit_waiting_notifier);
		applog(LOG_DEBUG, "Resubmitting %u shares due to stratum disconnect on pool %u", resubmitted, pool->pool_no);
	}
}
static void clear_pool_work(struct pool *pool)
{
	struct work *work, *tmp;
	int cleared = 0;

	mutex_lock(stgd_lock);
	HASH_ITER(hh, staged_work, work, tmp) {
		if (work->pool == pool) {
			HASH_DEL(staged_work, work);
			free_work(work);
			cleared++;
			staged_full = false;
		}
	}
	mutex_unlock(stgd_lock);
}
static int cp_prio(void)
{
	int prio;

	cg_rlock(&control_lock);
	prio = currentpool->prio;
	cg_runlock(&control_lock);

	return prio;
}
/* We only need to maintain a secondary pool connection when we need the
 * capacity to get work from the backup pools while still on the primary */
static bool cnx_needed(struct pool *pool)
{
	struct pool *cp;

	if (pool->enabled != POOL_ENABLED)
		return false;

	/* Idle stratum pool needs something to kick it alive again */
	if (pool->has_stratum && pool->idle)
		return true;

	/* Getwork pools without opt_fail_only need backup pools up to be able
	 * to leak shares */
	cp = current_pool();
	if (pool_actively_desired(pool, cp))
		return true;
	if (!pool_localgen(cp) && (!opt_fail_only || !cp->hdr_path))
		return true;

	/* Keep the connection open to allow any stray shares to be submitted
	 * on switching pools for 2 minutes. */
	if (timer_elapsed(&pool->tv_last_work_time, NULL) < 120)
		return true;

	/* If the pool has only just come to life and is higher priority than
	 * the current pool keep the connection open so we can fail back to
	 * it. */
	if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio())
		return true;

	if (pool_unworkable(cp))
		return true;

	/* We've run out of work; bring anything back to life. */
	if (no_work)
		return true;

	// If the current pool lacks its own block change detection, see if we are needed for that
	if (pool_active_lp_pool(cp) == pool)
		return true;

	return false;
}
static void wait_lpcurrent(struct pool *pool);
static void pool_resus(struct pool *pool);
static void gen_stratum_work(struct pool *pool, struct work *work);

static void stratum_resumed(struct pool *pool)
{
	if (!pool->stratum_notify)
		return;
	if (pool_tclear(pool, &pool->idle)) {
		applog(LOG_INFO, "Stratum connection to pool %d resumed", pool->pool_no);
		pool_resus(pool);
	}
}

static bool supports_resume(struct pool *pool)
{
	bool ret;

	cg_rlock(&pool->data_lock);
	ret = (pool->sessionid != NULL);
	cg_runlock(&pool->data_lock);

	return ret;
}

static bool pools_active;
/* One stratum thread per pool that has stratum waits on the socket checking
 * for new messages and for the integrity of the socket connection. We reset
 * the connection based on the integrity of the receive side only as the send
 * side will eventually expire data it fails to send. */
static void *stratum_thread(void *userdata)
{
	struct pool *pool = (struct pool *)userdata;

	pthread_detach(pthread_self());

	char threadname[20];
	snprintf(threadname, 20, "stratum%u", pool->pool_no);
	RenameThread(threadname);

	srand(time(NULL) + (intptr_t)userdata);

	while (42) {
		struct timeval timeout;
		int sel_ret;
		fd_set rd;
		char *s;
		int sock;

		if (unlikely(!pool->has_stratum))
			break;

		/* Check to see whether we need to maintain this connection
		 * indefinitely or just bring it up when we switch to this
		 * pool */
		while (true)
		{
			sock = pool->sock;
			if (sock == INVSOCK)
				applog(LOG_DEBUG, "Pool %u: Invalid socket, suspending",
				       pool->pool_no);
			else
			if (!sock_full(pool) && !cnx_needed(pool) && pools_active)
				applog(LOG_DEBUG, "Pool %u: Connection not needed, suspending",
				       pool->pool_no);
			else
				break;

			suspend_stratum(pool);
			clear_stratum_shares(pool);
			clear_pool_work(pool);

			wait_lpcurrent(pool);
			if (!restart_stratum(pool)) {
				pool_died(pool);
				while (!restart_stratum(pool)) {
					if (pool->removed)
						goto out;
					cgsleep_ms(30000);
				}
			}
		}

		FD_ZERO(&rd);
		FD_SET(sock, &rd);
		timeout.tv_sec = 120;
		timeout.tv_usec = 0;

		/* If we fail to receive any notify messages for 2 minutes we
		 * assume the connection has been dropped and treat this pool
		 * as dead */
		if (!sock_full(pool) && (sel_ret = select(sock + 1, &rd, NULL, NULL, &timeout)) < 1) {
			applog(LOG_DEBUG, "Stratum select failed on pool %d with value %d", pool->pool_no, sel_ret);
			s = NULL;
		} else
			s = recv_line(pool);
		if (!s) {
			if (!pool->has_stratum)
				break;

			applog(LOG_NOTICE, "Stratum connection to pool %d interrupted", pool->pool_no);
			pool->getfail_occasions++;
			total_go++;

			mutex_lock(&pool->stratum_lock);
			pool->stratum_active = pool->stratum_notify = false;
			pool->sock = INVSOCK;
			mutex_unlock(&pool->stratum_lock);

			/* If the socket to our stratum pool disconnects, all
			 * submissions need to be discarded or resent. */
			if (!supports_resume(pool))
				clear_stratum_shares(pool);
			else
				resubmit_stratum_shares(pool);
			clear_pool_work(pool);
			if (pool == current_pool())
				restart_threads();

			if (restart_stratum(pool))
				continue;

			shutdown_stratum(pool);
			pool_died(pool);
			break;
		}

		/* Check this pool hasn't died while being a backup pool and
		 * has not had its idle flag cleared */
		stratum_resumed(pool);

		if (!parse_method(pool, s) && !parse_stratum_response(pool, s))
			applog(LOG_INFO, "Unknown stratum msg: %s", s);
		free(s);
		if (pool->swork.clean) {
			struct work *work = make_work();

			/* Generate a single work item to update the current
			 * block database */
			pool->swork.clean = false;
			gen_stratum_work(pool, work);

			/* Try to extract block height from coinbase scriptSig */
			uint8_t *bin_height = &bytes_buf(&pool->swork.coinbase)[4 /*version*/ + 1 /*txin count*/ + 36 /*prevout*/ + 1 /*scriptSig len*/ + 1 /*push opcode*/];
			unsigned char cb_height_sz;
			cb_height_sz = bin_height[-1];
			if (cb_height_sz == 3) {
				// FIXME: The block number will overflow this by AD 2173
				uint32_t block_id = ((uint32_t*)work->data)[1];
				uint32_t height = 0;
				memcpy(&height, bin_height, 3);
				height = le32toh(height);
				have_block_height(block_id, height);
			}

			pool->swork.work_restart_id =
			    ++pool->work_restart_id;
			pool_update_work_restart_time(pool);
			if (test_work_current(work)) {
				/* Only accept a work update if this stratum
				 * connection is from the current pool */
				struct pool * const cp = current_pool();
				if (pool == cp)
					restart_threads();
				applog(
				    ((!opt_quiet_work_updates) && pool_actively_in_use(pool, cp) ? LOG_NOTICE : LOG_DEBUG),
				    "Stratum from pool %d requested work update", pool->pool_no);
			} else
				applog(LOG_NOTICE, "Stratum from pool %d detected new block", pool->pool_no);
			free_work(work);
		}

		if (timer_passed(&pool->swork.tv_transparency, NULL)) {
			// More than 4 timmills have passed since the transactions were requested
			timer_unset(&pool->swork.tv_transparency);
			pool_set_opaque(pool, true);
		}
	}

out:
	return NULL;
}
static void init_stratum_thread(struct pool *pool)
{
	have_longpoll = true;

	if (unlikely(pthread_create(&pool->stratum_thread, NULL, stratum_thread, (void *)pool)))
		quit(1, "Failed to create stratum thread");
}

static void *longpoll_thread(void *userdata);

static bool stratum_works(struct pool *pool)
{
	applog(LOG_INFO, "Testing pool %d stratum %s", pool->pool_no, pool->stratum_url);
	if (!extract_sockaddr(pool->stratum_url, &pool->sockaddr_url, &pool->stratum_port))
		return false;

	if (pool->stratum_active)
		return true;

	if (!initiate_stratum(pool))
		return false;

	return true;
}

static
bool pool_recently_got_work(struct pool * const pool, const struct timeval * const tvp_now)
{
	return (timer_isset(&pool->tv_last_work_time) && timer_elapsed(&pool->tv_last_work_time, tvp_now) < 60);
}
static bool pool_active(struct pool *pool, bool pinging)
{
	struct timeval tv_now, tv_getwork, tv_getwork_reply;
	bool ret = false;
	json_t *val;
	CURL *curl = NULL;
	int rolltime;
	char *rpc_req;
	struct work *work;
	enum pool_protocol proto;

	if (pool->stratum_init)
	{
		if (pool->stratum_active)
			return true;
	}
	else
	if (!pool->idle)
	{
		timer_set_now(&tv_now);
		if (pool_recently_got_work(pool, &tv_now))
			return true;
	}

	mutex_lock(&pool->pool_test_lock);

	if (pool->stratum_init)
	{
		ret = pool->stratum_active;
		goto out;
	}
	timer_set_now(&tv_now);
	if (pool->idle)
	{
		if (timer_elapsed(&pool->tv_idle, &tv_now) < 30)
			goto out;
	}
	else
	if (pool_recently_got_work(pool, &tv_now))
	{
		ret = true;
		goto out;
	}

	applog(LOG_INFO, "Testing pool %s", pool->rpc_url);

	/* This is the central point we activate stratum when we can */
	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		goto out;
	}

	if (!(want_gbt || want_getwork))
		goto nohttp;

	work = make_work();

	/* Probe for GBT support on first pass */
	proto = want_gbt ? PLP_GETBLOCKTEMPLATE : PLP_GETWORK;

tryagain:
	rpc_req = prepare_rpc_req_probe(work, proto, NULL);
	work->pool = pool;
	if (!rpc_req)
		goto out;

	pool->probed = false;
	cgtime(&tv_getwork);
	val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req,
	                    true, false, &rolltime, pool, false);
	cgtime(&tv_getwork_reply);
	free(rpc_req);

	/* Detect if an HTTP getwork pool has an X-Stratum header at startup,
	 * and if so, switch to that in preference to getwork if it works */
	if (pool->stratum_url && want_stratum && pool_may_redirect_to(pool, pool->stratum_url) && (pool->has_stratum || stratum_works(pool))) {
		if (!pool->has_stratum) {
			applog(LOG_NOTICE, "Switching pool %d %s to %s", pool->pool_no, pool->rpc_url, pool->stratum_url);
			if (!pool->rpc_url)
				pool->rpc_url = strdup(pool->stratum_url);
			pool->has_stratum = true;
		}

		free_work(work);
		if (val)
			json_decref(val);

retry_stratum:
		;
		/* We create the stratum thread for each pool just after
		 * successful authorisation. Once the init flag has been set
		 * we never unset it and the stratum thread is responsible for
		 * setting/unsetting the active flag */
		bool init = pool_tset(pool, &pool->stratum_init);

		if (!init) {
			ret = initiate_stratum(pool) && auth_stratum(pool);
			if (ret)
			{
				detect_algo = 2;
				init_stratum_thread(pool);
			}
			else
			{
				pool_tclear(pool, &pool->stratum_init);
				pool->tv_idle = tv_getwork_reply;
			}
			goto out;
		}
		ret = pool->stratum_active;
		goto out;
	}
	else if (pool->has_stratum)
		shutdown_stratum(pool);

	if (val) {
		bool rc;
		json_t *res;

		res = json_object_get(val, "result");
		if ((!json_is_object(res)) || (proto == PLP_GETBLOCKTEMPLATE && !json_object_get(res, "bits")))
			goto badwork;

		work->rolltime = rolltime;
		rc = work_decode(pool, work, val);
		if (rc) {
			applog(LOG_DEBUG, "Successfully retrieved and deciphered work from pool %u %s",
			       pool->pool_no, pool->rpc_url);
			work->pool = pool;
			copy_time(&work->tv_getwork, &tv_getwork);
			copy_time(&work->tv_getwork_reply, &tv_getwork_reply);
			work->getwork_mode = GETWORK_MODE_TESTPOOL;
			calc_diff(work, 0);

			update_last_work(work);

			applog(LOG_DEBUG, "Pushing pooltest work to base pool");

			stage_work(work);
			total_getworks++;
			pool->getwork_requested++;
			ret = true;
			pool->tv_idle = tv_getwork_reply;
		} else {
badwork:
			json_decref(val);
			applog(LOG_DEBUG, "Successfully retrieved but FAILED to decipher work from pool %u %s",
			       pool->pool_no, pool->rpc_url);
			pool->proto = proto = pool_protocol_fallback(proto);
			if (PLP_NONE != proto)
				goto tryagain;
			pool->tv_idle = tv_getwork_reply;
			free_work(work);
			goto out;
		}
		json_decref(val);

		if (proto != pool->proto) {
			pool->proto = proto;
			applog(LOG_INFO, "Selected %s protocol for pool %u", pool_protocol_name(proto), pool->pool_no);
		}

		if (pool->lp_url)
			goto out;

		/* Decipher the longpoll URL, if any, and store it in ->lp_url */
		const struct blktmpl_longpoll_req *lp;
		if (work->tr && (lp = blktmpl_get_longpoll(work->tr->tmpl))) {
			// NOTE: work_decode takes care of lp id
			pool->lp_url = lp->uri ? absolute_uri(lp->uri, pool->rpc_url) : pool->rpc_url;
			if (!pool->lp_url)
			{
				ret = false;
				goto out;
			}
			pool->lp_proto = PLP_GETBLOCKTEMPLATE;
		}
		else
		if (pool->hdr_path && want_getwork) {
			pool->lp_url = absolute_uri(pool->hdr_path, pool->rpc_url);
			if (!pool->lp_url)
			{
				ret = false;
				goto out;
			}
			pool->lp_proto = PLP_GETWORK;
		} else
			pool->lp_url = NULL;

		if (want_longpoll && !pool->lp_started) {
			pool->lp_started = true;
			if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool)))
				quit(1, "Failed to create pool longpoll thread");
		}
	} else if (PLP_NONE != (proto = pool_protocol_fallback(proto))) {
		pool->proto = proto;
		goto tryagain;
	} else {
		pool->tv_idle = tv_getwork_reply;
		free_work(work);
nohttp:
		/* If we failed to parse a getwork, this could be a stratum
		 * url without the prefix stratum+tcp:// so let's check it */
		if (extract_sockaddr(pool->rpc_url, &pool->sockaddr_url, &pool->stratum_port) && initiate_stratum(pool)) {
			pool->has_stratum = true;
			goto retry_stratum;
		}
		applog(LOG_DEBUG, "FAILED to retrieve work from pool %u %s",
		       pool->pool_no, pool->rpc_url);
		if (!pinging)
			applog(LOG_WARNING, "Pool %u slow/down or URL or credentials invalid", pool->pool_no);
	}
out:
	if (curl)
		curl_easy_cleanup(curl);
	mutex_unlock(&pool->pool_test_lock);
	return ret;
}
static void pool_resus(struct pool *pool)
{
	if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio())
		applog(LOG_WARNING, "Pool %d %s alive, testing stability", pool->pool_no, pool->rpc_url);
	else
		applog(LOG_INFO, "Pool %d %s alive", pool->pool_no, pool->rpc_url);
}
static struct work *hash_pop(void)
{
	struct work *work = NULL, *tmp;
	int hc;
	struct timespec ts;

retry:
	mutex_lock(stgd_lock);
	while (!HASH_COUNT(staged_work))
	{
		if (unlikely(staged_full))
		{
			if (likely(opt_queue < 10 + mining_threads))
			{
				++opt_queue;
				applog(LOG_WARNING, "Staged work underrun; increasing queue minimum to %d", opt_queue);
			}
			else
				applog(LOG_WARNING, "Staged work underrun; not automatically increasing above %d", opt_queue);
			staged_full = false;  // Let it fill up before triggering an underrun again
			no_work = true;
		}

		/* pthread_cond_timedwait() expects an absolute deadline, so build
		 * one opt_log_interval seconds from now (the flattened listing
		 * passed a relative timespec here, which would time out
		 * immediately; this assumes the condvar uses the default wall
		 * clock, which cgtime() also reads). */
		struct timeval tv_deadline;
		cgtime(&tv_deadline);
		ts = (struct timespec){ .tv_sec = tv_deadline.tv_sec + opt_log_interval,
		                        .tv_nsec = tv_deadline.tv_usec * 1000, };

		pthread_cond_signal(&gws_cond);
		if (ETIMEDOUT == pthread_cond_timedwait(&getq->cond, stgd_lock, &ts))
		{
			run_cmd(cmd_idle);
			pthread_cond_signal(&gws_cond);
			pthread_cond_wait(&getq->cond, stgd_lock);
		}
	}
	no_work = false;

	hc = HASH_COUNT(staged_work);

	/* Find clone work if possible, to allow masters to be reused */
	if (hc > staged_rollable) {
		HASH_ITER(hh, staged_work, work, tmp) {
			if (!work_rollable(work))
				break;
		}
	} else
		work = staged_work;

	if (can_roll(work) && should_roll(work))
	{
		// Instead of consuming it, force it to be cloned and grab the clone
		mutex_unlock(stgd_lock);
		clone_available();
		goto retry;
	}

	HASH_DEL(staged_work, work);
	if (work_rollable(work))
		staged_rollable--;

	/* Signal the getwork scheduler to look for more work */
	pthread_cond_signal(&gws_cond);

	/* Signal hash_pop again in case there are multiple hash_pop waiters */
	pthread_cond_signal(&getq->cond);
	mutex_unlock(stgd_lock);

	work->pool->last_work_time = time(NULL);
	cgtime(&work->pool->tv_last_work_time);

	return work;
}
/* Clones work by rolling it if possible, and returning a clone instead of the
 * original work item which gets staged again to possibly be rolled again in
 * the future */
static struct work *clone_work(struct work *work)
{
	int mrs = mining_threads + opt_queue - total_staged();
	struct work *work_clone;
	bool cloned;

	if (mrs < 1)
		return work;

	cloned = false;
	work_clone = make_clone(work);
	while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
		stage_work(work_clone);
		roll_work(work);
		work_clone = make_clone(work);
		/* Roll it again to prevent duplicates should this be used
		 * directly later on */
		roll_work(work);
		cloned = true;
	}

	if (cloned) {
		stage_work(work);
		return work_clone;
	}

	free_work(work_clone);

	return work;
}
void gen_hash(unsigned char *data, unsigned char *hash, int len)
{
	unsigned char hash1[32];

	sha256(data, len, hash1);
	sha256(hash1, 32, hash);
}
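
/* Note (added): this is Bitcoin's double SHA-256 ("SHA256d"); it is the hash
 * applied to the coinbase and merkle links in gen_stratum_work2() below. */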
/* PDiff 1 is a 256 bit unsigned integer of
 * 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 * so we use a big endian 32 bit unsigned integer positioned at the Nth byte to
 * cover a huge range of difficulty targets, though not all 256 bits' worth */
static void pdiff_target_leadzero(void * const target_p, double diff)
{
	uint8_t *target = target_p;
	diff *= 0x100000000;
	int skip = log2(diff) / 8;
	if (skip)
	{
		if (skip > 0x1c)
			skip = 0x1c;
		diff /= pow(0x100, skip);
		memset(target, 0, skip);
	}
	uint32_t n = 0xffffffff;
	n = (double)n / diff;
	n = htobe32(n);
	memcpy(&target[skip], &n, sizeof(n));
	memset(&target[skip + sizeof(n)], 0xff, 32 - (skip + sizeof(n)));
}
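
/* Worked example (added): for pdiff 1, diff *= 2^32 gives skip = 32/8 = 4, so
 * 4 leading bytes are zeroed and n = 0xffffffff lands at byte 4, yielding
 * 00000000ffffffff...ff (exactly pdiff 1). For pdiff 2, n = 0x7fffffff,
 * yielding 000000007fffffff...ff -- the same values test_target() checks
 * below. */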
void set_target_to_pdiff(void * const dest_target, const double pdiff)
{
	unsigned char rtarget[32];
	pdiff_target_leadzero(rtarget, pdiff);
	swab256(dest_target, rtarget);
	if (opt_debug) {
		char htarget[65];
		bin2hex(htarget, rtarget, 32);
		applog(LOG_DEBUG, "Generated target %s", htarget);
	}
}

void set_target_to_bdiff(void * const dest_target, const double bdiff)
{
	set_target_to_pdiff(dest_target, bdiff_to_pdiff(bdiff));
}
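
/* Note (added): bdiff and pdiff differ only in their reference targets --
 * bdiff 1 corresponds to 00000000ffff0000...00 while pdiff 1 is
 * 00000000ffffffff...ff, a ratio of 65536/65535 (~1.0000152590218966, the
 * constant cited in test_target() below), which is the conversion
 * bdiff_to_pdiff() is assumed to apply. */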
void _test_target(void * const funcp, const char * const funcname, const bool little_endian, const void * const expectp, const double diff)
{
	uint8_t bufr[32], buf[32], expectr[32], expect[32];
	int off;
	void (*func)(void *, double) = funcp;

	func(little_endian ? bufr : buf, diff);
	if (little_endian)
		swab256(buf, bufr);
	swap32tobe(expect, expectp, 256/32);

	// Fuzzy comparison: the first 32 bits set must match, and the actual target must be >= the expected
	for (off = 0; off < 28 && !buf[off]; ++off)
	{}
	if (memcmp(&buf[off], &expect[off], 4))
	{
testfail: ;
		char hexbuf[65], expectbuf[65];
		bin2hex(hexbuf, buf, 32);
		bin2hex(expectbuf, expect, 32);
		applogr(, LOG_WARNING, "%s test failed: diff %g got %s (expected %s)",
		        funcname, diff, hexbuf, expectbuf);
	}
	if (!little_endian)
		swab256(bufr, buf);
	swab256(expectr, expect);
	if (!hash_target_check(expectr, bufr))
		goto testfail;
}

#define TEST_TARGET(func, le, expect, diff) \
	_test_target(func, #func, le, expect, diff)
void test_target()
{
	uint32_t expect[8] = {0};

	// bdiff 1 should be exactly 00000000ffff0000000006f29cfd29510a6caee84634e86a57257cf03152537f due to floating-point imprecision (pdiff1 / 1.0000152590218966)
	expect[0] = 0x0000ffff;
	TEST_TARGET(set_target_to_bdiff, true, expect, 1./0x10000);
	expect[0] = 0;
	expect[1] = 0xffff0000;
	TEST_TARGET(set_target_to_bdiff, true, expect, 1);
	expect[1] >>= 1;
	TEST_TARGET(set_target_to_bdiff, true, expect, 2);
	expect[1] >>= 3;
	TEST_TARGET(set_target_to_bdiff, true, expect, 0x10);
	expect[1] >>= 4;
	TEST_TARGET(set_target_to_bdiff, true, expect, 0x100);

	memset(&expect[1], '\xff', 28);
	expect[0] = 0x0000ffff;
	TEST_TARGET(set_target_to_pdiff, true, expect, 1./0x10000);
	expect[0] = 0;
	TEST_TARGET(set_target_to_pdiff, true, expect, 1);
	expect[1] >>= 1;
	TEST_TARGET(set_target_to_pdiff, true, expect, 2);
	expect[1] >>= 3;
	TEST_TARGET(set_target_to_pdiff, true, expect, 0x10);
	expect[1] >>= 4;
	TEST_TARGET(set_target_to_pdiff, true, expect, 0x100);
}
void stratum_work_cpy(struct stratum_work * const dst, const struct stratum_work * const src)
{
	*dst = *src;
	if (dst->tr)
		tmpl_incref(dst->tr);
	dst->nonce1 = maybe_strdup(src->nonce1);
	dst->job_id = maybe_strdup(src->job_id);
	bytes_cpy(&dst->coinbase, &src->coinbase);
	bytes_cpy(&dst->merkle_bin, &src->merkle_bin);
}

void stratum_work_clean(struct stratum_work * const swork)
{
	if (swork->tr)
		tmpl_decref(swork->tr);
	free(swork->nonce1);
	free(swork->job_id);
	bytes_free(&swork->coinbase);
	bytes_free(&swork->merkle_bin);
}
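
/* Usage sketch (added; illustrative only): the copy/clean pair gives a caller
 * a private deep copy of the pool's current stratum work that outlives the
 * pool lock:
 *
 *	struct stratum_work snap;
 *	stratum_work_cpy(&snap, &pool->swork);
 *	// ... use snap without holding pool->data_lock ...
 *	stratum_work_clean(&snap);
 */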
bool pool_has_usable_swork(const struct pool * const pool)
{
	if (opt_benchmark)
		return true;
	if (pool->swork.tr)
	{
		// GBT
		struct timeval tv_now;
		timer_set_now(&tv_now);
		return blkmk_time_left(pool->swork.tr->tmpl, tv_now.tv_sec);
	}
	return pool->stratum_notify;
}
/* Generates stratum based work based on the most recent notify information
 * from the pool. This will keep generating work while a pool is down so we use
 * other means to detect when the pool has died in stratum_thread */
static void gen_stratum_work(struct pool *pool, struct work *work)
{
	clean_work(work);

	cg_wlock(&pool->data_lock);
	pool->swork.data_lock_p = &pool->data_lock;

	const int n2size = pool->swork.n2size;
	bytes_resize(&work->nonce2, n2size);
	if (pool->nonce2sz < n2size)
		memset(&bytes_buf(&work->nonce2)[pool->nonce2sz], 0, n2size - pool->nonce2sz);
	memcpy(bytes_buf(&work->nonce2),
#ifdef WORDS_BIGENDIAN
	       // NOTE: On big endian, the least significant bytes sit at the end, so skip the leading (most significant) ones
	       &((char*)&pool->nonce2)[pool->nonce2off],
#else
	       &pool->nonce2,
#endif
	       pool->nonce2sz);
	pool->nonce2++;

	work->pool = pool;
	work->work_restart_id = pool->swork.work_restart_id;
	gen_stratum_work2(work, &pool->swork);

	cgtime(&work->tv_staged);
}
void gen_stratum_work2(struct work *work, struct stratum_work *swork)
{
	unsigned char *coinbase, merkle_root[32], merkle_sha[64];
	uint8_t *merkle_bin;
	uint32_t *data32, *swap32;
	int i;

	/* Generate coinbase */
	coinbase = bytes_buf(&swork->coinbase);
	memcpy(&coinbase[swork->nonce2_offset], bytes_buf(&work->nonce2), bytes_len(&work->nonce2));

	/* Downgrade to a read lock to read off the variables */
	if (swork->data_lock_p)
		cg_dwlock(swork->data_lock_p);

	/* Generate merkle root */
	gen_hash(coinbase, merkle_root, bytes_len(&swork->coinbase));
	memcpy(merkle_sha, merkle_root, 32);
	merkle_bin = bytes_buf(&swork->merkle_bin);
	for (i = 0; i < swork->merkles; ++i, merkle_bin += 32) {
		memcpy(merkle_sha + 32, merkle_bin, 32);
		gen_hash(merkle_sha, merkle_root, 64);
		memcpy(merkle_sha, merkle_root, 32);
	}
	data32 = (uint32_t *)merkle_sha;
	swap32 = (uint32_t *)merkle_root;
	flip32(swap32, data32);

	memcpy(&work->data[0], swork->header1, 36);
	memcpy(&work->data[36], merkle_root, 32);
	*((uint32_t*)&work->data[68]) = htobe32(swork->ntime + timer_elapsed(&swork->tv_received, NULL));
	memcpy(&work->data[72], swork->diffbits, 4);
	memset(&work->data[76], 0, 4);  // nonce
	memcpy(&work->data[80], workpadding_bin, 48);

	work->ntime_roll_limits = swork->ntime_roll_limits;

	/* Copy parameters required for share submission */
	memcpy(work->target, swork->target, sizeof(work->target));
	work->job_id = maybe_strdup(swork->job_id);
	work->nonce1 = maybe_strdup(swork->nonce1);
	if (swork->data_lock_p)
		cg_runlock(swork->data_lock_p);

	if (opt_debug)
	{
		char header[161];
		char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1];
		bin2hex(header, work->data, 80);
		bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
		applog(LOG_DEBUG, "Generated stratum header %s", header);
		applog(LOG_DEBUG, "Work job_id %s nonce2 %s", work->job_id, nonce2hex);
	}

	calc_midstate(work);

	local_work++;
	work->stratum = true;
	work->blk.nonce = 0;
	work->id = total_work++;
	work->longpoll = false;
	work->getwork_mode = GETWORK_MODE_STRATUM;
	calc_diff(work, 0);
}
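
/* Reference (added): layout of the 80-byte header assembled above:
 *   data[ 0..35]  header1 (block version + previous block hash)
 *   data[36..67]  merkle root (byte-flipped via flip32)
 *   data[68..71]  ntime, big endian, advanced by the seconds since notify
 *   data[72..75]  difficulty bits as sent by the pool
 *   data[76..79]  nonce, zeroed for the miner to fill in
 * data[80..127] hold the fixed SHA-256 padding for an 80-byte message. */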
void request_work(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats);

	/* Tell the watchdog thread this thread is waiting on getwork and
	 * should not be restarted */
	thread_reportout(thr);

	// HACK: Since get_work still blocks, reportout all processors dependent on this thread
	for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc)
	{
		if (proc->threads)
			break;
		thread_reportout(proc->thr[0]);
	}

	cgtime(&dev_stats->_get_start);
}
// FIXME: Make this non-blocking (and remove HACK above)
struct work *get_work(struct thr_info *thr)
{
	const int thr_id = thr->id;
	struct cgpu_info *cgpu = thr->cgpu;
	struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats);
	struct cgminer_stats *pool_stats;
	struct timeval tv_get;
	struct work *work = NULL;

	applog(LOG_DEBUG, "%"PRIpreprv": Popping work from get queue to get work", cgpu->proc_repr);

	while (!work) {
		work = hash_pop();
		if (stale_work(work, false)) {
			staged_full = false;  // It wasn't really full, since it was stale :(
			discard_work(work);
			work = NULL;
			wake_gws();
		}
	}
	last_getwork = time(NULL);

	applog(LOG_DEBUG, "%"PRIpreprv": Got work %d from get queue to get work for thread %d",
	       cgpu->proc_repr, work->id, thr_id);

	work->thr_id = thr_id;
	thread_reportin(thr);

	// HACK: Since get_work still blocks, reportin all processors dependent on this thread
	for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc)
	{
		if (proc->threads)
			break;
		thread_reportin(proc->thr[0]);
	}

	work->mined = true;
	work->blk.nonce = 0;

	cgtime(&tv_get);
	timersub(&tv_get, &dev_stats->_get_start, &tv_get);

	timeradd(&tv_get, &dev_stats->getwork_wait, &dev_stats->getwork_wait);
	if (timercmp(&tv_get, &dev_stats->getwork_wait_max, >))
		dev_stats->getwork_wait_max = tv_get;
	if (timercmp(&tv_get, &dev_stats->getwork_wait_min, <))
		dev_stats->getwork_wait_min = tv_get;
	++dev_stats->getwork_calls;

	pool_stats = &(work->pool->cgminer_stats);

	timeradd(&tv_get, &pool_stats->getwork_wait, &pool_stats->getwork_wait);
	if (timercmp(&tv_get, &pool_stats->getwork_wait_max, >))
		pool_stats->getwork_wait_max = tv_get;
	if (timercmp(&tv_get, &pool_stats->getwork_wait_min, <))
		pool_stats->getwork_wait_min = tv_get;
	++pool_stats->getwork_calls;

	if (work->work_difficulty < 1)
	{
		if (unlikely(work->work_difficulty < cgpu->min_nonce_diff))
		{
			if (cgpu->min_nonce_diff - work->work_difficulty > 1./0x10000000)
				applog(LOG_WARNING, "%"PRIpreprv": Using work with lower difficulty than device supports",
				       cgpu->proc_repr);
			work->nonce_diff = cgpu->min_nonce_diff;
		}
		else
			work->nonce_diff = work->work_difficulty;
	}
	else
		work->nonce_diff = 1;

	return work;
}
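
/* Worked example of the clamping above (illustrative values): with pool
 * work_difficulty = 0.125 and a device whose min_nonce_diff = 0.25, the
 * work is used anyway but nonce_diff is raised to 0.25, and the warning
 * is logged because the gap (0.125) exceeds the 1/0x10000000 tolerance.
 * Shares are then tested and credited at the device's minimum difficulty. */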
static
void _submit_work_async(struct work *work)
{
	applog(LOG_DEBUG, "Pushing submit work to work thread");

	if (opt_benchmark)
	{
		json_t * const jn = json_null();
		work_check_for_block(work);
		share_result(jn, jn, jn, work, false, "");
		free_work(work);
		return;
	}

	mutex_lock(&submitting_lock);
	++total_submitting;
	DL_APPEND(submit_waiting, work);
	mutex_unlock(&submitting_lock);

	notifier_wake(submit_waiting_notifier);
}

/* Submit a copy of the tested, statistic-recorded work item asynchronously */
static void submit_work_async2(struct work *work, struct timeval *tv_work_found)
{
	if (tv_work_found)
		copy_time(&work->tv_work_found, tv_work_found);
	_submit_work_async(work);
}
void inc_hw_errors3(struct thr_info *thr, const struct work *work, const uint32_t *bad_nonce_p, float nonce_diff)
{
	struct cgpu_info * const cgpu = thr->cgpu;

	if (bad_nonce_p)
	{
		if (bad_nonce_p == UNKNOWN_NONCE)
			applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce - HW error",
			       cgpu->proc_repr);
		else
			applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce (%08lx) - HW error",
			       cgpu->proc_repr, (unsigned long)be32toh(*bad_nonce_p));
	}

	mutex_lock(&stats_lock);
	hw_errors++;
	++cgpu->hw_errors;
	if (bad_nonce_p)
	{
		total_bad_diff1 += nonce_diff;
		cgpu->bad_diff1 += nonce_diff;
	}
	mutex_unlock(&stats_lock);

	if (thr->cgpu->drv->hw_error)
		thr->cgpu->drv->hw_error(thr);
}
void work_hash(struct work * const work)
{
#ifdef USE_SCRYPT
	if (opt_scrypt)
		scrypt_hash_data(work->hash, work->data);
	else
#endif
		hash_data(work->hash, work->data);
}
static
bool test_hash(const void * const phash, const float diff)
{
	const uint32_t * const hash = phash;
	if (diff >= 1.)
		// FIXME: > 1 should check more
		return !hash[7];

	const uint32_t Htarg = (uint32_t)(0x100000000 * diff) - 1;
	const uint32_t tmp_hash7 = le32toh(hash[7]);

	applog(LOG_DEBUG, "htarget %08lx hash %08lx",
	       (long unsigned int)Htarg,
	       (long unsigned int)tmp_hash7);

	return (tmp_hash7 <= Htarg);
}
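
/* Example of the sub-1 branch, plugging numbers into the formula above:
 * diff = 1.0/0x10000 gives Htarg = (0x100000000 * (1.0/0x10000)) - 1
 * = 0x10000 - 1 = 0xFFFF, so the share passes when the hash word tested
 * here is at most 0xFFFF, i.e. its top 16 bits are zero. */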
enum test_nonce2_result _test_nonce2(struct work *work, uint32_t nonce, bool checktarget)
{
	uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12);
	*work_nonce = htole32(nonce);

	work_hash(work);

	if (!test_hash(work->hash, work->nonce_diff))
		return TNR_BAD;

	if (checktarget && !hash_target_check_v(work->hash, work->target))
	{
		bool high_hash = true;
		struct pool * const pool = work->pool;
		if (pool->stratum_active)
		{
			/* Some stratum pools are buggy and expect difficulty
			 * changes to apply retroactively to already-issued work,
			 * so if the target has changed, check against the new
			 * target too and submit just in case */
			if (memcmp(pool->swork.target, work->target, sizeof(work->target)))
			{
				applog(LOG_DEBUG, "Stratum pool %u target has changed since work job issued, checking that too",
				       pool->pool_no);
				if (hash_target_check_v(work->hash, pool->swork.target))
					high_hash = false;
			}
		}
		if (high_hash)
			return TNR_HIGH;
	}
	return TNR_GOOD;
}
/* Returns true if nonce for work was a valid share */
bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	return submit_noffset_nonce(thr, work, nonce, 0);
}
/* Allows drivers to submit work items where the driver has changed the ntime
 * value by noffset. Must only be used with a work protocol that does not
 * ntime roll itself intrinsically to generate work (eg stratum). We do not
 * touch the original work struct, only the copy of it. */
bool submit_noffset_nonce(struct thr_info *thr, struct work *work_in, uint32_t nonce,
			  int noffset)
{
	struct work *work = make_work();
	_copy_work(work, work_in, noffset);

	uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12);
	struct timeval tv_work_found;
	enum test_nonce2_result res;
	bool ret = true;

	thread_reportout(thr);

	cgtime(&tv_work_found);
	*work_nonce = htole32(nonce);
	work->thr_id = thr->id;

	/* Do one last check before attempting to submit the work */
	/* Side effect: sets work->data and work->hash for us */
	res = test_nonce2(work, nonce);
	if (unlikely(res == TNR_BAD))
	{
		inc_hw_errors(thr, work, nonce);
		ret = false;
		goto out;
	}

	mutex_lock(&stats_lock);
	total_diff1 += work->nonce_diff;
	thr->cgpu->diff1 += work->nonce_diff;
	work->pool->diff1 += work->nonce_diff;
	thr->cgpu->last_device_valid_work = time(NULL);
	mutex_unlock(&stats_lock);

	if (noncelog_file)
		noncelog(work);

	if (res == TNR_HIGH)
	{
		// Share above target, normal
		/* Check the diff of the share, even if it didn't reach the
		 * target, just to set the best share value if it's higher. */
		share_diff(work);
		goto out;
	}

	submit_work_async2(work, &tv_work_found);
	work = NULL;  // Taken by submit_work_async2

out:
	if (work)
		free_work(work);
	thread_reportin(thr);
	return ret;
}
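
/* Driver-side sketch (hypothetical device code, not from this file): a
 * driver that found `nonce` against work it generated with ntime rolled
 * forward by one second would report it as
 *
 *	submit_noffset_nonce(thr, work, nonce, 1);
 *
 * while plain results go through submit_nonce(), which is simply the
 * noffset == 0 case above. */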
bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t hashes)
{
	if (wdiff->tv_sec > opt_scantime ||
	    work->blk.nonce >= 0xfffffffe - hashes ||
	    hashes >= 0xfffffffe ||
	    stale_work(work, false))
		return true;
	return false;
}
void __thr_being_msg(int prio, struct thr_info *thr, const char *being)
{
	struct cgpu_info *proc = thr->cgpu;
	if (proc->threads > 1)
		applog(prio, "%"PRIpreprv" (thread %d) %s", proc->proc_repr, thr->id, being);
	else
		applog(prio, "%"PRIpreprv" %s", proc->proc_repr, being);
}

// Called by asynchronous minerloops, when they find their processor should be disabled
void mt_disable_start(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;

	if (drv->thread_disable)
		drv->thread_disable(mythr);

	hashmeter2(mythr);
	__thr_being_msg(LOG_WARNING, mythr, "being disabled");
	mythr->rolling = mythr->cgpu->rolling = 0;
	thread_reportout(mythr);
	mythr->_mt_disable_called = true;
}
/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till
 * the driver tells us it's full so that it may extract the work item using
 * the get_queued() function which adds it to the hashtable on
 * cgpu->queued_work. */
static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
{
	thread_reportout(mythr);

	do {
		bool need_work;

		/* Do this lockless just to know if we need more unqueued work. */
		need_work = (!cgpu->unqueued_work);

		/* get_work is a blocking function so do it outside of lock
		 * to prevent deadlocks with other locks. */
		if (need_work) {
			struct work *work = get_work(mythr);

			wr_lock(&cgpu->qlock);
			/* Check we haven't grabbed work somehow between
			 * checking and picking up the lock. */
			if (likely(!cgpu->unqueued_work))
				cgpu->unqueued_work = work;
			else
				need_work = false;
			wr_unlock(&cgpu->qlock);

			if (unlikely(!need_work))
				discard_work(work);
		}
		/* The queue_full function should be used by the driver to
		 * actually place work items on the physical device if it
		 * does have a queue. */
	} while (drv->queue_full && !drv->queue_full(cgpu));
}
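
/* The intended life cycle of queued-driver work items, as implemented by
 * the helpers below (a summary, not new behaviour):
 *
 *	fill_queue()  -> cgpu->unqueued_work       (one pending item)
 *	get_queued()  -> cgpu->queued_work         (hashed into the table)
 *	find/take_queued_work_bymidstate()         (match device results)
 *	work_completed()                           (remove and free)
 */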
/* Add a work item to a cgpu's queued hashlist */
void __add_queued(struct cgpu_info *cgpu, struct work *work)
{
	cgpu->queued_count++;
	HASH_ADD_INT(cgpu->queued_work, id, work);
}

/* This function is for retrieving one work item from the unqueued pointer and
 * adding it to the hashtable of queued work. Code using this function must be
 * able to handle NULL as a return which implies there is no work available. */
struct work *get_queued(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	wr_lock(&cgpu->qlock);
	if (cgpu->unqueued_work) {
		work = cgpu->unqueued_work;
		if (unlikely(stale_work(work, false))) {
			discard_work(work);
			work = NULL;
			wake_gws();
		} else
			__add_queued(cgpu, work);
		cgpu->unqueued_work = NULL;
	}
	wr_unlock(&cgpu->qlock);

	return work;
}

void add_queued(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	__add_queued(cgpu, work);
	wr_unlock(&cgpu->qlock);
}

/* Get fresh work and add it to cgpu's queued hashlist */
struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id)
{
	struct work *work = get_work(thr);

	add_queued(cgpu, work);
	return work;
}
/* This function is for finding an already queued work item in the
 * given que hashtable. Code using this function must be able
 * to handle NULL as a return which implies there is no matching work.
 * The calling function must lock access to the que if it is required.
 * The common values for midstatelen, offset, datalen are 32, 64, 12 */
struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work, *tmp, *ret = NULL;

	HASH_ITER(hh, que, work, tmp) {
		if (memcmp(work->midstate, midstate, midstatelen) == 0 &&
		    memcmp(work->data + offset, data, datalen) == 0) {
			ret = work;
			break;
		}
	}

	return ret;
}

/* This function is for finding an already queued work item in the
 * device's queued_work hashtable. Code using this function must be able
 * to handle NULL as a return which implies there is no matching work.
 * The common values for midstatelen, offset, datalen are 32, 64, 12 */
struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *ret;

	rd_lock(&cgpu->qlock);
	ret = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	rd_unlock(&cgpu->qlock);

	return ret;
}

struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work, *ret = NULL;

	rd_lock(&cgpu->qlock);
	work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	if (work)
		ret = copy_work(work);
	rd_unlock(&cgpu->qlock);

	return ret;
}

void __work_completed(struct cgpu_info *cgpu, struct work *work)
{
	cgpu->queued_count--;
	HASH_DEL(cgpu->queued_work, work);
}

/* This function should be used by queued device drivers when they're sure
 * the work struct is no longer in use. */
void work_completed(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	__work_completed(cgpu, work);
	wr_unlock(&cgpu->qlock);
	free_work(work);
}

/* Combines find_queued_work_bymidstate and work_completed in one function,
 * without destroying the work, so the driver must free it. */
struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work;

	wr_lock(&cgpu->qlock);
	work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	if (work)
		__work_completed(cgpu, work);
	wr_unlock(&cgpu->qlock);

	return work;
}
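
/* Matching sketch for a queued driver's result path (hypothetical driver
 * code): given 32 bytes of midstate and the 12 data bytes at offset 64
 * echoed back by the hardware, the common call per the doc comments is
 *
 *	work = take_queued_work_bymidstate(cgpu, midstate, 32, data, 64, 12);
 *	if (work) {
 *		submit_nonce(thr, work, nonce);
 *		free_work(work);  // take_* removed it from the queue unfreed
 *	}
 */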
static void flush_queue(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	wr_lock(&cgpu->qlock);
	work = cgpu->unqueued_work;
	cgpu->unqueued_work = NULL;
	wr_unlock(&cgpu->qlock);

	if (work) {
		free_work(work);
		applog(LOG_DEBUG, "Discarded queued work item");
	}
}
/* This version of hash work is for devices that are fast enough to always
 * perform a full nonce range and need a queue to keep the device busy.
 * Work creation and destruction is not done from within this function
 * directly. */
void hash_queued_work(struct thr_info *mythr)
{
	const long cycle = opt_log_interval / 5 ? : 1;
	struct timeval tv_start = {0, 0}, tv_end;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	const int thr_id = mythr->id;
	int64_t hashes_done = 0;

	if (unlikely(cgpu->deven != DEV_ENABLED))
		mt_disable(mythr);

	while (likely(!cgpu->shutdown)) {
		struct timeval diff;
		int64_t hashes;

		fill_queue(mythr, cgpu, drv, thr_id);

		thread_reportin(mythr);
		hashes = drv->scanwork(mythr);

		/* Reset the bool here in case the driver looks for it
		 * synchronously in the scanwork loop. */
		mythr->work_restart = false;

		if (unlikely(hashes == -1)) {
			applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id);
			cgpu->deven = DEV_DISABLED;
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
			mt_disable(mythr);
		}

		hashes_done += hashes;
		cgtime(&tv_end);
		timersub(&tv_end, &tv_start, &diff);
		if (diff.tv_sec >= cycle) {
			hashmeter(thr_id, &diff, hashes_done);
			hashes_done = 0;
			copy_time(&tv_start, &tv_end);
		}

		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(mythr);

		if (unlikely(mythr->work_restart)) {
			flush_queue(cgpu);
			if (drv->flush_work)
				drv->flush_work(cgpu);
		}
	}
	// cgpu->deven = DEV_DISABLED; set in miner_thread
}
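
/* Minimal driver contract for this loop (a sketch with hypothetical
 * mydev_* names; real drivers arrange for hash_queued_work to run as
 * their mining loop):
 *
 *	static bool mydev_queue_full(struct cgpu_info *cgpu)
 *	{
 *		struct work *work = get_queued(cgpu);
 *		if (!work)
 *			return true;          // nothing staged; stop filling
 *		// ...send work to the hardware...
 *		return hw_queue_is_full;      // placeholder condition
 *	}
 *
 *	static int64_t mydev_scanwork(struct thr_info *thr)
 *	{
 *		// ...poll results, submit_nonce(), work_completed()...
 *		return hashes_done_since_last_call;  // or -1 on fatal error
 *	}
 *
 * queue_full returning false keeps fill_queue() staging more work, and a
 * scanwork return of -1 is treated above as grounds to disable the device. */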
// Called by minerloop, when it is re-enabling a processor
void mt_disable_finish(struct thr_info *mythr)
{
	struct device_drv *drv = mythr->cgpu->drv;

	thread_reportin(mythr);
	__thr_being_msg(LOG_WARNING, mythr, "being re-enabled");
	if (drv->thread_enable)
		drv->thread_enable(mythr);
	mythr->_mt_disable_called = false;
}

// Called by synchronous minerloops, when they find their processor should be disabled
// Calls mt_disable_start, waits until it's re-enabled, then calls mt_disable_finish
void mt_disable(struct thr_info *mythr)
{
	const struct cgpu_info * const cgpu = mythr->cgpu;

	mt_disable_start(mythr);
	applog(LOG_DEBUG, "Waiting for wakeup notification in miner thread");
	do {
		notifier_read(mythr->notifier);
	} while (mythr->pause || cgpu->deven != DEV_ENABLED);
	mt_disable_finish(mythr);
}

enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct work *work, struct timeval *tv_lp, struct timeval *tv_lp_reply)
{
	bool rc;

	work->rolltime = rolltime;
	rc = work_decode(pool, work, val);
	if (unlikely(!rc)) {
		applog(LOG_ERR, "Could not convert longpoll data to work");
		free_work(work);
		return;
	}
	total_getworks++;
	pool->getwork_requested++;
	work->pool = pool;

	copy_time(&work->tv_getwork, tv_lp);
	copy_time(&work->tv_getwork_reply, tv_lp_reply);
	calc_diff(work, 0);

	if (pool->enabled == POOL_REJECTING)
		work->mandatory = true;

	work->longpoll = true;
	work->getwork_mode = GETWORK_MODE_LP;

	update_last_work(work);

	/* We'll be checking this work item twice, but we already know it's
	 * from a new block so explicitly force the new block detection now
	 * rather than waiting for it to hit the stage thread. This also
	 * allows testwork to know whether LP discovered the block or not. */
	test_work_current(work);

	/* Don't use backup LPs as work if we have failover-only enabled. Use
	 * the longpoll work from a pool that has been rejecting shares as a
	 * way to detect when the pool has recovered.
	 */
	if (pool != current_pool() && opt_fail_only && pool->enabled != POOL_REJECTING) {
		free_work(work);
		return;
	}

	work = clone_work(work);

	applog(LOG_DEBUG, "Pushing converted work to stage thread");

	stage_work(work);
	applog(LOG_DEBUG, "Converted longpoll data to work");
}
/* If we want longpoll, enable it for the chosen default pool, or, if
 * the pool does not support longpoll, find the first one that does
 * and use its longpoll support */
static
struct pool *_select_longpoll_pool(struct pool *cp, bool(*func)(struct pool *))
{
	int i;

	if (func(cp))
		return cp;
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (func(pool))
			return pool;
	}
	return NULL;
}

/* This will make the longpoll thread wait till it's the current pool, or it
 * has been flagged as rejecting, before attempting to open any connections.
 */
static void wait_lpcurrent(struct pool *pool)
{
	while (!cnx_needed(pool))
	{
		pool->lp_active = false;
		mutex_lock(&lp_lock);
		pthread_cond_wait(&lp_cond, &lp_lock);
		mutex_unlock(&lp_lock);
	}
}

static curl_socket_t save_curl_socket(void *vpool, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr)
{
	struct pool *pool = vpool;
	curl_socket_t sock = bfg_socket(addr->family, addr->socktype, addr->protocol);
	pool->lp_socket = sock;
	return sock;
}
static void *longpoll_thread(void *userdata)
{
	struct pool *cp = (struct pool *)userdata;
	/* This *pool is the source of the actual longpoll, not the pool we've
	 * tied it to */
	struct timeval start, reply, end;
	struct pool *pool = NULL;
	char threadname[20];
	CURL *curl = NULL;
	int failures = 0;
	char *lp_url;
	int rolltime;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	snprintf(threadname, 20, "longpoll%u", cp->pool_no);
	RenameThread(threadname);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return NULL;
	}

retry_pool:
	pool = select_longpoll_pool(cp);
	if (!pool) {
		applog(LOG_WARNING, "No suitable long-poll found for %s", cp->rpc_url);
		while (!pool) {
			cgsleep_ms(60000);
			pool = select_longpoll_pool(cp);
		}
	}

	if (pool->has_stratum) {
		applog(LOG_WARNING, "Block change for %s detection via %s stratum",
		       cp->rpc_url, pool->rpc_url);
		goto out;
	}

	/* Any longpoll from any pool is enough for this to be true */
	have_longpoll = true;

	wait_lpcurrent(cp);
	{
		lp_url = pool->lp_url;
		if (cp == pool)
			applog(LOG_WARNING, "Long-polling activated for %s (%s)", lp_url, pool_protocol_name(pool->lp_proto));
		else
			applog(LOG_WARNING, "Long-polling activated for %s via %s (%s)", cp->rpc_url, lp_url, pool_protocol_name(pool->lp_proto));
	}

	while (42) {
		json_t *val, *soval;

		struct work *work = make_work();
		char *lpreq;
		lpreq = prepare_rpc_req(work, pool->lp_proto, pool->lp_id);
		work->pool = pool;
		if (!lpreq)
		{
			free_work(work);
			goto lpfail;
		}

		wait_lpcurrent(cp);

		cgtime(&start);

		/* Longpoll connections can be persistent for a very long time
		 * and any number of issues could have come up in the meantime
		 * so always establish a fresh connection instead of relying on
		 * a persistent one. */
		curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
		curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, save_curl_socket);
		curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
		val = json_rpc_call(curl, lp_url, pool->rpc_userpass,
				    lpreq, false, true, &rolltime, pool, false);
		pool->lp_socket = CURL_SOCKET_BAD;

		cgtime(&reply);

		free(lpreq);

		if (likely(val)) {
			soval = json_object_get(json_object_get(val, "result"), "submitold");
			if (soval)
				pool->submit_old = json_is_true(soval);
			else
				pool->submit_old = false;
			convert_to_work(val, rolltime, pool, work, &start, &reply);
			failures = 0;
			json_decref(val);
		} else {
			/* Some pools regularly drop the longpoll request so
			 * only see this as longpoll failure if it happens
			 * immediately and just restart it the rest of the
			 * time. */
			cgtime(&end);
			free_work(work);
			if (end.tv_sec - start.tv_sec <= 30)
			{
				if (failures == 1)
					applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url);
lpfail:
				cgsleep_ms(30000);
			}
		}

		if (pool != cp) {
			pool = select_longpoll_pool(cp);
			/* select_longpoll_pool can return NULL when no
			 * longpoll-capable pool remains, so check before
			 * dereferencing */
			if (unlikely(!pool))
				goto retry_pool;
			if (pool->has_stratum) {
				applog(LOG_WARNING, "Block change for %s detection via %s stratum",
				       cp->rpc_url, pool->rpc_url);
				break;
			}
		}

		if (unlikely(pool->removed))
			break;
	}

out:
	pool->lp_active = false;
	curl_easy_cleanup(curl);

	return NULL;
}
static void stop_longpoll(void)
{
	int i;

	want_longpoll = false;
	for (i = 0; i < total_pools; ++i)
	{
		struct pool *pool = pools[i];

		if (unlikely(!pool->lp_started))
			continue;

		pool->lp_started = false;
		pthread_cancel(pool->longpoll_thread);
	}
	have_longpoll = false;
}

static void start_longpoll(void)
{
	int i;

	want_longpoll = true;
	for (i = 0; i < total_pools; ++i)
	{
		struct pool *pool = pools[i];

		if (unlikely(pool->removed || pool->lp_started || !pool->lp_url))
			continue;

		pool->lp_started = true;
		if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool)))
			quit(1, "Failed to create pool longpoll thread");
	}
}

void reinit_device(struct cgpu_info *cgpu)
{
	if (cgpu->drv->reinit_device)
		cgpu->drv->reinit_device(cgpu);
}

static struct timeval rotate_tv;
/* We reap curls if they are unused for over five minutes */
static void reap_curl(struct pool *pool)
{
	struct curl_ent *ent, *iter;
	struct timeval now;
	int reaped = 0;

	cgtime(&now);

	mutex_lock(&pool->pool_lock);
	LL_FOREACH_SAFE(pool->curllist, ent, iter) {
		if (pool->curls < 2)
			break;
		if (now.tv_sec - ent->tv.tv_sec > 300) {
			reaped++;
			pool->curls--;
			LL_DELETE(pool->curllist, ent);
			curl_easy_cleanup(ent->curl);
			free(ent);
		}
	}
	mutex_unlock(&pool->pool_lock);

	if (reaped)
		applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
}
static void *watchpool_thread(void __maybe_unused *userdata)
{
	int intervals = 0;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	RenameThread("watchpool");

	while (42) {
		struct timeval now;
		int i;

		if (++intervals > 20)
			intervals = 0;
		cgtime(&now);

		for (i = 0; i < total_pools; i++) {
			struct pool *pool = pools[i];

			if (!opt_benchmark)
				reap_curl(pool);

			/* Get a rolling utility per pool over 10 mins */
			if (intervals > 19) {
				int shares = pool->diff1 - pool->last_shares;

				pool->last_shares = pool->diff1;
				pool->utility = (pool->utility + (double)shares * 0.63) / 1.63;
				pool->shares = pool->utility;
			}

			if (pool->enabled == POOL_DISABLED)
				continue;

			/* Don't start testing any pools if the test threads
			 * from startup are still doing their first attempt. */
			if (unlikely(pool->testing)) {
				pthread_join(pool->test_thread, NULL);
			}

			/* Test if an idle pool has come back, once every 30 seconds */
			if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 30) {
				if (pool_active(pool, true) && pool_tclear(pool, &pool->idle))
					pool_resus(pool);
			}

			/* Only switch pools if the failback pool has been
			 * alive for more than 5 minutes (default) to prevent
			 * intermittently failing pools from being used. */
			if (!pool->idle && pool_strategy == POOL_FAILOVER && pool->prio < cp_prio() &&
			    now.tv_sec - pool->tv_idle.tv_sec > opt_fail_switch_delay) {
				if (opt_fail_switch_delay % 60)
					applog(LOG_WARNING, "Pool %d %s stable for %d second%s",
					       pool->pool_no, pool->rpc_url,
					       opt_fail_switch_delay,
					       (opt_fail_switch_delay == 1 ? "" : "s"));
				else
					applog(LOG_WARNING, "Pool %d %s stable for %d minute%s",
					       pool->pool_no, pool->rpc_url,
					       opt_fail_switch_delay / 60,
					       (opt_fail_switch_delay == 60 ? "" : "s"));
				switch_pools(NULL);
			}
		}

		if (current_pool()->idle)
			switch_pools(NULL);

		if (pool_strategy == POOL_ROTATE && now.tv_sec - rotate_tv.tv_sec > 60 * opt_rotate_period) {
			cgtime(&rotate_tv);
			switch_pools(NULL);
		}

		cgsleep_ms(30000);
	}
	return NULL;
}
void mt_enable(struct thr_info *thr)
{
	applog(LOG_DEBUG, "Waking up thread %d", thr->id);
	notifier_wake(thr->notifier);
}

void proc_enable(struct cgpu_info *cgpu)
{
	int j;

	cgpu->deven = DEV_ENABLED;
	for (j = cgpu->threads ?: 1; j--; )
		mt_enable(cgpu->thr[j]);
}

#define device_recovered(cgpu)  proc_enable(cgpu)
void cgpu_set_defaults(struct cgpu_info * const cgpu)
{
	struct string_elist *setstr_elist;
	const char *p, *p2;
	char replybuf[0x2000];
	size_t L;

	DL_FOREACH(opt_set_device_list, setstr_elist)
	{
		const char * const setstr = setstr_elist->string;
		p = strchr(setstr, ':');
		if (!p)
			p = setstr;
		{
			L = p - setstr;
			char pattern[L + 1];
			if (L)
				memcpy(pattern, setstr, L);
			pattern[L] = '\0';
			if (!cgpu_match(pattern, cgpu))
				continue;
		}
		applog(LOG_DEBUG, "%"PRIpreprv": %s: Matched with set default: %s",
		       cgpu->proc_repr, __func__, setstr);
		if (p[0] == ':')
			++p;
		p2 = strchr(p, '=');
		if (!p2)
		{
			L = strlen(p);
			p2 = "";
		}
		else
		{
			L = p2 - p;
			++p2;
		}
		char opt[L + 1];
		if (L)
			memcpy(opt, p, L);
		opt[L] = '\0';
		L = strlen(p2);
		char setval[L + 1];
		if (L)
			memcpy(setval, p2, L);
		setval[L] = '\0';

		enum bfg_set_device_replytype success;
		p = proc_set_device(cgpu, opt, setval, replybuf, &success);
		switch (success)
		{
			case SDR_OK:
				applog(LOG_DEBUG, "%"PRIpreprv": Applied rule %s%s%s",
				       cgpu->proc_repr, setstr,
				       p ? ": " : "", p ?: "");
				break;
			case SDR_ERR:
			case SDR_HELP:
			case SDR_UNKNOWN:
				applog(LOG_DEBUG, "%"PRIpreprv": Applying rule %s: %s",
				       cgpu->proc_repr, setstr, p);
				break;
			case SDR_AUTO:
			case SDR_NOSUPP:
				applog(LOG_DEBUG, "%"PRIpreprv": set_device is not implemented (trying to apply rule: %s)",
				       cgpu->proc_repr, setstr);
		}
	}
	cgpu->already_set_defaults = true;
}
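
/* Example of the rule syntax parsed above (illustrative values): an entry
 * of "mld:clock=800" splits into pattern "mld" (matched against this
 * processor via cgpu_match), option "clock" and value "800"; "mld:fanauto"
 * yields option "fanauto" with an empty value; a bare "voltage=1.2" has no
 * pattern at all and so applies wherever cgpu_match accepts the empty
 * pattern. */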
void drv_set_defaults(const struct device_drv * const drv, const void *datap, void *userp, const char * const devpath, const char * const serial, const int mode)
{
	struct device_drv dummy_drv = *drv;
	struct cgpu_info dummy_cgpu = {
		.drv = &dummy_drv,
		.device = &dummy_cgpu,
		.device_id = -1,
		.proc_id = -1,
		.device_data = userp,
		.device_path = devpath,
		.dev_serial = serial,
	};
	strcpy(dummy_cgpu.proc_repr, drv->name);
	switch (mode)
	{
		case 0:
			dummy_drv.set_device = datap;
			break;
		case 1:
			dummy_drv.set_device = NULL;
			dummy_cgpu.set_device_funcs = datap;
			break;
	}
	cgpu_set_defaults(&dummy_cgpu);
}
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
 * the screen at regular intervals, and restarts threads if they appear to have
 * died. */
#define WATCHDOG_SICK_TIME	60
#define WATCHDOG_DEAD_TIME	600
#define WATCHDOG_SICK_COUNT	(WATCHDOG_SICK_TIME/WATCHDOG_INTERVAL)
#define WATCHDOG_DEAD_COUNT	(WATCHDOG_DEAD_TIME/WATCHDOG_INTERVAL)

static void *watchdog_thread(void __maybe_unused *userdata)
{
	const unsigned int interval = WATCHDOG_INTERVAL;
	struct timeval zero_tv;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	RenameThread("watchdog");

	memset(&zero_tv, 0, sizeof(struct timeval));
	cgtime(&rotate_tv);

	while (1) {
		int i;
		struct timeval now;

		sleep(interval);

		discard_stale();

		hashmeter(-1, &zero_tv, 0);

#ifdef HAVE_CURSES
		const int ts = total_staged();
		if (curses_active_locked()) {
			change_logwinsize();
			curses_print_status(ts);
			_refresh_devstatus(true);
			touchwin(logwin);
			wrefresh(logwin);
			unlock_curses();
		}
#endif

		cgtime(&now);

		if (!sched_paused && !should_run()) {
			applog(LOG_WARNING, "Pausing execution as per stop time %02d:%02d scheduled",
			       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			if (!schedstart.enable) {
				quit(0, "Terminating execution as planned");
				break;
			}

			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			sched_paused = true;

			rd_lock(&mining_thr_lock);
			for (i = 0; i < mining_threads; i++)
				mining_thr[i]->pause = true;
			rd_unlock(&mining_thr_lock);
		} else if (sched_paused && should_run()) {
			applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			if (schedstop.enable)
				applog(LOG_WARNING, "Will pause execution as scheduled at %02d:%02d",
				       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			sched_paused = false;

			for (i = 0; i < mining_threads; i++) {
				struct thr_info *thr;

				thr = get_thread(i);
				thr->pause = false;
			}

			for (i = 0; i < total_devices; ++i)
			{
				struct cgpu_info *cgpu = get_devices(i);

				/* Don't touch disabled devices */
				if (cgpu->deven == DEV_DISABLED)
					continue;
				proc_enable(cgpu);
			}
		}

		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = get_devices(i);

			if (!cgpu->disable_watchdog)
				bfg_watchdog(cgpu, &now);
		}
	}

	return NULL;
}
void bfg_watchdog(struct cgpu_info * const cgpu, struct timeval * const tvp_now)
{
	struct thr_info *thr = cgpu->thr[0];
	enum dev_enable *denable;
	char *dev_str = cgpu->proc_repr;

	if (likely(drv_ready(cgpu)))
	{
		if (unlikely(!cgpu->already_set_defaults))
			cgpu_set_defaults(cgpu);

		if (cgpu->drv->get_stats)
			cgpu->drv->get_stats(cgpu);
	}

	denable = &cgpu->deven;

	if (cgpu->drv->watchdog)
		cgpu->drv->watchdog(cgpu, tvp_now);

	/* Thread is disabled */
	if (*denable == DEV_DISABLED)
		return;
	else
	if (*denable == DEV_RECOVER_ERR) {
		if (opt_restart && timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > cgpu->reinit_backoff) {
			applog(LOG_NOTICE, "Attempting to reinitialize %s",
			       dev_str);
			if (cgpu->reinit_backoff < 300)
				cgpu->reinit_backoff *= 2;
			device_recovered(cgpu);
		}
		return;
	}
	else
	if (*denable == DEV_RECOVER) {
		if (opt_restart && cgpu->temp < cgpu->targettemp) {
			applog(LOG_NOTICE, "%s recovered to temperature below target, re-enabling",
			       dev_str);
			device_recovered(cgpu);
		}
		dev_error_update(cgpu, REASON_DEV_THERMAL_CUTOFF);
		return;
	}
	else
	if (cgpu->temp > cgpu->cutofftemp)
	{
		applog(LOG_WARNING, "%s hit thermal cutoff limit at %dC, disabling!",
		       dev_str, (int)cgpu->temp);
		*denable = DEV_RECOVER;
		dev_error(cgpu, REASON_DEV_THERMAL_CUTOFF);
		run_cmd(cmd_idle);
	}

	if (thr->getwork) {
		if (cgpu->status == LIFE_WELL && thr->getwork < tvp_now->tv_sec - opt_log_interval) {
			int thrid;
			bool cgpu_idle = true;
			thr->rolling = 0;
			for (thrid = 0; thrid < cgpu->threads; ++thrid)
				if (!cgpu->thr[thrid]->getwork)
					cgpu_idle = false;
			if (cgpu_idle) {
				cgpu->rolling = 0;
				cgpu->status = LIFE_WAIT;
			}
		}
		return;
	}
	else if (cgpu->status == LIFE_WAIT)
		cgpu->status = LIFE_WELL;

#ifdef WANT_CPUMINE
	if (!strcmp(cgpu->drv->dname, "cpu"))
		return;
#endif

	if (cgpu->status != LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME)) {
		if (likely(cgpu->status != LIFE_INIT && cgpu->status != LIFE_INIT2))
			applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
		cgpu->status = LIFE_WELL;
		cgpu->device_last_well = time(NULL);
	} else if (cgpu->status == LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME)) {
		thr->rolling = cgpu->rolling = 0;
		cgpu->status = LIFE_SICK;
		applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str);
		cgtime(&thr->sick);

		dev_error(cgpu, REASON_DEV_SICK_IDLE_60);
		run_cmd(cmd_sick);
		if (opt_restart && cgpu->drv->reinit_device) {
			applog(LOG_ERR, "%s: Attempting to restart", dev_str);
			reinit_device(cgpu);
		}
	} else if (cgpu->status == LIFE_SICK && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME)) {
		cgpu->status = LIFE_DEAD;
		applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str);
		cgtime(&thr->sick);

		dev_error(cgpu, REASON_DEV_DEAD_IDLE_600);
		run_cmd(cmd_dead);
	} else if (tvp_now->tv_sec - thr->sick.tv_sec > 60 &&
	           (cgpu->status == LIFE_SICK || cgpu->status == LIFE_DEAD)) {
		/* Attempt to restart a GPU that's sick or dead once every minute */
		cgtime(&thr->sick);
		if (opt_restart)
			reinit_device(cgpu);
	}
}
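
/* State summary for the checks above (constants from this file):
 * LIFE_WELL -> LIFE_SICK after WATCHDOG_SICK_TIME (60s) without progress,
 * LIFE_SICK -> LIFE_DEAD after WATCHDOG_DEAD_TIME (600s); sick or dead
 * devices get a reinit_device() attempt at most once per minute when
 * opt_restart is set, and LIFE_WAIT marks a device that is merely idle
 * waiting on getwork. */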
static void log_print_status(struct cgpu_info *cgpu)
{
	char logline[255];

	get_statline(logline, sizeof(logline), cgpu);
	applog(LOG_WARNING, "%s", logline);
}

void print_summary(void)
{
	struct timeval diff;
	int hours, mins, secs, i;
	double utility, efficiency = 0.0;
	char xfer[(ALLOC_H2B_SPACED*2)+4+1], bw[(ALLOC_H2B_SPACED*2)+6+1];
	int pool_secs;

	timersub(&total_tv_end, &total_tv_start, &diff);
	hours = diff.tv_sec / 3600;
	mins = (diff.tv_sec % 3600) / 60;
	secs = diff.tv_sec % 60;

	utility = total_accepted / total_secs * 60;
	efficiency = total_bytes_xfer ? total_diff_accepted * 2048. / total_bytes_xfer : 0.0;

	applog(LOG_WARNING, "\nSummary of runtime statistics:\n");
	applog(LOG_WARNING, "Started at %s", datestamp);
	if (total_pools == 1)
		applog(LOG_WARNING, "Pool: %s", pools[0]->rpc_url);
#ifdef WANT_CPUMINE
	if (opt_n_threads > 0)
		applog(LOG_WARNING, "CPU hasher algorithm used: %s", algo_names[opt_algo]);
#endif
	applog(LOG_WARNING, "Runtime: %d hrs : %d mins : %d secs", hours, mins, secs);
	applog(LOG_WARNING, "Average hashrate: %.1f Megahash/s", total_mhashes_done / total_secs);
	applog(LOG_WARNING, "Solved blocks: %d", found_blocks);
	applog(LOG_WARNING, "Best share difficulty: %s", best_share);
	applog(LOG_WARNING, "Share submissions: %d", total_accepted + total_rejected);
	applog(LOG_WARNING, "Accepted shares: %d", total_accepted);
	applog(LOG_WARNING, "Rejected shares: %d + %d stale (%.2f%%)",
	       total_rejected, total_stale,
	       (float)(total_rejected + total_stale) / (float)(total_rejected + total_stale + total_accepted)
	);
	applog(LOG_WARNING, "Accepted difficulty shares: %1.f", total_diff_accepted);
	applog(LOG_WARNING, "Rejected difficulty shares: %1.f", total_diff_rejected);
	applog(LOG_WARNING, "Hardware errors: %d", hw_errors);
	applog(LOG_WARNING, "Network transfer: %s (%s)",
	       multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
	                          (float)total_bytes_rcvd,
	                          (float)total_bytes_sent),
	       multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
	                          (float)(total_bytes_rcvd / total_secs),
	                          (float)(total_bytes_sent / total_secs)));
	applog(LOG_WARNING, "Efficiency (accepted shares * difficulty / 2 KB): %.2f", efficiency);
	applog(LOG_WARNING, "Utility (accepted shares / min): %.2f/min\n", utility);

	applog(LOG_WARNING, "Unable to get work from server occasions: %d", total_go);
	applog(LOG_WARNING, "Work items generated locally: %d", local_work);
	applog(LOG_WARNING, "Submitting work remotely delay occasions: %d", total_ro);
	applog(LOG_WARNING, "New blocks detected on network: %d\n", new_blocks);

	if (total_pools > 1) {
		for (i = 0; i < total_pools; i++) {
			struct pool *pool = pools[i];

			applog(LOG_WARNING, "Pool: %s", pool->rpc_url);
			if (pool->solved)
				applog(LOG_WARNING, "SOLVED %d BLOCK%s!", pool->solved, pool->solved > 1 ? "S" : "");
			applog(LOG_WARNING, " Share submissions: %d", pool->accepted + pool->rejected);
			applog(LOG_WARNING, " Accepted shares: %d", pool->accepted);
			applog(LOG_WARNING, " Rejected shares: %d + %d stale (%.2f%%)",
			       pool->rejected, pool->stale_shares,
			       (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted)
			);
			applog(LOG_WARNING, " Accepted difficulty shares: %1.f", pool->diff_accepted);
			applog(LOG_WARNING, " Rejected difficulty shares: %1.f", pool->diff_rejected);
			pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL);
			applog(LOG_WARNING, " Network transfer: %s (%s)",
			       multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
			                          (float)pool->cgminer_pool_stats.net_bytes_received,
			                          (float)pool->cgminer_pool_stats.net_bytes_sent),
			       multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
			                          (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs),
			                          (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs)));
			uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent;
			efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. / pool_bytes_xfer : 0.0;
			applog(LOG_WARNING, " Efficiency (accepted * difficulty / 2 KB): %.2f", efficiency);

			applog(LOG_WARNING, " Items worked on: %d", pool->works);
			applog(LOG_WARNING, " Unable to get work from server occasions: %d", pool->getfail_occasions);
			applog(LOG_WARNING, " Submitting work remotely delay occasions: %d\n", pool->remotefail_occasions);
		}
	}

	if (opt_quit_summary != BQS_NONE)
	{
		if (opt_quit_summary == BQS_DETAILED)
			include_serial_in_statline = true;
		applog(LOG_WARNING, "Summary of per device statistics:\n");
		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = get_devices(i);
			if (!cgpu->proc_id)
			{
				// Device summary line
				opt_show_procs = false;
				log_print_status(cgpu);
				opt_show_procs = true;
			}
			if ((opt_quit_summary == BQS_PROCS || opt_quit_summary == BQS_DETAILED) && cgpu->procs > 1)
				log_print_status(cgpu);
		}
	}

	if (opt_shares) {
		applog(LOG_WARNING, "Mined %g accepted shares of %g requested\n", total_diff_accepted, opt_shares);
		if (opt_shares > total_diff_accepted)
			applog(LOG_WARNING, "WARNING - Mined only %g shares of %g requested.", total_diff_accepted, opt_shares);
	}
	applog(LOG_WARNING, " ");

	fflush(stderr);
	fflush(stdout);
}
void _bfg_clean_up(bool restarting)
{
#ifdef HAVE_OPENCL
	clear_adl(nDevs);
#endif
#ifdef HAVE_LIBUSB
	if (likely(have_libusb))
		libusb_exit(NULL);
#endif

	cgtime(&total_tv_end);
#ifdef WIN32
	timeEndPeriod(1);
#endif
	if (!restarting) {
		/* Attempting to disable curses or print a summary during a
		 * restart can lead to a deadlock. */
#ifdef HAVE_CURSES
		disable_curses();
#endif
		if (!opt_realquiet && successful_connect)
			print_summary();
	}

	if (opt_n_threads > 0)
		free(cpus);

	curl_global_cleanup();

#ifdef WIN32
	WSACleanup();
#endif
}

void _quit(int status)
{
	if (status) {
		const char *ev = getenv("__BFGMINER_SEGFAULT_ERRQUIT");
		if (unlikely(ev && ev[0] && ev[0] != '0')) {
			int *p = NULL;
			// NOTE: debugger can bypass with: p = &p
			*p = status;  // Segfault, hopefully dumping core
		}
	}

#if defined(unix) || defined(__APPLE__)
	if (forkpid > 0) {
		kill(forkpid, SIGTERM);
		forkpid = 0;
	}
#endif

	exit(status);
}
#ifdef HAVE_CURSES
char *curses_input(const char *query)
{
	char *input;

	echo();
	// Allocate one byte more than the wgetnstr limit, for the terminating NUL
	input = malloc(256);
	if (!input)
		quit(1, "Failed to malloc input");
	leaveok(logwin, false);
	wlogprint("%s:\n", query);
	wgetnstr(logwin, input, 255);
	if (!strlen(input))
	{
		free(input);
		input = NULL;
	}
	leaveok(logwin, true);
	noecho();
	return input;
}
#endif
static void *test_pool_thread(void *arg)
{
	struct pool *pool = (struct pool *)arg;

	if (pool_active(pool, false)) {
		pool_tset(pool, &pool->lagging);
		pool_tclear(pool, &pool->idle);
		bool first_pool = false;

		cg_wlock(&control_lock);
		if (!pools_active) {
			currentpool = pool;
			if (pool->pool_no != 0)
				first_pool = true;
			pools_active = true;
		}
		cg_wunlock(&control_lock);

		if (unlikely(first_pool))
			applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url);
		else
			applog(LOG_NOTICE, "Pool %d %s alive", pool->pool_no, pool->rpc_url);
		switch_pools(NULL);
	} else
		pool_died(pool);

	pool->testing = false;
	return NULL;
}
/* Always returns true that the pool details were added unless we are not
 * live, implying this is the only pool being added, so if no pools are
 * active it returns false. */
bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass)
{
	size_t siz;

	pool->rpc_url = url;
	pool->rpc_user = user;
	pool->rpc_pass = pass;
	siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2;
	pool->rpc_userpass = malloc(siz);
	if (!pool->rpc_userpass)
		quit(1, "Failed to malloc userpass");
	snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass);

	pool->testing = true;
	pool->idle = true;
	enable_pool(pool);

	pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool);
	if (!live) {
		pthread_join(pool->test_thread, NULL);
		return pools_active;
	}
	return true;
}
#ifdef HAVE_CURSES
static bool input_pool(bool live)
{
	char *url = NULL, *user = NULL, *pass = NULL;
	struct pool *pool;
	bool ret = false;

	immedok(logwin, true);
	wlogprint("Input server details.\n");

	url = curses_input("URL");
	if (!url)
		goto out;

	user = curses_input("Username");
	if (!user)
		goto out;

	pass = curses_input("Password");
	if (!pass)
		pass = calloc(1, 1);

	pool = add_pool();

	if (!detect_stratum(pool, url) && strncmp(url, "http://", 7) &&
	    strncmp(url, "https://", 8)) {
		char *httpinput;

		httpinput = malloc(256);
		if (!httpinput)
			quit(1, "Failed to malloc httpinput");
		strcpy(httpinput, "http://");
		strncat(httpinput, url, 248);
		free(url);
		url = httpinput;
	}

	ret = add_pool_details(pool, live, url, user, pass);
out:
	immedok(logwin, false);

	if (!ret) {
		if (url)
			free(url);
		if (user)
			free(user);
		if (pass)
			free(pass);
	}
	return ret;
}
#endif
#if defined(unix) || defined(__APPLE__)
static void fork_monitor()
{
	// Make a pipe: [readFD, writeFD]
	int pfd[2];
	int r = pipe(pfd);

	if (r < 0) {
		perror("pipe - failed to create pipe for --monitor");
		exit(1);
	}

	// Make stderr the write end of the pipe
	fflush(stderr);
	r = dup2(pfd[1], 2);
	if (r < 0) {
		perror("dup2 - failed to alias stderr to write end of pipe for --monitor");
		exit(1);
	}
	r = close(pfd[1]);
	if (r < 0) {
		perror("close - failed to close write end of pipe for --monitor");
		exit(1);
	}

	// Don't allow a dying monitor to kill the main process
	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
	if (SIG_ERR == sr0 || SIG_ERR == sr1) {
		perror("signal - failed to edit signal mask for --monitor");
		exit(1);
	}

	// Fork a child process
	forkpid = fork();
	if (forkpid < 0) {
		perror("fork - failed to fork child process for --monitor");
		exit(1);
	}

	// Child: launch monitor command
	if (0 == forkpid) {
		// Make stdin the read end of the pipe
		r = dup2(pfd[0], 0);
		if (r < 0) {
			perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor");
			exit(1);
		}
		r = close(pfd[0]);
		if (r < 0) {
			perror("close - in child, failed to close read end of pipe for --monitor");
			exit(1);
		}

		// Launch user specified command
		execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL);
		perror("execl - in child failed to exec user specified command for --monitor");
		exit(1);
	}

	// Parent: clean up unused fds and bail
	r = close(pfd[0]);
	if (r < 0) {
		perror("close - failed to close read end of pipe for --monitor");
		exit(1);
	}
}
#endif // defined(unix)
#ifdef HAVE_CURSES
#ifdef USE_UNICODE
static
wchar_t select_unicode_char(const wchar_t *opt)
{
	for ( ; *opt; ++opt)
		if (iswprint(*opt))
			return *opt;
	return '?';
}
#endif

void enable_curses(void)
{
	int x;
	__maybe_unused int y;

	lock_curses();
	if (curses_active) {
		unlock_curses();
		return;
	}

#ifdef USE_UNICODE
	if (use_unicode)
	{
		setlocale(LC_CTYPE, "");
		if (iswprint(0xb0))
			have_unicode_degrees = true;
		unicode_micro = select_unicode_char(L"\xb5\u03bcu");
	}
#endif

	mainwin = initscr();
	start_color();
#if defined(PDCURSES) || defined(NCURSES_VERSION)
	if (ERR != use_default_colors())
		default_bgcolor = -1;
#endif
	if (has_colors() && ERR != init_pair(1, COLOR_WHITE, COLOR_BLUE))
	{
		menu_attr = COLOR_PAIR(1);
		if (ERR != init_pair(2, COLOR_RED, default_bgcolor))
			attr_bad |= COLOR_PAIR(2);
	}
	keypad(mainwin, true);
	getmaxyx(mainwin, y, x);
	statuswin = newwin(logstart, x, 0, 0);
	leaveok(statuswin, true);
	// For whatever reason, PDCurses crashes if the logwin is initialized to height y-logcursor
	// We resize the window later anyway, so just start it off at 1 :)
	logwin = newwin(1, 0, logcursor, 0);
	idlok(logwin, true);
	scrollok(logwin, true);
	leaveok(logwin, true);
	cbreak();
	noecho();
	nonl();
	curses_active = true;
	statusy = logstart;
	unlock_curses();
}
#endif
/* TODO: We still need a dummy CPU device_drv even when there is no support
 * for CPU mining */
#ifndef WANT_CPUMINE
struct device_drv cpu_drv;
struct device_drv cpu_drv = {
	.name = "CPU",
};
#endif

static int cgminer_id_count = 0;
static int device_line_id_count;
void register_device(struct cgpu_info *cgpu)
{
	cgpu->deven = DEV_ENABLED;

	wr_lock(&devices_lock);
	devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
	wr_unlock(&devices_lock);

	if (!cgpu->proc_id)
		cgpu->device_line_id = device_line_id_count++;
	mining_threads += cgpu->threads ?: 1;

#ifdef HAVE_CURSES
	adj_width(mining_threads, &dev_width);
#endif

	rwlock_init(&cgpu->qlock);
	cgpu->queued_work = NULL;
}

struct _cgpu_devid_counter {
	char name[4];
	int lastid;
	UT_hash_handle hh;
};

void renumber_cgpu(struct cgpu_info *cgpu)
{
	static struct _cgpu_devid_counter *devids = NULL;
	struct _cgpu_devid_counter *d;

	HASH_FIND_STR(devids, cgpu->drv->name, d);
	if (d)
		cgpu->device_id = ++d->lastid;
	else {
		d = malloc(sizeof(*d));
		memcpy(d->name, cgpu->drv->name, sizeof(d->name));
		cgpu->device_id = d->lastid = 0;
		HASH_ADD_STR(devids, name, d);
	}

	// Build repr strings
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);

	const int lpcount = cgpu->procs;
	if (lpcount > 1)
	{
		int ns;
		struct cgpu_info *slave;
		int lpdigits = 1;

		for (int i = lpcount; i > 26 && lpdigits < 3; i /= 26)
			++lpdigits;

		memset(&cgpu->proc_repr[5], 'a', lpdigits);
		cgpu->proc_repr[5 + lpdigits] = '\0';
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], &cgpu->proc_repr[5]);

		slave = cgpu;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = slave->next_proc;
			strcpy(slave->proc_repr, cgpu->proc_repr);
			strcpy(slave->proc_repr_ns, cgpu->proc_repr_ns);
			for (int x = i, y = lpdigits; --y, x; x /= 26)
			{
				slave->proc_repr_ns[ns + y] =
				slave->proc_repr[5 + y] += (x % 26);
			}
		}
	}
}
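
/* Example of the naming produced above (illustrative device): a driver
 * named "MLD" with device_id 0 and procs == 3 gets dev_repr "MLD 0"
 * (the %2u pads the id to two columns) and per-processor proc_repr values
 * "MLD 0a", "MLD 0b" and "MLD 0c"; the proc_repr_ns variants drop the
 * padding, giving "MLD0a" and so on. */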
static bool my_blkmaker_sha256_callback(void *digest, const void *buffer, size_t length)
{
	sha256(buffer, length, digest);
	return true;
}

static
int drv_algo_check(const struct device_drv * const drv)
{
	const int algomatch = opt_scrypt ? POW_SCRYPT : POW_SHA256D;
	const supported_algos_t algos = drv->supported_algos ?: POW_SHA256D;
	return (algos & algomatch);
}

#ifndef HAVE_PTHREAD_CANCEL
extern void setup_pthread_cancel_workaround();
extern struct sigaction pcwm_orig_term_handler;
#endif

bool bfg_need_detect_rescan;
extern void probe_device(struct lowlevel_device_info *);
static void schedule_rescan(const struct timeval *);

static
void drv_detect_all()
{
	bool rescanning = false;
rescan:
	bfg_need_detect_rescan = false;
#ifdef HAVE_BFG_LOWLEVEL
	struct lowlevel_device_info * const infolist = lowlevel_scan(), *info, *infotmp;

	LL_FOREACH_SAFE(infolist, info, infotmp)
		probe_device(info);
	LL_FOREACH_SAFE(infolist, info, infotmp)
		pthread_join(info->probe_pth, NULL);
#endif

	struct driver_registration *reg, *tmp;
	BFG_FOREACH_DRIVER_BY_PRIORITY(reg, tmp)
	{
		const struct device_drv * const drv = reg->drv;
		if (!(drv_algo_check(drv) && drv->drv_detect))
			continue;
		drv->drv_detect();
	}

#ifdef HAVE_BFG_LOWLEVEL
	lowlevel_scan_free();
#endif

	if (bfg_need_detect_rescan)
	{
		if (rescanning)
		{
			applog(LOG_DEBUG, "Device rescan requested a second time, delaying");
			struct timeval tv_when;
			timer_set_delay_from_now(&tv_when, rescan_delay_ms * 1000);
			schedule_rescan(&tv_when);
		}
		else
		{
			rescanning = true;
			applog(LOG_DEBUG, "Device rescan requested");
			goto rescan;
		}
	}
}
static
void allocate_cgpu(struct cgpu_info *cgpu, unsigned int *kp)
{
	struct thr_info *thr;
	int j;
	struct device_drv *api = cgpu->drv;

	cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
	int threadobj = cgpu->threads;
	if (!threadobj)
		// Create a fake thread object to handle hashmeter etc
		threadobj = 1;
	cgpu->thr = calloc(threadobj + 1, sizeof(*cgpu->thr));
	cgpu->thr[threadobj] = NULL;
	cgpu->status = LIFE_INIT;

	if (opt_devices_enabled_list)
	{
		struct string_elist *enablestr_elist;
		cgpu->deven = DEV_DISABLED;
		DL_FOREACH(opt_devices_enabled_list, enablestr_elist)
		{
			const char * const enablestr = enablestr_elist->string;
			if (cgpu_match(enablestr, cgpu))
			{
				cgpu->deven = DEV_ENABLED;
				break;
			}
		}
	}

	cgpu->max_hashes = 0;
	BFGINIT(cgpu->min_nonce_diff, 1);
	BFGINIT(cgpu->cutofftemp, opt_cutofftemp);
	BFGINIT(cgpu->targettemp, cgpu->cutofftemp - 6);

	// Setup thread structs before starting any of the threads, in case they try to interact
	for (j = 0; j < threadobj; ++j, ++*kp) {
		thr = get_thread(*kp);
		thr->id = *kp;
		thr->cgpu = cgpu;
		thr->device_thread = j;
		thr->work_restart_notifier[1] = INVSOCK;
		thr->mutex_request[1] = INVSOCK;
		thr->_job_transition_in_progress = true;
		timerclear(&thr->tv_morework);
		thr->scanhash_working = true;
		thr->hashes_done = 0;
		timerclear(&thr->tv_hashes_done);
		cgtime(&thr->tv_lastupdate);
		thr->tv_poll.tv_sec = -1;
		thr->_max_nonce = api->can_limit_work ? api->can_limit_work(thr) : 0xffffffff;
		cgpu->thr[j] = thr;
	}

	if (!cgpu->device->threads)
		notifier_init_invalid(cgpu->thr[0]->notifier);
	else
	if (!cgpu->threads)
		memcpy(&cgpu->thr[0]->notifier, &cgpu->device->thr[0]->notifier, sizeof(cgpu->thr[0]->notifier));
	else
		for (j = 0; j < cgpu->threads; ++j)
		{
			thr = cgpu->thr[j];
			notifier_init(thr->notifier);
		}
}
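
/* Spawn the miner threads for one device, then enable it if configured so */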
static
void start_cgpu(struct cgpu_info *cgpu)
{
	struct thr_info *thr;
	int j;
	for (j = 0; j < cgpu->threads; ++j) {
		thr = cgpu->thr[j];
		/* Enable threads for devices set not to mine but disable
		 * their queue in case we wish to enable them later */
		if (cgpu->drv->thread_prepare && !cgpu->drv->thread_prepare(thr))
			continue;
		thread_reportout(thr);
		if (unlikely(thr_info_create(thr, NULL, miner_thread, thr)))
			quit(1, "thread %d create failed", thr->id);
		notifier_wake(thr->notifier);
	}
	if (cgpu->deven == DEV_ENABLED)
		proc_enable(cgpu);
}
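
/* Detection helper for runtime device scans: when given a specific serial
 * string, detection runs against a temporary scan_devices list holding only
 * "noauto" plus that entry, after which the original list is restored. */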
static
void _scan_serial(void *p)
{
	const char *s = p;
	struct string_elist *iter, *tmp;
	struct string_elist *orig_scan_devices = scan_devices;

	if (s)
	{
		// Make temporary scan_devices list
		scan_devices = NULL;
		string_elist_add("noauto", &scan_devices);
		add_serial(s);
	}

	drv_detect_all();

	if (s)
	{
		DL_FOREACH_SAFE(scan_devices, iter, tmp)
		{
			string_elist_del(&scan_devices, iter);
		}
		scan_devices = orig_scan_devices;
	}
}

#ifdef HAVE_BFG_LOWLEVEL
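/* Does this lowlevel device match a user-supplied serial/path/devid string?
 * Plain case-insensitive compares are tried first; failing those, the string
 * is treated as a device path and converted to a devid for comparison. */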
static
bool _probe_device_match(const struct lowlevel_device_info * const info, const char * const ser)
{
	if (!(false
	   || (info->serial && !strcasecmp(ser, info->serial))
	   || (info->path   && !strcasecmp(ser, info->path  ))
	   || (info->devid  && !strcasecmp(ser, info->devid ))
	))
	{
		char *devid = devpath_to_devid(ser);
		if (!devid)
			return false;
		const bool different = strcmp(info->devid, devid);
		free(devid);
		if (different)
			return false;
	}
	return true;
}
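
/* Look up a driver registration by name, case-insensitively: first by
 * lowercased dname, then by uppercased short name. Returns NULL if no
 * driver is found or it cannot mine the active algorithm. */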
static
const struct device_drv *_probe_device_find_drv(const char * const _dname, const size_t dnamelen)
{
	struct driver_registration *dreg;
	char dname[dnamelen];
	int i;

	for (i = 0; i < dnamelen; ++i)
		dname[i] = tolower(_dname[i]);
	BFG_FIND_DRV_BY_DNAME(dreg, dname, dnamelen);
	if (!dreg)
	{
		for (i = 0; i < dnamelen; ++i)
			dname[i] = toupper(_dname[i]);
		BFG_FIND_DRV_BY_NAME(dreg, dname, dnamelen);
		if (!dreg)
			return NULL;
	}
	if (!drv_algo_check(dreg->drv))
		return NULL;
	return dreg->drv;
}
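
/* Invoke a driver's lowl_probe on one device. Returns true when the device
 * was claimed outright (no BPR_CONTINUE_PROBES); on a failed probe it may
 * flag a hotplug rescan request, unless the driver set BPR_DONT_RESCAN. */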
static
bool _probe_device_do_probe(const struct device_drv * const drv, const struct lowlevel_device_info * const info, bool * const request_rescan_p)
{
	bfg_probe_result_flags = 0;
	if (drv->lowl_probe(info))
	{
		if (!(bfg_probe_result_flags & BPR_CONTINUE_PROBES))
			return true;
	}
	else
	if (request_rescan_p && opt_hotplug && !(bfg_probe_result_flags & BPR_DONT_RESCAN))
		*request_rescan_p = true;
	return false;
}

bool dummy_check_never_true = false;
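
/* Per-device probe thread. Matching proceeds in three passes over the
 * scan_devices list: explicit "driver:serial"/"driver@path" assignments
 * first, then drivers whose lowl_match accepts the device (unless flagged
 * "noauto"), then bare entries and "driver:all". A rescan is only requested
 * if no probe ever claimed the device, since a successful probe returns
 * immediately. */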
static
void *probe_device_thread(void *p)
{
	struct lowlevel_device_info * const infolist = p;
	struct lowlevel_device_info *info = infolist;
	bool request_rescan = false;

	{
		// "probe_" prefix is 6 chars, plus devid and NUL terminator
		char threadname[6 + strlen(info->devid) + 1];
		sprintf(threadname, "probe_%s", info->devid);
		RenameThread(threadname);
	}

	// If already in use, ignore
	if (bfg_claim_any(NULL, NULL, info->devid))
		applogr(NULL, LOG_DEBUG, "%s: \"%s\" already in use",
		        __func__, info->product);

	// if lowlevel device matches specific user assignment, probe requested driver(s)
	struct string_elist *sd_iter, *sd_tmp;
	struct driver_registration *dreg, *dreg_tmp;
	DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
	{
		const char * const dname = sd_iter->string;
		const char * const colon = strpbrk(dname, ":@");
		if (!(colon && colon != dname))
			continue;
		const char * const ser = &colon[1];
		LL_FOREACH2(infolist, info, same_devid_next)
		{
			if (!_probe_device_match(info, ser))
				continue;
			const size_t dnamelen = (colon - dname);
			const struct device_drv * const drv = _probe_device_find_drv(dname, dnamelen);
			if (!(drv && drv->lowl_probe && drv_algo_check(drv)))
				continue;
			if (_probe_device_do_probe(drv, info, &request_rescan))
				return NULL;
		}
	}

	// probe driver(s) with auto enabled and matching VID/PID/Product/etc of device
	BFG_FOREACH_DRIVER_BY_PRIORITY(dreg, dreg_tmp)
	{
		const struct device_drv * const drv = dreg->drv;
		if (!drv_algo_check(drv))
			continue;
		// Check for "noauto" flag
		// NOTE: driver-specific configuration overrides general
		bool doauto = true;
		DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
		{
			const char * const dname = sd_iter->string;
			// NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable
			const char *colon = strchr(dname, ':');
			if (!colon)
				colon = &dname[-1];
			if (strcasecmp("noauto", &colon[1]) && strcasecmp("auto", &colon[1]))
				continue;
			const ssize_t dnamelen = (colon - dname);
			if (dnamelen >= 0 && _probe_device_find_drv(dname, dnamelen) != drv)
				continue;
			doauto = (tolower(colon[1]) == 'a');
			if (dnamelen != -1)
				break;
		}
		if (doauto && drv->lowl_match)
		{
			LL_FOREACH2(infolist, info, same_devid_next)
			{
				/*
				   The below call to applog is absolutely necessary.
				   Starting with commit 76d0cc183b1c9ddcc0ef34d2e43bc696ef9de92e, installing BFGMiner on
				   Mac OS X using Homebrew results in a binary that segfaults on startup.
				   There are two unresolved issues:
				   1) The BFGMiner authors cannot find a way to install BFGMiner with Homebrew that results
				      in debug symbols being available to help troubleshoot the issue.
				   2) The issue disappears when unrelated code changes are made, such as adding the following
				      call to applog with infolist and/or p.
				   We would encourage revisiting this in the future to come up with a more concrete solution.
				   Reproducing should only require commenting/removing the following line and installing
				   BFGMiner using "brew install bfgminer --HEAD".
				 */
				if (dummy_check_never_true)
					applog(LOG_DEBUG, "lowl_match: %p(%s) %p %p %p", drv, drv->dname, info, infolist, p);
				if (!drv->lowl_match(info))
					continue;
				if (_probe_device_do_probe(drv, info, &request_rescan))
					return NULL;
			}
		}
	}

	// probe driver(s) with 'all' enabled
	DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
	{
		const char * const dname = sd_iter->string;
		// NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable
		const char * const colon = strchr(dname, ':');
		if (!colon)
		{
			LL_FOREACH2(infolist, info, same_devid_next)
			{
				if (
#ifdef NEED_BFG_LOWL_VCOM
				    (info->lowl == &lowl_vcom && !strcasecmp(dname, "all")) ||
#endif
				    _probe_device_match(info, (dname[0] == '@') ? &dname[1] : dname))
				{
					bool dont_rescan = false;
					BFG_FOREACH_DRIVER_BY_PRIORITY(dreg, dreg_tmp)
					{
						const struct device_drv * const drv = dreg->drv;
						if (!drv_algo_check(drv))
							continue;
						if (drv->lowl_probe_by_name_only)
							continue;
						if (!drv->lowl_probe)
							continue;
						if (_probe_device_do_probe(drv, info, NULL))
							return NULL;
						if (bfg_probe_result_flags & BPR_DONT_RESCAN)
							dont_rescan = true;
					}
					if (opt_hotplug && !dont_rescan)
						request_rescan = true;
					break;
				}
			}
			continue;
		}
		if (strcasecmp(&colon[1], "all"))
			continue;
		const size_t dnamelen = (colon - dname);
		const struct device_drv * const drv = _probe_device_find_drv(dname, dnamelen);
		if (!(drv && drv->lowl_probe && drv_algo_check(drv)))
			continue;
		LL_FOREACH2(infolist, info, same_devid_next)
		{
			if (info->lowl->exclude_from_all)
				continue;
			if (_probe_device_do_probe(drv, info, NULL))
				return NULL;
		}
	}

	// Only actually request a rescan if we never found any cgpu
	if (request_rescan)
		bfg_need_detect_rescan = true;

	return NULL;
}
void probe_device(struct lowlevel_device_info * const info)
{
	pthread_create(&info->probe_pth, NULL, probe_device_thread, info);
}
#endif
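
/* Hot-add path: run addfunc (which populates devices_new/total_devices_new),
 * then grow the devices and mining_thr arrays and allocate, start, and
 * register each new cgpu. Serialised by a local mutex; returns how many
 * devices were actually added. */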
int create_new_cgpus(void (*addfunc)(void*), void *arg)
{
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
	int devcount, i, mining_threads_new = 0;
	unsigned int k;
	struct cgpu_info *cgpu;
	struct thr_info *thr;
	void *p;

	mutex_lock(&mutex);
	devcount = total_devices;
	addfunc(arg);
	if (!total_devices_new)
		goto out;

	wr_lock(&devices_lock);
	p = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + total_devices_new + 1));
	if (unlikely(!p))
	{
		wr_unlock(&devices_lock);
		applog(LOG_ERR, "scan_serial: realloc failed trying to grow devices array");
		goto out;
	}
	devices = p;
	wr_unlock(&devices_lock);

	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		mining_threads_new += cgpu->threads ?: 1;
	}

	wr_lock(&mining_thr_lock);
	mining_threads_new += mining_threads;
	p = realloc(mining_thr, sizeof(struct thr_info *) * mining_threads_new);
	if (unlikely(!p))
	{
		wr_unlock(&mining_thr_lock);
		applog(LOG_ERR, "scan_serial: realloc failed trying to grow mining_thr");
		goto out;
	}
	mining_thr = p;
	wr_unlock(&mining_thr_lock);

	for (i = mining_threads; i < mining_threads_new; ++i) {
		mining_thr[i] = calloc(1, sizeof(*thr));
		if (!mining_thr[i])
		{
			applog(LOG_ERR, "scan_serial: Failed to calloc mining_thr[%d]", i);
			for ( ; --i >= mining_threads; )
				free(mining_thr[i]);
			goto out;
		}
	}

	k = mining_threads;
	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		allocate_cgpu(cgpu, &k);
	}
	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		start_cgpu(cgpu);
		register_device(cgpu);
		++total_devices;
	}

#ifdef HAVE_CURSES
	switch_logsize();
#endif

out:
	total_devices_new = 0;
	devcount = total_devices - devcount;
	mutex_unlock(&mutex);
	return devcount;
}
int scan_serial(const char *s)
{
	return create_new_cgpus(_scan_serial, (void*)s);
}
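
/* Deferred-rescan state: tv_rescan holds when the next rescan is due (unset
 * when none is pending), rescan_active tracks whether the worker thread is
 * running, and rescan_notifier can interrupt the worker's select() early.
 * All of it is guarded by rescan_mutex. */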
static pthread_mutex_t rescan_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool rescan_active;
static struct timeval tv_rescan;
static notifier_t rescan_notifier;

static
void *rescan_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());
	RenameThread("rescan");

	struct timeval tv_timeout, tv_now;
	fd_set rfds;
	while (true)
	{
		mutex_lock(&rescan_mutex);
		tv_timeout = tv_rescan;
		if (!timer_isset(&tv_timeout))
		{
			rescan_active = false;
			mutex_unlock(&rescan_mutex);
			break;
		}
		mutex_unlock(&rescan_mutex);

		FD_ZERO(&rfds);
		FD_SET(rescan_notifier[0], &rfds);
		const int maxfd = rescan_notifier[0];
		timer_set_now(&tv_now);
		if (select(maxfd+1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now)) > 0)
			notifier_read(rescan_notifier);

		mutex_lock(&rescan_mutex);
		if (timer_passed(&tv_rescan, NULL))
		{
			timer_unset(&tv_rescan);
			mutex_unlock(&rescan_mutex);
			applog(LOG_DEBUG, "Rescan timer expired, triggering");
			scan_serial(NULL);
		}
		else
			mutex_unlock(&rescan_mutex);
	}
	return NULL;
}
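
/* Caller must hold rescan_mutex. Rescans are debounced toward the latest
 * request: an already-pending schedule is never moved earlier, only later. */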
static
void _schedule_rescan(const struct timeval * const tvp_when)
{
	if (rescan_active)
	{
		if (timercmp(tvp_when, &tv_rescan, <))
			applog(LOG_DEBUG, "schedule_rescan: New schedule is before current, waiting it out");
		else
		{
			applog(LOG_DEBUG, "schedule_rescan: New schedule is after current, delaying rescan");
			tv_rescan = *tvp_when;
		}
		return;
	}
	applog(LOG_DEBUG, "schedule_rescan: Scheduling rescan (no rescans currently pending)");
	tv_rescan = *tvp_when;
	rescan_active = true;
	static pthread_t pth;
	if (unlikely(pthread_create(&pth, NULL, rescan_thread, NULL)))
		applog(LOG_ERR, "Failed to start rescan thread");
}
static
void schedule_rescan(const struct timeval * const tvp_when)
{
	mutex_lock(&rescan_mutex);
	_schedule_rescan(tvp_when);
	mutex_unlock(&rescan_mutex);
}
#ifdef HAVE_BFG_HOTPLUG
static
void hotplug_trigger()
{
	applog(LOG_DEBUG, "%s: Scheduling rescan immediately", __func__);
	struct timeval tv_now;
	timer_set_now(&tv_now);
	schedule_rescan(&tv_now);
}
#endif
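
/* Platform hotplug listeners: on Linux, a udev netlink monitor polled via
 * epoll, where each "add" event arms a hotplug_delay_ms timeout before
 * triggering; on Windows, a hidden window watching WM_DEVICECHANGE with a
 * similar timer-based delay. */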
#if defined(HAVE_LIBUDEV) && defined(HAVE_SYS_EPOLL_H)
static
void *hotplug_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());
	RenameThread("hotplug");

	struct udev * const udev = udev_new();
	if (unlikely(!udev))
		applogfailr(NULL, LOG_ERR, "udev_new");
	struct udev_monitor * const mon = udev_monitor_new_from_netlink(udev, "udev");
	if (unlikely(!mon))
		applogfailr(NULL, LOG_ERR, "udev_monitor_new_from_netlink");
	if (unlikely(udev_monitor_enable_receiving(mon)))
		applogfailr(NULL, LOG_ERR, "udev_monitor_enable_receiving");

	const int epfd = epoll_create(1);
	if (unlikely(epfd == -1))
		applogfailr(NULL, LOG_ERR, "epoll_create");
	{
		const int fd = udev_monitor_get_fd(mon);
		struct epoll_event ev = {
			.events = EPOLLIN | EPOLLPRI,
			.data.fd = fd,
		};
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev))
			applogfailr(NULL, LOG_ERR, "epoll_ctl");
	}

	struct epoll_event ev;
	int rv;
	bool pending = false;
	while (true)
	{
		rv = epoll_wait(epfd, &ev, 1, pending ? hotplug_delay_ms : -1);
		if (rv == -1)
		{
			if (errno == EAGAIN || errno == EINTR)
				continue;
			break;
		}
		if (!rv)
		{
			hotplug_trigger();
			pending = false;
			continue;
		}
		struct udev_device * const device = udev_monitor_receive_device(mon);
		if (!device)
			continue;
		const char * const action = udev_device_get_action(device);
		applog(LOG_DEBUG, "%s: Received %s event", __func__, action);
		if (!strcmp(action, "add"))
			pending = true;
		udev_device_unref(device);
	}
	applogfailr(NULL, LOG_ERR, "epoll_wait");
}
#elif defined(WIN32)
static UINT_PTR _hotplug_wintimer_id;

VOID CALLBACK hotplug_win_timer(HWND hwnd, UINT msg, UINT_PTR idEvent, DWORD dwTime)
{
	KillTimer(NULL, _hotplug_wintimer_id);
	_hotplug_wintimer_id = 0;
	hotplug_trigger();
}

LRESULT CALLBACK hotplug_win_callback(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
	if (msg == WM_DEVICECHANGE && wParam == DBT_DEVNODES_CHANGED)
	{
		applog(LOG_DEBUG, "%s: Received DBT_DEVNODES_CHANGED event", __func__);
		_hotplug_wintimer_id = SetTimer(NULL, _hotplug_wintimer_id, hotplug_delay_ms, hotplug_win_timer);
	}
	return DefWindowProc(hwnd, msg, wParam, lParam);
}

static
void *hotplug_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());

	WNDCLASS DummyWinCls = {
		.lpszClassName = "BFGDummyWinCls",
		.lpfnWndProc = hotplug_win_callback,
	};
	ATOM a = RegisterClass(&DummyWinCls);
	if (unlikely(!a))
		applogfailinfor(NULL, LOG_ERR, "RegisterClass", "%d", (int)GetLastError());

	HWND hwnd = CreateWindow((void*)(intptr_t)a, NULL, WS_OVERLAPPED, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, NULL, NULL);
	if (unlikely(!hwnd))
		applogfailinfor(NULL, LOG_ERR, "CreateWindow", "%d", (int)GetLastError());

	MSG msg;
	while (GetMessage(&msg, NULL, 0, 0))
	{
		TranslateMessage(&msg);
		DispatchMessage(&msg);
	}
	quit(0, "WM_QUIT received");
	return NULL;
}
#endif
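
/* Spawn the platform-specific hotplug listener thread, if one was built */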
#ifdef HAVE_BFG_HOTPLUG
static
void hotplug_start()
{
	pthread_t pth;
	if (unlikely(pthread_create(&pth, NULL, hotplug_thread, NULL)))
		applog(LOG_ERR, "Failed to start hotplug thread");
}
#endif
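
/* Kick off an asynchronous liveness test for every configured pool */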
static void probe_pools(void)
{
	int i;

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		pool->testing = true;
		pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool);
	}
}
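
/* Raise the soft RLIMIT_NOFILE toward the hard limit, but never beyond
 * FD_SETSIZE, since that is the most select() can handle. */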
static void raise_fd_limits(void)
{
#ifdef HAVE_SETRLIMIT
	struct rlimit fdlimit;
	rlim_t old_soft_limit;
	char frombuf[0x10] = "unlimited";
	char hardbuf[0x10] = "unlimited";

	if (getrlimit(RLIMIT_NOFILE, &fdlimit))
		applogr(, LOG_DEBUG, "setrlimit: Failed to getrlimit(RLIMIT_NOFILE)");
	old_soft_limit = fdlimit.rlim_cur;
	if (fdlimit.rlim_max > FD_SETSIZE || fdlimit.rlim_max == RLIM_INFINITY)
		fdlimit.rlim_cur = FD_SETSIZE;
	else
		fdlimit.rlim_cur = fdlimit.rlim_max;
	if (fdlimit.rlim_max != RLIM_INFINITY)
		snprintf(hardbuf, sizeof(hardbuf), "%lu", (unsigned long)fdlimit.rlim_max);
	if (old_soft_limit != RLIM_INFINITY)
		snprintf(frombuf, sizeof(frombuf), "%lu", (unsigned long)old_soft_limit);
	if (fdlimit.rlim_cur == old_soft_limit)
		applogr(, LOG_DEBUG, "setrlimit: Soft fd limit not being changed from %lu (FD_SETSIZE=%lu; hard limit=%s)",
		        (unsigned long)old_soft_limit, (unsigned long)FD_SETSIZE, hardbuf);
	if (setrlimit(RLIMIT_NOFILE, &fdlimit))
		applogr(, LOG_DEBUG, "setrlimit: Failed to change soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)",
		        frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf);
	applog(LOG_DEBUG, "setrlimit: Changed soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)",
	       frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf);
#else
	applog(LOG_DEBUG, "setrlimit: Not supported by platform");
#endif
}
extern void bfg_init_threadlocal();
extern void stratumsrv_start();
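
/* Startup sequence: global locks and signal handlers, option parsing and
 * config loading, optional privilege drop (chroot/setuid), device detection,
 * pool probing, thread allocation and startup, and the control threads.
 * Once all of that is done, main() loops forever as the getwork scheduler. */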
int main(int argc, char *argv[])
{
	struct sigaction handler;
	struct thr_info *thr;
	struct block *block;
	unsigned int k;
	int i;
	int rearrange_pools = 0;
	char *s;

#ifdef WIN32
	LoadLibrary("backtrace.dll");
#endif

	blkmk_sha256_impl = my_blkmaker_sha256_callback;
	bfg_init_threadlocal();

#ifndef HAVE_PTHREAD_CANCEL
	setup_pthread_cancel_workaround();
#endif

	bfg_init_checksums();

#ifdef WIN32
	{
		WSADATA wsa;
		i = WSAStartup(MAKEWORD(2, 2), &wsa);
		if (i)
			quit(1, "Failed to initialise Winsock: %s", bfg_strerror(i, BST_SOCKET));
	}
#endif
	/* This dangerous function tramples random dynamically allocated
	 * variables, so call it before anything else at all */
	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		quit(1, "Failed to curl_global_init");

	initial_args = malloc(sizeof(char *) * (argc + 1));
	for (i = 0; i < argc; i++)
		initial_args[i] = strdup(argv[i]);
	initial_args[argc] = NULL;

	mutex_init(&hash_lock);
	mutex_init(&console_lock);
	cglock_init(&control_lock);
	mutex_init(&stats_lock);
	mutex_init(&sharelog_lock);
	cglock_init(&ch_lock);
	mutex_init(&sshare_lock);
	rwlock_init(&blk_lock);
	rwlock_init(&netacc_lock);
	rwlock_init(&mining_thr_lock);
	rwlock_init(&devices_lock);

	mutex_init(&lp_lock);
	if (unlikely(pthread_cond_init(&lp_cond, NULL)))
		quit(1, "Failed to pthread_cond_init lp_cond");
	if (unlikely(pthread_cond_init(&gws_cond, NULL)))
		quit(1, "Failed to pthread_cond_init gws_cond");

	notifier_init(submit_waiting_notifier);

	timer_unset(&tv_rescan);
	notifier_init(rescan_notifier);

	/* Create a unique get work queue */
	getq = tq_new();
	if (!getq)
		quit(1, "Failed to create getq");
	/* We use the getq mutex as the staged lock */
	stgd_lock = &getq->mutex;

	snprintf(packagename, sizeof(packagename), "%s %s", PACKAGE, VERSION);

#ifdef WANT_CPUMINE
	init_max_name_len();
#endif

	handler.sa_handler = &sighandler;
	handler.sa_flags = 0;
	sigemptyset(&handler.sa_mask);
#ifdef HAVE_PTHREAD_CANCEL
	sigaction(SIGTERM, &handler, &termhandler);
#else
	// Need to let pthread_cancel emulation handle SIGTERM first
	termhandler = pcwm_orig_term_handler;
	pcwm_orig_term_handler = handler;
#endif
	sigaction(SIGINT, &handler, &inthandler);
#ifndef WIN32
	signal(SIGPIPE, SIG_IGN);
#else
	timeBeginPeriod(1);
#endif

	opt_kernel_path = CGMINER_PREFIX;
	cgminer_path = alloca(PATH_MAX);
	s = strdup(argv[0]);
	strcpy(cgminer_path, dirname(s));
	free(s);
	strcat(cgminer_path, "/");

#if defined(WANT_CPUMINE) && defined(WIN32)
	{
		char buf[32];
		int gev = GetEnvironmentVariable("BFGMINER_BENCH_ALGO", buf, sizeof(buf));
		if (gev > 0 && gev < sizeof(buf))
		{
			setup_benchmark_pool();
			double rate = bench_algo_stage3(atoi(buf));

			// Write result to shared memory for parent
			char unique_name[64];
			if (GetEnvironmentVariable("BFGMINER_SHARED_MEM", unique_name, 32))
			{
				HANDLE map_handle = CreateFileMapping(
					INVALID_HANDLE_VALUE,  // use paging file
					NULL,                  // default security attributes
					PAGE_READWRITE,        // read/write access
					0,                     // size: high 32-bits
					4096,                  // size: low 32-bits
					unique_name            // name of map object
				);
				if (NULL != map_handle) {
					void *shared_mem = MapViewOfFile(
						map_handle,      // object to map view of
						FILE_MAP_WRITE,  // read/write access
						0,               // high offset: map from
						0,               // low offset: beginning
						0                // default: map entire file
					);
					if (NULL != shared_mem)
						CopyMemory(shared_mem, &rate, sizeof(rate));
					(void)UnmapViewOfFile(shared_mem);
				}
				(void)CloseHandle(map_handle);
			}
			exit(0);
		}
	}
#endif

	devcursor = 8;
	logstart = devcursor;
	logcursor = logstart;

	block = calloc(sizeof(struct block), 1);
	if (unlikely(!block))
		quit(1, "main OOM");
	for (i = 0; i < 36; i++)
		strcat(block->hash, "0");
	HASH_ADD_STR(blocks, hash, block);
	strcpy(current_block, block->hash);

	mutex_init(&submitting_lock);

#ifdef HAVE_OPENCL
	opencl_early_init();
#endif

	schedstart.tm.tm_sec = 1;
	schedstop .tm.tm_sec = 1;

	opt_register_table(opt_early_table, NULL);
	opt_register_table(opt_config_table, NULL);
	opt_register_table(opt_cmdline_table, NULL);
	opt_early_parse(argc, argv, applog_and_exit);
	if (!config_loaded)
	{
		load_default_config();
		rearrange_pools = total_pools;
	}
	opt_free_table();

	/* parse command line */
	opt_register_table(opt_config_table,
	                   "Options for both config file and command line");
	opt_register_table(opt_cmdline_table,
	                   "Options for command line only");
	opt_parse(&argc, argv, applog_and_exit);
	if (argc != 1)
		quit(1, "Unexpected extra commandline arguments");
	if (rearrange_pools && rearrange_pools < total_pools)
	{
		// Prioritise commandline pools before default-config pools
		for (i = 0; i < rearrange_pools; ++i)
			pools[i]->prio += rearrange_pools;
		for ( ; i < total_pools; ++i)
			pools[i]->prio -= rearrange_pools;
	}

#ifndef HAVE_PTHREAD_CANCEL
	// Can't do this any earlier, or config isn't loaded
	applog(LOG_DEBUG, "pthread_cancel workaround in use");
#endif

#ifdef HAVE_PWD_H
	struct passwd *user_info = NULL;
	if (opt_setuid != NULL) {
		if ((user_info = getpwnam(opt_setuid)) == NULL) {
			quit(1, "Unable to find setuid user information");
		}
	}
#endif
#ifdef HAVE_CHROOT
	if (chroot_dir != NULL) {
#ifdef HAVE_PWD_H
		if (user_info == NULL && getuid() == 0) {
			applog(LOG_WARNING, "Running as root inside chroot");
		}
#endif
		if (chroot(chroot_dir) != 0) {
			quit(1, "Unable to chroot");
		}
		if (chdir("/"))
			quit(1, "Unable to chdir to chroot");
	}
#endif
#ifdef HAVE_PWD_H
	if (user_info != NULL) {
		if (setgid((*user_info).pw_gid) != 0)
			quit(1, "Unable to setgid");
		if (setuid((*user_info).pw_uid) != 0)
			quit(1, "Unable to setuid");
	}
#endif

	raise_fd_limits();

	if (opt_benchmark) {
		while (total_pools)
			remove_pool(pools[0]);
		setup_benchmark_pool();
	}

	if (opt_unittest) {
		test_cgpu_match();
		test_intrange();
		test_decimal_width();
		test_domain_funcs();
		test_target();
		test_uri_get_param();
		utf8_test();
	}

#ifdef HAVE_CURSES
	if (opt_realquiet || opt_display_devs)
		use_curses = false;

	setlocale(LC_ALL, "C");

	if (use_curses)
		enable_curses();
#endif

#ifdef HAVE_LIBUSB
	int err = libusb_init(NULL);
	if (err)
		applog(LOG_WARNING, "libusb_init() failed err %d", err);
	else
		have_libusb = true;
#endif

	applog(LOG_WARNING, "Started %s", packagename);

	{
		struct bfg_loaded_configfile *configfile;
		LL_FOREACH(bfg_loaded_configfiles, configfile)
		{
			char * const cnfbuf = configfile->filename;
			int fileconf_load = configfile->fileconf_load;
			applog(LOG_NOTICE, "Loaded configuration file %s", cnfbuf);
			switch (fileconf_load) {
				case 0:
					applog(LOG_WARNING, "Fatal JSON error in configuration file.");
					applog(LOG_WARNING, "Configuration file could not be used.");
					break;
				case -1:
					applog(LOG_WARNING, "Error in configuration file, partially loaded.");
					if (use_curses)
						applog(LOG_WARNING, "Start BFGMiner with -T to see what failed to load.");
					break;
				default:
					break;
			}
		}
	}

	i = strlen(opt_kernel_path) + 2;
	char __kernel_path[i];
	snprintf(__kernel_path, i, "%s/", opt_kernel_path);
	opt_kernel_path = __kernel_path;

	if (want_per_device_stats)
		opt_log_output = true;

#ifdef WANT_CPUMINE
#ifdef USE_SCRYPT
	if (opt_scrypt)
		set_scrypt_algo(&opt_algo);
#endif
#endif

	bfg_devapi_init();

	drv_detect_all();
	total_devices = total_devices_new;
	devices = devices_new;
	total_devices_new = 0;
	devices_new = NULL;

	if (opt_display_devs) {
		int devcount = 0;
		applog(LOG_ERR, "Devices detected:");
		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = devices[i];
			char buf[0x100];
			if (cgpu->device != cgpu)
				continue;
			if (cgpu->name)
				snprintf(buf, sizeof(buf), " %s", cgpu->name);
			else
			if (cgpu->dev_manufacturer)
				snprintf(buf, sizeof(buf), " %s by %s", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer);
			else
			if (cgpu->dev_product)
				snprintf(buf, sizeof(buf), " %s", cgpu->dev_product);
			else
				strcpy(buf, " Device");
			tailsprintf(buf, sizeof(buf), " (driver=%s; procs=%d", cgpu->drv->dname, cgpu->procs);
			if (cgpu->dev_serial)
				tailsprintf(buf, sizeof(buf), "; serial=%s", cgpu->dev_serial);
			if (cgpu->device_path)
				tailsprintf(buf, sizeof(buf), "; path=%s", cgpu->device_path);
			tailsprintf(buf, sizeof(buf), ")");
			_applog(LOG_NOTICE, buf);
			++devcount;
		}
		quit(0, "%d devices listed", devcount);
	}

	mining_threads = 0;
	for (i = 0; i < total_devices; ++i)
		register_device(devices[i]);

	if (!total_devices) {
		applog(LOG_WARNING, "No devices detected!");
		if (use_curses)
			applog(LOG_WARNING, "Waiting for devices; press 'M+' to add, or 'Q' to quit");
		else
			applog(LOG_WARNING, "Waiting for devices");
	}

	if (opt_quit_summary == BQS_DEFAULT)
	{
		if (total_devices < 25)
			opt_quit_summary = BQS_PROCS;
		else
			opt_quit_summary = BQS_DEVS;
	}

#ifdef HAVE_CURSES
	switch_logsize();
#endif

	if (!total_pools) {
		applog(LOG_WARNING, "Need to specify at least one pool server.");
#ifdef HAVE_CURSES
		if (!use_curses || !input_pool(false))
#endif
			quit(1, "Pool setup failed");
	}

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		size_t siz;

		pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
		pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;

		if (!pool->rpc_url)
			quit(1, "No URI supplied for pool %u", i);

		if (!pool->rpc_userpass) {
			if (!pool->rpc_user || !pool->rpc_pass)
				quit(1, "No login credentials supplied for pool %u %s", i, pool->rpc_url);
			siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2;
			pool->rpc_userpass = malloc(siz);
			if (!pool->rpc_userpass)
				quit(1, "Failed to malloc userpass");
			snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass);
		}
	}

	/* Set the currentpool to pool with priority 0 */
	validate_pool_priorities();
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		if (!pool->prio)
			currentpool = pool;
	}

#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog(PACKAGE, LOG_PID, LOG_USER);
#endif

#if defined(unix) || defined(__APPLE__)
	if (opt_stderr_cmd)
		fork_monitor();
#endif // defined(unix)

	mining_thr = calloc(mining_threads, sizeof(thr));
	if (!mining_thr)
		quit(1, "Failed to calloc mining_thr");
	for (i = 0; i < mining_threads; i++) {
		mining_thr[i] = calloc(1, sizeof(*thr));
		if (!mining_thr[i])
			quit(1, "Failed to calloc mining_thr[%d]", i);
	}

	total_control_threads = 6;
	control_thr = calloc(total_control_threads, sizeof(*thr));
	if (!control_thr)
		quit(1, "Failed to calloc control_thr");

	if (opt_benchmark)
		goto begin_bench;

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		enable_pool(pool);
		pool->idle = true;
	}

	applog(LOG_NOTICE, "Probing for an alive pool");
	do {
		bool still_testing;
		int i;

		/* Look for at least one active pool before starting */
		probe_pools();
		do {
			sleep(1);
			if (pools_active)
				break;
			still_testing = false;
			for (int i = 0; i < total_pools; ++i)
				if (pools[i]->testing)
					still_testing = true;
		} while (still_testing);

		if (!pools_active) {
			applog(LOG_ERR, "No servers were found that could be used to get work from.");
  10674. applog(LOG_ERR, "Please check the details from the list below of the servers you have input");
  10675. applog(LOG_ERR, "Most likely you have input the wrong URL, forgotten to add a port, or have not set up workers");
			for (i = 0; i < total_pools; i++) {
				struct pool *pool;

				pool = pools[i];
				applog(LOG_WARNING, "Pool: %d URL: %s User: %s Password: %s",
				       i, pool->rpc_url, pool->rpc_user, pool->rpc_pass);
			}
#ifdef HAVE_CURSES
			if (use_curses) {
				halfdelay(150);
				applog(LOG_ERR, "Press any key to exit, or BFGMiner will try again in 15s.");
				if (getch() != ERR)
					quit(0, "No servers could be used! Exiting.");
				cbreak();
			} else
#endif
				quit(0, "No servers could be used! Exiting.");
		}
	} while (!pools_active);

#ifdef USE_SCRYPT
	if (detect_algo == 1 && !opt_scrypt) {
		applog(LOG_NOTICE, "Detected scrypt algorithm");
		opt_scrypt = true;
	}
#endif
	detect_algo = 0;

begin_bench:
	total_mhashes_done = 0;
	for (i = 0; i < total_devices; i++) {
		struct cgpu_info *cgpu = devices[i];
		cgpu->rolling = cgpu->total_mhashes = 0;
	}

	cgtime(&total_tv_start);
	cgtime(&total_tv_end);
	miner_started = total_tv_start;
	time_t miner_start_ts = time(NULL);
	if (schedstart.tm.tm_sec)
		localtime_r(&miner_start_ts, &schedstart.tm);
	if (schedstop.tm.tm_sec)
		localtime_r(&miner_start_ts, &schedstop .tm);
	get_datestamp(datestamp, sizeof(datestamp), miner_start_ts);

	// Initialise processors and threads
	k = 0;
	for (i = 0; i < total_devices; ++i) {
		struct cgpu_info *cgpu = devices[i];
		allocate_cgpu(cgpu, &k);
	}

	// Start threads
	for (i = 0; i < total_devices; ++i) {
		struct cgpu_info *cgpu = devices[i];
		start_cgpu(cgpu);
	}

#ifdef HAVE_OPENCL
	for (i = 0; i < nDevs; i++)
		pause_dynamic_threads(i);
#endif

#ifdef WANT_CPUMINE
	if (opt_n_threads > 0)
		applog(LOG_INFO, "%d cpu miner threads started, using '%s' algorithm.",
		       opt_n_threads, algo_names[opt_algo]);
#endif

	cgtime(&total_tv_start);
	cgtime(&total_tv_end);

	if (!opt_benchmark)
	{
		pthread_t submit_thread;
		if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, NULL)))
			quit(1, "submit_work thread create failed");
	}

	watchpool_thr_id = 1;
	thr = &control_thr[watchpool_thr_id];
	/* start watchpool thread */
	if (thr_info_create(thr, NULL, watchpool_thread, NULL))
		quit(1, "watchpool thread create failed");
	pthread_detach(thr->pth);

	watchdog_thr_id = 2;
	thr = &control_thr[watchdog_thr_id];
	/* start watchdog thread */
	if (thr_info_create(thr, NULL, watchdog_thread, NULL))
		quit(1, "watchdog thread create failed");
	pthread_detach(thr->pth);

#ifdef HAVE_OPENCL
	/* Create reinit gpu thread */
	gpur_thr_id = 3;
	thr = &control_thr[gpur_thr_id];
	thr->q = tq_new();
	if (!thr->q)
		quit(1, "tq_new failed for gpur_thr_id");
	if (thr_info_create(thr, NULL, reinit_gpu, thr))
		quit(1, "reinit_gpu thread create failed");
#endif

	/* Create API socket thread */
	api_thr_id = 4;
	thr = &control_thr[api_thr_id];
	if (thr_info_create(thr, NULL, api_thread, thr))
		quit(1, "API thread create failed");

#ifdef USE_LIBMICROHTTPD
	if (httpsrv_port != -1)
		httpsrv_start(httpsrv_port);
#endif

#ifdef USE_LIBEVENT
	if (stratumsrv_port != -1)
		stratumsrv_start();
#endif

#ifdef HAVE_BFG_HOTPLUG
	if (opt_hotplug)
		hotplug_start();
#endif

#ifdef HAVE_CURSES
	/* Create curses input thread for keyboard input. Create this last so
	 * that we know all threads are created, since this can call kill_work
	 * to try and shut down all previous threads. */
	input_thr_id = 5;
	thr = &control_thr[input_thr_id];
	if (thr_info_create(thr, NULL, input_thread, thr))
		quit(1, "input thread create failed");
	pthread_detach(thr->pth);
#endif

	/* Just to be sure */
	if (total_control_threads != 6)
		quit(1, "incorrect total_control_threads (%d) should be 6", total_control_threads);
	/* Once everything is set up, main() becomes the getwork scheduler */
	while (42) {
		int ts, max_staged = opt_queue;
		struct pool *pool, *cp;
		bool lagging = false;
		struct curl_ent *ce;
		struct work *work;

		cp = current_pool();

		// Generally, each processor needs a new work, and all at once during work restarts
		max_staged += mining_threads;

		mutex_lock(stgd_lock);
		ts = __total_staged();

		if (!pool_localgen(cp) && !ts && !opt_fail_only)
			lagging = true;

		/* Wait until hash_pop tells us we need to create more work */
		if (ts > max_staged) {
			staged_full = true;
			pthread_cond_wait(&gws_cond, stgd_lock);
			ts = __total_staged();
		}
		mutex_unlock(stgd_lock);

		if (ts > max_staged)
			continue;

		work = make_work();

		if (lagging && !pool_tset(cp, &cp->lagging)) {
			applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no);
			cp->getfail_occasions++;
			total_go++;
		}

		pool = select_pool(lagging);
retry:
		if (pool->has_stratum) {
			while (!pool->stratum_active || !pool->stratum_notify) {
				struct pool *altpool = select_pool(true);

				if (altpool == pool && pool->has_stratum)
					cgsleep_ms(5000);
				pool = altpool;
				goto retry;
			}
			gen_stratum_work(pool, work);
			applog(LOG_DEBUG, "Generated stratum work");
			stage_work(work);
			continue;
		}

		if (pool->last_work_copy) {
			mutex_lock(&pool->last_work_lock);
			struct work *last_work = pool->last_work_copy;
			if (!last_work)
				{}
			else
			if (can_roll(last_work) && should_roll(last_work)) {
				struct timeval tv_now;
				cgtime(&tv_now);
				free_work(work);
				work = make_clone(pool->last_work_copy);
				mutex_unlock(&pool->last_work_lock);
				roll_work(work);
				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tr->tmpl, tv_now.tv_sec));
				stage_work(work);
				continue;
			} else if (last_work->tr && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tr->tmpl) > (unsigned long)mining_threads) {
				// Don't free last_work_copy, since it is used to detect that upstream provides plenty of work per template
			} else {
				free_work(last_work);
				pool->last_work_copy = NULL;
			}
			mutex_unlock(&pool->last_work_lock);
		}

		if (clone_available()) {
			applog(LOG_DEBUG, "Cloned getwork work");
			free_work(work);
			continue;
		}

		if (opt_benchmark) {
			get_benchmark_work(work);
			applog(LOG_DEBUG, "Generated benchmark work");
			stage_work(work);
			continue;
		}

		work->pool = pool;
		ce = pop_curl_entry3(pool, 2);
		/* obtain new work from bitcoin via JSON-RPC */
		if (!get_upstream_work(work, ce->curl)) {
			struct pool *next_pool;

			/* Make sure the pool just hasn't stopped serving
			 * requests but is up as we'll keep hammering it */
			push_curl_entry(ce, pool);
			++pool->seq_getfails;
			pool_died(pool);
			next_pool = select_pool(!opt_fail_only);
			if (pool == next_pool) {
				applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no);
				cgsleep_ms(5000);
			} else {
				applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, failover activated", pool->pool_no);
				pool = next_pool;
			}
			goto retry;
		}

		if (ts >= max_staged)
			pool_tclear(pool, &pool->lagging);
		if (pool_tclear(pool, &pool->idle))
			pool_resus(pool);

		applog(LOG_DEBUG, "Generated getwork work");
		stage_work(work);
		push_curl_entry(ce, pool);
	}

	return 0;
}