/*
 * Copyright 2011-2014 Con Kolivas
 * Copyright 2011-2017 Luke Dashjr
 * Copyright 2014 Nate Woolls
 * Copyright 2012-2014 Andrew Smith
 * Copyright 2010 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

#ifdef HAVE_CURSES
#ifdef USE_UNICODE
#define PDC_WIDE
#endif
// Must be before stdbool, since pdcurses typedefs bool :/
#include <curses.h>
#endif

#include <ctype.h>
#include <float.h>
#include <limits.h>
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <stdarg.h>
#include <assert.h>
#include <signal.h>
#include <wctype.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>

#ifdef HAVE_PWD_H
#include <pwd.h>
#endif

#ifndef WIN32
#include <sys/resource.h>
#include <sys/socket.h>
#if defined(HAVE_LIBUDEV) && defined(HAVE_SYS_EPOLL_H)
#include <libudev.h>
#include <sys/epoll.h>
#define HAVE_BFG_HOTPLUG
#endif
#else
#include <winsock2.h>
#include <windows.h>
#include <dbt.h>
#define HAVE_BFG_HOTPLUG
#endif

#include <ccan/opt/opt.h>
#include <jansson.h>
#include <curl/curl.h>
#include <libgen.h>
#include <sha2.h>
#include <utlist.h>

#include <blkmaker.h>
#include <blkmaker_jansson.h>
#include <blktemplate.h>

#include <libbase58.h>

#include "compat.h"
#include "deviceapi.h"
#include "logging.h"
#include "miner.h"
#include "adl.h"
#include "driver-cpu.h"
#include "driver-opencl.h"
#include "util.h"

#ifdef USE_AVALON
#include "driver-avalon.h"
#endif

#ifdef HAVE_BFG_LOWLEVEL
#include "lowlevel.h"
#endif

#if defined(unix) || defined(__APPLE__)
#include <errno.h>
#include <fcntl.h>
#include <sys/wait.h>
#endif

#ifdef USE_SCRYPT
#include "malgo/scrypt.h"
#endif

#if defined(USE_AVALON) || defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_MODMINER) || defined(USE_NANOFURY) || defined(USE_X6500) || defined(USE_ZTEX)
# define USE_FPGA
#endif

enum bfg_quit_summary {
	BQS_DEFAULT,
	BQS_NONE,
	BQS_DEVS,
	BQS_PROCS,
	BQS_DETAILED,
};

struct strategies strategies[] = {
	{ "Failover" },
	{ "Round Robin" },
	{ "Rotate" },
	{ "Load Balance" },
	{ "Balance" },
};

#define packagename bfgminer_name_space_ver

bool opt_protocol;
bool opt_dev_protocol;
static bool opt_benchmark, opt_benchmark_intense;
static bool want_longpoll = true;
static bool want_gbt = true;
static bool want_getwork = true;
#if BLKMAKER_VERSION > 1
static bool opt_load_bitcoin_conf = true;
static uint32_t coinbase_script_block_id;
static uint32_t template_nonce;
#endif
#if BLKMAKER_VERSION > 0
char *opt_coinbase_sig;
#endif
static enum bfg_quit_summary opt_quit_summary = BQS_DEFAULT;
static bool include_serial_in_statline;
char *request_target_str;
float request_pdiff = 1.0;
double request_bdiff;
static bool want_stratum = true;
int opt_skip_checks;
bool want_per_device_stats;
bool use_syslog;
bool opt_quiet_work_updates = true;
bool opt_quiet;
bool opt_realquiet;
int loginput_size;
bool opt_compact;
bool opt_show_procs;
const int opt_cutofftemp = 95;
int opt_hysteresis = 3;
static int opt_retries = -1;
int opt_fail_pause = 5;
int opt_log_interval = 20;
int opt_queue = 1;
int opt_scantime = 60;
int opt_expiry = 120;
int opt_expiry_lp = 3600;
unsigned long long global_hashrate;
static bool opt_unittest = false;
unsigned unittest_failures;
unsigned long global_quota_gcd = 1;
time_t last_getwork;

#ifdef USE_OPENCL
int opt_dynamic_interval = 7;
int nDevs;
int opt_g_threads = -1;
#endif
#ifdef USE_SCRYPT
static char detect_algo = 1;
#else
static char detect_algo;
#endif

bool opt_restart = true;

#ifdef USE_LIBMICROHTTPD
#include "httpsrv.h"
int httpsrv_port = -1;
#endif
#ifdef USE_LIBEVENT
long stratumsrv_port = -1;
#endif

const
int rescan_delay_ms = 1000;
#ifdef HAVE_BFG_HOTPLUG
bool opt_hotplug = 1;
const
int hotplug_delay_ms = 100;
#else
const bool opt_hotplug;
#endif

struct string_elist *scan_devices;
static struct string_elist *opt_set_device_list;
bool opt_force_dev_init;
static struct string_elist *opt_devices_enabled_list;
static bool opt_display_devs;
int total_devices;
struct cgpu_info **devices;
int total_devices_new;
struct cgpu_info **devices_new;
bool have_opencl;
int opt_n_threads = -1;
int mining_threads;
int base_queue;
int num_processors;
#ifdef HAVE_CURSES
bool use_curses = true;
#else
bool use_curses;
#endif
int last_logstatusline_len;
#ifdef HAVE_LIBUSB
bool have_libusb;
#endif
static bool opt_submit_stale = true;
static float opt_shares;
static int opt_submit_threads = 0x40;
bool opt_fail_only;
int opt_fail_switch_delay = 300;
bool opt_autofan;
bool opt_autoengine;
bool opt_noadl;
char *opt_api_allow = NULL;
char *opt_api_groups;
char *opt_api_description = PACKAGE_STRING;
int opt_api_port = 4028;
bool opt_api_listen;
bool opt_api_mcast;
char *opt_api_mcast_addr = API_MCAST_ADDR;
char *opt_api_mcast_code = API_MCAST_CODE;
char *opt_api_mcast_des = "";
int opt_api_mcast_port = 4028;
bool opt_api_network;
bool opt_delaynet;
bool opt_disable_pool;
bool opt_disable_client_reconnect = false;
static bool no_work;
bool opt_worktime;
bool opt_weighed_stats;
char *opt_kernel_path;
char *cgminer_path;

#if defined(USE_BITFORCE)
bool opt_bfl_noncerange;
#endif

#define QUIET (opt_quiet || opt_realquiet)

struct thr_info *control_thr;
struct thr_info **mining_thr;
static int watchpool_thr_id;
static int watchdog_thr_id;
#ifdef HAVE_CURSES
static int input_thr_id;
#endif
int gpur_thr_id;
static int api_thr_id;
static int total_control_threads;

pthread_mutex_t hash_lock;
static pthread_mutex_t *stgd_lock;
pthread_mutex_t console_lock;
cglock_t ch_lock;
static pthread_rwlock_t blk_lock;
static pthread_mutex_t sshare_lock;

pthread_rwlock_t netacc_lock;
pthread_rwlock_t mining_thr_lock;
pthread_rwlock_t devices_lock;

static pthread_mutex_t lp_lock;
static pthread_cond_t lp_cond;

pthread_cond_t gws_cond;

bool shutting_down;

double total_rolling;
double total_mhashes_done;
static struct timeval total_tv_start, total_tv_end;
static struct timeval miner_started;

cglock_t control_lock;
pthread_mutex_t stats_lock;

static pthread_mutex_t submitting_lock;
static int total_submitting;
static struct work *submit_waiting;
notifier_t submit_waiting_notifier;

int hw_errors;
int total_accepted, total_rejected;
int total_getworks, total_stale, total_discarded;
uint64_t total_bytes_rcvd, total_bytes_sent;
double total_diff1, total_bad_diff1;
double total_diff_accepted, total_diff_rejected, total_diff_stale;
static int staged_rollable, staged_spare;
unsigned int new_blocks;
unsigned int found_blocks;
unsigned int local_work;
unsigned int total_go, total_ro;

struct pool **pools;
static struct pool *currentpool = NULL;

int total_pools, enabled_pools;
enum pool_strategy pool_strategy = POOL_FAILOVER;
int opt_rotate_period;
static int total_urls, total_users, total_passes;

static
#ifndef HAVE_CURSES
const
#endif
bool curses_active;

#ifdef HAVE_CURSES
#if !(defined(PDCURSES) || defined(NCURSES_VERSION))
const
#endif
short default_bgcolor = COLOR_BLACK;
static int attr_title = A_BOLD;
#endif

static
#if defined(HAVE_CURSES) && defined(USE_UNICODE)
bool use_unicode;
static
bool have_unicode_degrees;
static
wchar_t unicode_micro = 'u';
#else
const bool use_unicode;
static
const bool have_unicode_degrees;
#ifdef HAVE_CURSES
static
const char unicode_micro = 'u';
#endif
#endif

#ifdef HAVE_CURSES
#define U8_BAD_START "\xef\x80\x81"
#define U8_BAD_END "\xef\x80\x80"
#define AS_BAD(x) U8_BAD_START x U8_BAD_END

/* logstart is where the log window should start */
static int devcursor, logstart, logcursor;
bool selecting_device;
unsigned selected_device;
#endif

static int max_lpdigits;

// current_hash was replaced with goal->current_goal_detail
// current_block_id was replaced with blkchain->currentblk->block_id
static char datestamp[40];
static char best_share[ALLOC_H2B_SHORTV] = "0";
double best_diff = 0;

struct mining_algorithm *mining_algorithms;
struct mining_goal_info *mining_goals;
int active_goals = 1;

int swork_id;

/* For creating a hash database of stratum shares submitted that have not had
 * a response yet */
struct stratum_share {
	UT_hash_handle hh;
	bool block;
	struct work *work;
	int id;
};

static struct stratum_share *stratum_shares = NULL;

char *opt_socks_proxy = NULL;

static const char def_conf[] = "bfgminer.conf";
static bool config_loaded;
static int include_count;
#define JSON_INCLUDE_CONF "include"
#define JSON_LOAD_ERROR "JSON decode of file '%s' failed\n %s"
#define JSON_LOAD_ERROR_LEN strlen(JSON_LOAD_ERROR)
#define JSON_MAX_DEPTH 10
#define JSON_MAX_DEPTH_ERR "Too many levels of JSON includes (limit 10) or a loop"
#define JSON_WEB_ERROR "WEB config err"

char *cmd_idle, *cmd_sick, *cmd_dead;

#if defined(unix) || defined(__APPLE__)
static char *opt_stderr_cmd = NULL;
static int forkpid;
#endif // defined(unix)

#ifdef HAVE_CHROOT
char *chroot_dir;
#endif
#ifdef HAVE_PWD_H
char *opt_setuid;
#endif

struct sigaction termhandler, inthandler;

struct thread_q *getq;

static int total_work;
static bool staged_full;
struct work *staged_work = NULL;

struct schedtime {
	bool enable;
	struct tm tm;
};

struct schedtime schedstart;
struct schedtime schedstop;
bool sched_paused;

static bool time_before(struct tm *tm1, struct tm *tm2)
{
	if (tm1->tm_hour < tm2->tm_hour)
		return true;
	if (tm1->tm_hour == tm2->tm_hour && tm1->tm_min < tm2->tm_min)
		return true;
	return false;
}

static bool should_run(void)
{
	struct tm tm;
	time_t tt;
	bool within_range;

	if (!schedstart.enable && !schedstop.enable)
		return true;

	tt = time(NULL);
	localtime_r(&tt, &tm);

	// NOTE: This is delicately balanced so that should_run is always false if schedstart==schedstop
	if (time_before(&schedstop.tm, &schedstart.tm))
		within_range = (time_before(&tm, &schedstop.tm) || !time_before(&tm, &schedstart.tm));
	else
		within_range = (time_before(&tm, &schedstop.tm) && !time_before(&tm, &schedstart.tm));

	if (within_range && !schedstop.enable)
		/* This is a once off event with no stop time set */
		schedstart.enable = false;

	return within_range;
}
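
/* Worked example (illustrative, not part of the original source): with
 * schedstart=22:00 and schedstop=06:00, stop is "before" start, so the
 * wrapped (OR) test applies and mining runs overnight. At 23:30, the time is
 * not before 22:00, so within_range is true; at 03:00 it is before 06:00, so
 * within_range is also true; at 07:00 it is neither before 06:00 nor at or
 * after 22:00, so within_range is false. */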
void get_datestamp(char *f, size_t fsiz, time_t tt)
{
	struct tm _tm;
	struct tm *tm = &_tm;
	if (tt == INVALID_TIMESTAMP)
		tt = time(NULL);
	localtime_r(&tt, tm);
	snprintf(f, fsiz, "[%d-%02d-%02d %02d:%02d:%02d]",
	         tm->tm_year + 1900,
	         tm->tm_mon + 1,
	         tm->tm_mday,
	         tm->tm_hour,
	         tm->tm_min,
	         tm->tm_sec);
}

static
void get_timestamp(char *f, size_t fsiz, time_t tt)
{
	struct tm _tm;
	struct tm *tm = &_tm;
	localtime_r(&tt, tm);
	snprintf(f, fsiz, "[%02d:%02d:%02d]",
	         tm->tm_hour,
	         tm->tm_min,
	         tm->tm_sec);
}

static void applog_and_exit(const char *fmt, ...) FORMAT_SYNTAX_CHECK(printf, 1, 2);

static char exit_buf[512];

static void applog_and_exit(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(exit_buf, sizeof(exit_buf), fmt, ap);
	va_end(ap);
	_applog(LOG_ERR, exit_buf);
	exit(1);
}

static
float drv_min_nonce_diff(const struct device_drv * const drv, struct cgpu_info * const proc, const struct mining_algorithm * const malgo)
{
	if (drv->drv_min_nonce_diff)
		return drv->drv_min_nonce_diff(proc, malgo);
#ifdef USE_SHA256D
	return (malgo->algo == POW_SHA256D) ? 1. : -1.;
#else
	return -1.;
#endif
}

char *devpath_to_devid(const char *devpath)
{
#ifndef WIN32
	if (devpath[0] != '/')
		return NULL;
	struct stat my_stat;
	if (stat(devpath, &my_stat))
		return NULL;
	char *devs = malloc(6 + (sizeof(dev_t) * 2) + 1);
	memcpy(devs, "dev_t:", 6);
	bin2hex(&devs[6], &my_stat.st_rdev, sizeof(dev_t));
#else
	if (!strncmp(devpath, "\\\\.\\", 4))
		devpath += 4;
	if (strncasecmp(devpath, "COM", 3) || !devpath[3])
		return NULL;
	devpath += 3;
	char *p;
	strtol(devpath, &p, 10);
	if (p[0])
		return NULL;
	const int sz = (p - devpath);
	char *devs = malloc(4 + sz + 1);
	sprintf(devs, "com:%s", devpath);
#endif
	return devs;
}
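
/* Illustrative examples (not part of the original source): on POSIX systems,
 * "/dev/ttyUSB0" is identified by its device number, yielding an id such as
 * "dev_t:<hex of st_rdev>"; on Windows, "\\.\COM3" (or plain "COM3")
 * normalizes to "com:3". Two paths naming the same underlying device thus
 * produce equal ids, which is what devpaths_match() below relies on. */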
static
bool devpaths_match(const char * const ap, const char * const bp)
{
	char * const a = devpath_to_devid(ap);
	if (!a)
		return false;
	char * const b = devpath_to_devid(bp);
	bool rv = false;
	if (b)
	{
		rv = !strcmp(a, b);
		free(b);
	}
	free(a);
	return rv;
}

static
int proc_letter_to_number(const char *s, const char ** const rem)
{
	int n = 0, c;
	for ( ; s[0]; ++s)
	{
		if (unlikely(n > INT_MAX / 26))
			break;
		c = tolower(s[0]) - 'a';
		if (unlikely(c < 0 || c > 25))
			break;
		if (unlikely(INT_MAX - c < n))
			break;
		n = (n * 26) + c;
	}
	*rem = s;
	return n;
}
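
/* Illustrative examples (not part of the original source): 'a' -> 0,
 * 'b' -> 1, 'z' -> 25, and "ba" -> 26; processor letters form a base-26
 * number with 'a' as the zero digit. *rem is left pointing at the first
 * character that is not a letter. */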
static
bool cgpu_match(const char * const pattern, const struct cgpu_info * const cgpu)
{
	// all - matches anything
	// d0 - matches all processors of device 0
	// d0-3 - matches all processors of device 0, 1, 2, or 3
	// d0a - matches first processor of device 0
	// 0 - matches processor 0
	// 0-4 - matches processors 0, 1, 2, 3, or 4
	// ___ - matches all processors on all devices using driver/name ___
	// ___0 - matches all processors of 0th device using driver/name ___
	// ___0a - matches first processor of 0th device using driver/name ___
	// @* - matches device with serial or path *
	// @*@a - matches first processor of device with serial or path *
	// ___@* - matches device with serial or path * using driver/name ___
	if (!strcasecmp(pattern, "all"))
		return true;

	const struct device_drv * const drv = cgpu->drv;
	const char *p = pattern, *p2;
	size_t L;
	int n, i, c = -1;
	int n2;
	int proc_first = -1, proc_last = -1;
	struct cgpu_info *device;

	if (!(strncasecmp(drv->dname, p, (L = strlen(drv->dname)))
	   && strncasecmp(drv-> name, p, (L = strlen(drv-> name)))))
		// dname or name
		p = &pattern[L];
	else
	if (p[0] == 'd' && (isdigit(p[1]) || p[1] == '-'))
		// d#
		++p;
	else
	if (isdigit(p[0]) || p[0] == '@' || p[0] == '-')
		// # or @
		{}
	else
		return false;
	L = p - pattern;

	while (isspace(p[0]))
		++p;

	if (p[0] == '@')
	{
		// Serial/path
		const char * const ser = &p[1];
		for (p = ser; p[0] != '@' && p[0] != '\0'; ++p)
		{}
		p2 = (p[0] == '@') ? &p[1] : p;
		const size_t serlen = (p - ser);
		p = "";
		n = n2 = 0;
		const char * const devpath = cgpu->device_path ?: "";
		const char * const devser = cgpu->dev_serial ?: "";
		if ((!strncmp(devpath, ser, serlen)) && devpath[serlen] == '\0')
		{}  // Match
		else
		if ((!strncmp(devser, ser, serlen)) && devser[serlen] == '\0')
		{}  // Match
		else
		{
			char devpath2[serlen + 1];
			memcpy(devpath2, ser, serlen);
			devpath2[serlen] = '\0';
			if (!devpaths_match(devpath, devpath2))
				return false;
		}
	}
	else
	{
		if (isdigit(p[0]))
			n = strtol(p, (void*)&p2, 0);
		else
		{
			n = 0;
			p2 = p;
		}
		if (p2[0] == '-')
		{
			++p2;
			if (p2[0] && isdigit(p2[0]))
				n2 = strtol(p2, (void*)&p2, 0);
			else
				n2 = INT_MAX;
		}
		else
			n2 = n;
		if (p == pattern)
		{
			if (!p[0])
				return true;
			if (p2 && p2[0])
				goto invsyntax;
			for (i = n; i <= n2; ++i)
			{
				if (i >= total_devices)
					break;
				if (cgpu == devices[i])
					return true;
			}
			return false;
		}
	}
	if (p2[0])
	{
		proc_first = proc_letter_to_number(&p2[0], &p2);
		if (p2[0] == '-')
		{
			++p2;
			if (p2[0])
				proc_last = proc_letter_to_number(p2, &p2);
			else
				proc_last = INT_MAX;
		}
		else
			proc_last = proc_first;
		if (p2[0])
			goto invsyntax;
	}
	if (L > 1 || tolower(pattern[0]) != 'd' || !p[0])
	{
		if ((L == 3 && !strncasecmp(pattern, drv->name, 3)) ||
		    (!L) ||
		    (L == strlen(drv->dname) && !strncasecmp(pattern, drv->dname, L)))
			{}  // Matched name or dname
		else
			return false;
		if (p[0] && (cgpu->device_id < n || cgpu->device_id > n2))
			return false;
		if (proc_first != -1 && (cgpu->proc_id < proc_first || cgpu->proc_id > proc_last))
			return false;
		return true;
	}
	// d#
	c = -1;
	for (i = 0; ; ++i)
	{
		if (i == total_devices)
			return false;
		if (devices[i]->device != devices[i])
			continue;
		++c;
		if (c < n)
			continue;
		if (c > n2)
			break;
		for (device = devices[i]; device; device = device->next_proc)
		{
			if (proc_first != -1 && (device->proc_id < proc_first || device->proc_id > proc_last))
				continue;
			if (device == cgpu)
				return true;
		}
	}
	return false;

invsyntax:
	applog(LOG_WARNING, "%s: Invalid syntax: %s", __func__, pattern);
	return false;
}
#define TEST_CGPU_MATCH(pattern)  \
	if (!cgpu_match(pattern, &cgpu))  \
	{  \
		++unittest_failures;  \
		applog(LOG_ERR, "%s: Pattern \"%s\" should have matched!", __func__, pattern);  \
	}  \
// END TEST_CGPU_MATCH

#define TEST_CGPU_NOMATCH(pattern)  \
	if (cgpu_match(pattern, &cgpu))  \
	{  \
		++unittest_failures;  \
		applog(LOG_ERR, "%s: Pattern \"%s\" should NOT have matched!", __func__, pattern);  \
	}  \
// END TEST_CGPU_NOMATCH

static __maybe_unused
void test_cgpu_match()
{
	struct device_drv drv = {
		.dname = "test",
		.name = "TST",
	};
	struct cgpu_info cgpu = {
		.drv = &drv,
		.device = &cgpu,
		.device_id = 1,
		.proc_id = 1,
		.proc_repr = "TST 1b",
	}, cgpu0a = {
		.drv = &drv,
		.device = &cgpu0a,
		.device_id = 0,
		.proc_id = 0,
		.proc_repr = "TST 0a",
	}, cgpu1a = {
		.drv = &drv,
		.device = &cgpu0a,
		.device_id = 1,
		.proc_id = 0,
		.proc_repr = "TST 1a",
	};
	struct cgpu_info *devices_list[3] = {&cgpu0a, &cgpu1a, &cgpu,};
	devices = devices_list;
	total_devices = 3;

	TEST_CGPU_MATCH("all")
	TEST_CGPU_MATCH("d1")
	TEST_CGPU_NOMATCH("d2")
	TEST_CGPU_MATCH("d0-5")
	TEST_CGPU_NOMATCH("d0-0")
	TEST_CGPU_NOMATCH("d2-5")
	TEST_CGPU_MATCH("d-1")
	TEST_CGPU_MATCH("d1-")
	TEST_CGPU_NOMATCH("d-0")
	TEST_CGPU_NOMATCH("d2-")
	TEST_CGPU_MATCH("2")
	TEST_CGPU_NOMATCH("3")
	TEST_CGPU_MATCH("1-2")
	TEST_CGPU_MATCH("2-3")
	TEST_CGPU_NOMATCH("1-1")
	TEST_CGPU_NOMATCH("3-4")
	TEST_CGPU_MATCH("TST")
	TEST_CGPU_MATCH("test")
	TEST_CGPU_MATCH("tst")
	TEST_CGPU_MATCH("TEST")
	TEST_CGPU_NOMATCH("TSF")
	TEST_CGPU_NOMATCH("TS")
	TEST_CGPU_NOMATCH("TSTF")
	TEST_CGPU_MATCH("TST1")
	TEST_CGPU_MATCH("test1")
	TEST_CGPU_MATCH("TST0-1")
	TEST_CGPU_MATCH("TST 1")
	TEST_CGPU_MATCH("TST 1-2")
	TEST_CGPU_MATCH("TEST 1-2")
	TEST_CGPU_NOMATCH("TST2")
	TEST_CGPU_NOMATCH("TST2-3")
	TEST_CGPU_NOMATCH("TST0-0")
	TEST_CGPU_MATCH("TST1b")
	TEST_CGPU_MATCH("tst1b")
	TEST_CGPU_NOMATCH("TST1c")
	TEST_CGPU_NOMATCH("TST1bb")
	TEST_CGPU_MATCH("TST0-1b")
	TEST_CGPU_NOMATCH("TST0-1c")
	TEST_CGPU_MATCH("TST1a-d")
	TEST_CGPU_NOMATCH("TST1a-a")
	TEST_CGPU_NOMATCH("TST1-a")
	TEST_CGPU_NOMATCH("TST1c-z")
	TEST_CGPU_NOMATCH("TST1c-")
	TEST_CGPU_MATCH("@")
	TEST_CGPU_NOMATCH("@abc")
	TEST_CGPU_MATCH("@@b")
	TEST_CGPU_NOMATCH("@@c")
	TEST_CGPU_MATCH("TST@")
	TEST_CGPU_NOMATCH("TST@abc")
	TEST_CGPU_MATCH("TST@@b")
	TEST_CGPU_NOMATCH("TST@@c")
	TEST_CGPU_MATCH("TST@@b-f")
	TEST_CGPU_NOMATCH("TST@@c-f")
	TEST_CGPU_NOMATCH("TST@@-a")
	cgpu.device_path = "/dev/test";
	cgpu.dev_serial = "testy";
	TEST_CGPU_MATCH("TST@/dev/test")
	TEST_CGPU_MATCH("TST@testy")
	TEST_CGPU_NOMATCH("TST@")
	TEST_CGPU_NOMATCH("TST@/dev/test5@b")
	TEST_CGPU_NOMATCH("TST@testy3@b")
	TEST_CGPU_MATCH("TST@/dev/test@b")
	TEST_CGPU_MATCH("TST@testy@b")
	TEST_CGPU_NOMATCH("TST@/dev/test@c")
	TEST_CGPU_NOMATCH("TST@testy@c")
	cgpu.device_path = "usb:000:999";
	TEST_CGPU_MATCH("TST@usb:000:999")
	drv.dname = "test7";
	TEST_CGPU_MATCH("test7")
	TEST_CGPU_MATCH("TEST7")
	TEST_CGPU_NOMATCH("test&")
	TEST_CGPU_MATCH("test7 1-2")
	TEST_CGPU_MATCH("test7@testy@b")
}
static
int cgpu_search(const char * const pattern, const int first)
{
	int i;
	struct cgpu_info *cgpu;
#define CHECK_CGPU_SEARCH  do{  \
		cgpu = get_devices(i);  \
		if (cgpu_match(pattern, cgpu))  \
			return i;  \
	}while(0)
	for (i = first; i < total_devices; ++i)
		CHECK_CGPU_SEARCH;
	for (i = 0; i < first; ++i)
		CHECK_CGPU_SEARCH;
#undef CHECK_CGPU_SEARCH
	return -1;
}

static pthread_mutex_t sharelog_lock;
static FILE *sharelog_file = NULL;

struct thr_info *get_thread(int thr_id)
{
	struct thr_info *thr;

	rd_lock(&mining_thr_lock);
	thr = mining_thr[thr_id];
	rd_unlock(&mining_thr_lock);
	return thr;
}

static struct cgpu_info *get_thr_cgpu(int thr_id)
{
	struct thr_info *thr = get_thread(thr_id);

	return thr->cgpu;
}

struct cgpu_info *get_devices(int id)
{
	struct cgpu_info *cgpu;

	rd_lock(&devices_lock);
	cgpu = devices[id];
	rd_unlock(&devices_lock);
	return cgpu;
}

static pthread_mutex_t noncelog_lock = PTHREAD_MUTEX_INITIALIZER;
static FILE *noncelog_file = NULL;

static
void noncelog(const struct work * const work)
{
	const int thr_id = work->thr_id;
	const struct cgpu_info *proc = get_thr_cgpu(thr_id);
	char buf[0x200], hash[65], data[161], midstate[65];
	int rv;
	size_t ret;

	bin2hex(hash, work->hash, 32);
	bin2hex(data, work->data, 80);
	bin2hex(midstate, work->midstate, 32);
	// timestamp,proc,hash,data,midstate
	rv = snprintf(buf, sizeof(buf), "%lu,%s,%s,%s,%s\n",
	              (unsigned long)time(NULL), proc->proc_repr_ns,
	              hash, data, midstate);
	if (unlikely(rv < 1))
	{
		applog(LOG_ERR, "noncelog printf error");
		return;
	}

	mutex_lock(&noncelog_lock);
	ret = fwrite(buf, rv, 1, noncelog_file);
	fflush(noncelog_file);
	mutex_unlock(&noncelog_lock);
	if (ret != 1)
		applog(LOG_ERR, "noncelog fwrite error");
}
static void sharelog(const char *disposition, const struct work *work)
{
	char target[(sizeof(work->target) * 2) + 1];
	char hash[(sizeof(work->hash) * 2) + 1];
	char data[(sizeof(work->data) * 2) + 1];
	struct cgpu_info *cgpu;
	unsigned long int t;
	struct pool *pool;
	int thr_id, rv;
	char s[1024];
	size_t ret;

	if (!sharelog_file)
		return;

	thr_id = work->thr_id;
	cgpu = get_thr_cgpu(thr_id);
	pool = work->pool;
	t = work->ts_getwork + timer_elapsed(&work->tv_getwork, &work->tv_work_found);
	bin2hex(target, work->target, sizeof(work->target));
	bin2hex(hash, work->hash, sizeof(work->hash));
	bin2hex(data, work->data, sizeof(work->data));

	// timestamp,disposition,target,pool,dev,thr,sharehash,sharedata
	rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->proc_repr_ns, thr_id, hash, data);
	if (rv >= (int)(sizeof(s)))
		s[sizeof(s) - 1] = '\0';
	else if (rv < 0) {
		applog(LOG_ERR, "sharelog printf error");
		return;
	}

	mutex_lock(&sharelog_lock);
	ret = fwrite(s, rv, 1, sharelog_file);
	fflush(sharelog_file);
	mutex_unlock(&sharelog_lock);
	if (ret != 1)
		applog(LOG_ERR, "sharelog fwrite error");
}
#ifdef HAVE_CURSES
static void switch_logsize(void);
#endif

static void hotplug_trigger();

void goal_set_malgo(struct mining_goal_info * const goal, struct mining_algorithm * const malgo)
{
	if (goal->malgo == malgo)
		return;
	if (goal->malgo)
		--goal->malgo->goal_refs;
	if (!malgo->goal_refs++)
	{
		// First time using a new mining algorithm may mean we need to add mining hardware to support it
		// api_thr_id is used as an ugly hack to determine if mining has started - if not, we do NOT want to try to hotplug anything (let the initial detect handle it)
		if (opt_hotplug && api_thr_id)
			hotplug_trigger();
	}
	goal->malgo = malgo;
}
struct mining_algorithm *mining_algorithm_by_alias(const char * const alias)
{
	struct mining_algorithm *malgo;
	LL_FOREACH(mining_algorithms, malgo)
	{
		if (match_strtok(malgo->aliases, "|", alias))
			return malgo;
	}
	return NULL;
}

#ifdef USE_SCRYPT
extern struct mining_algorithm malgo_scrypt;

static
const char *set_malgo_scrypt()
{
	goal_set_malgo(get_mining_goal("default"), &malgo_scrypt);
	return NULL;
}
#endif

static
int mining_goals_name_cmp(const struct mining_goal_info * const a, const struct mining_goal_info * const b)
{
	// default always goes first
	if (a->is_default)
		return -1;
	if (b->is_default)
		return 1;
	return strcmp(a->name, b->name);
}

static
void blkchain_init_block(struct blockchain_info * const blkchain)
{
	struct block_info * const dummy_block = calloc(sizeof(*dummy_block), 1);
	memset(dummy_block->prevblkhash, 0, 0x20);
	HASH_ADD(hh, blkchain->blocks, prevblkhash, sizeof(dummy_block->prevblkhash), dummy_block);
	blkchain->currentblk = dummy_block;
}

extern struct mining_algorithm malgo_sha256d;

struct mining_goal_info *get_mining_goal(const char * const name)
{
	static unsigned next_goal_id;
	struct mining_goal_info *goal;
	HASH_FIND_STR(mining_goals, name, goal);
	if (!goal)
	{
		struct blockchain_info * const blkchain = malloc(sizeof(*blkchain) + sizeof(*goal));
		goal = (void*)(&blkchain[1]);
		*blkchain = (struct blockchain_info){
			.currentblk = NULL,
		};
		blkchain_init_block(blkchain);

		*goal = (struct mining_goal_info){
			.id = next_goal_id++,
			.name = strdup(name),
			.is_default = !strcmp(name, "default"),
			.blkchain = blkchain,
			.current_diff = 0xFFFFFFFFFFFFFFFFULL,
		};
#ifdef USE_SHA256D
		goal_set_malgo(goal, &malgo_sha256d);
#else
		// NOTE: Basically random default
		goal_set_malgo(goal, mining_algorithms);
#endif
		HASH_ADD_KEYPTR(hh, mining_goals, goal->name, strlen(goal->name), goal);
		HASH_SORT(mining_goals, mining_goals_name_cmp);

#ifdef HAVE_CURSES
		devcursor = 7 + active_goals;
		switch_logsize();
#endif
	}
	return goal;
}
void mining_goal_reset(struct mining_goal_info * const goal)
{
	struct blockchain_info * const blkchain = goal->blkchain;
	struct block_info *blkinfo, *tmpblkinfo;
	HASH_ITER(hh, blkchain->blocks, blkinfo, tmpblkinfo)
	{
		HASH_DEL(blkchain->blocks, blkinfo);
		free(blkinfo);
	}
	blkchain_init_block(blkchain);
}

static char *getwork_req = "{\"method\": \"getwork\", \"params\": [], \"id\":0}\n";

/* Adjust all the pools' quota to the greatest common denominator after a pool
 * has been added or the quotas changed. */
void adjust_quota_gcd(void)
{
	unsigned long gcd, lowest_quota = ~0UL, quota;
	struct pool *pool;
	int i;

	for (i = 0; i < total_pools; i++) {
		pool = pools[i];
		quota = pool->quota;
		if (!quota)
			continue;
		if (quota < lowest_quota)
			lowest_quota = quota;
	}
	if (likely(lowest_quota < ~0UL)) {
		gcd = lowest_quota;
		for (i = 0; i < total_pools; i++) {
			pool = pools[i];
			quota = pool->quota;
			if (!quota)
				continue;
			while (quota % gcd)
				gcd--;
		}
	} else
		gcd = 1;

	for (i = 0; i < total_pools; i++) {
		pool = pools[i];
		pool->quota_used *= global_quota_gcd;
		pool->quota_used /= gcd;
		pool->quota_gcd = pool->quota / gcd;
	}

	global_quota_gcd = gcd;
	applog(LOG_DEBUG, "Global quota greatest common denominator set to %lu", gcd);
}
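
/* Worked example (illustrative, not part of the original source): with pool
 * quotas of 6, 9 and 15, the scan starts from the lowest quota (6) and
 * decrements until every quota divides evenly, settling on gcd=3; the pools
 * then get quota_gcd values of 2, 3 and 5, i.e. work is distributed in a
 * 2:3:5 ratio. */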
/* Return value is ignored if not called from add_pool_details */
struct pool *add_pool2(struct mining_goal_info * const goal)
{
	struct pool *pool;

	pool = calloc(sizeof(struct pool), 1);
	if (!pool)
		quit(1, "Failed to malloc pool in add_pool");
	pool->pool_no = pool->prio = total_pools;

	mutex_init(&pool->last_work_lock);
	mutex_init(&pool->pool_lock);
	mutex_init(&pool->pool_test_lock);
	if (unlikely(pthread_cond_init(&pool->cr_cond, bfg_condattr)))
		quit(1, "Failed to pthread_cond_init in add_pool");
	cglock_init(&pool->data_lock);
	pool->swork.data_lock_p = &pool->data_lock;
	mutex_init(&pool->stratum_lock);
	timer_unset(&pool->swork.tv_transparency);
	pool->swork.pool = pool;
	pool->goal = goal;
	pool->idle = true;
	/* Make sure the pool doesn't think we've been idle since time 0 */
	pool->tv_idle.tv_sec = ~0UL;

	cgtime(&pool->cgminer_stats.start_tv);
	pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
	pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
	pool->rpc_proxy = NULL;
	pool->quota = 1;
	pool->sock = INVSOCK;
	pool->lp_socket = CURL_SOCKET_BAD;

	pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2));
	pools[total_pools++] = pool;

	if (opt_benchmark)
	{
		// Immediately remove it
		remove_pool(pool);
		return pool;
	}

	adjust_quota_gcd();
	if (!currentpool)
		currentpool = pool;
	enable_pool(pool);

	return pool;
}

static
void pool_set_uri(struct pool * const pool, char * const uri)
{
	pool->rpc_url = uri;
	pool->pool_diff_effective_retroactively = uri_get_param_bool2(uri, "retrodiff");
}
static
bool pool_diff_effective_retroactively(struct pool * const pool)
{
	if (pool->pool_diff_effective_retroactively != BTS_UNKNOWN) {
		return pool->pool_diff_effective_retroactively;
	}
	// By default, we enable retrodiff for stratum pools, since some servers implement mining.set_difficulty in this way
	// Note that share_result will explicitly demote BTS_UNKNOWN to BTS_FALSE if a retrodiff share is rejected specifically for its failure to meet the target
	return pool->stratum_active;
}
/* Pool variant of test and set */
static bool pool_tset(struct pool *pool, bool *var)
{
	bool ret;

	mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = true;
	mutex_unlock(&pool->pool_lock);
	return ret;
}

bool pool_tclear(struct pool *pool, bool *var)
{
	bool ret;

	mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = false;
	mutex_unlock(&pool->pool_lock);
	return ret;
}
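
/* Usage sketch (illustrative, not part of the original source): because the
 * old value is returned under the pool lock, callers can act exactly once on
 * a state transition, e.g.:
 *
 *   if (!pool_tset(pool, &pool->submit_fail))
 *       applog(LOG_WARNING, "Pool %d share submission failed", pool->pool_no);
 *
 * logs only on the first failure, with pool_tclear() re-arming the flag on
 * recovery. (pool->submit_fail is used here only as an example flag name.) */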
struct pool *current_pool(void)
{
	struct pool *pool;

	cg_rlock(&control_lock);
	pool = currentpool;
	cg_runlock(&control_lock);
	return pool;
}

#if defined(USE_CPUMINING) && !defined(USE_SHA256D)
static
char *arg_ignored(const char * const arg)
{
	return NULL;
}
#endif

static
char *set_bool_ignore_arg(const char * const arg, bool * const b)
{
	return opt_set_bool(b);
}

char *set_int_range(const char *arg, int *i, int min, int max)
{
	char *err = opt_set_intval(arg, i);

	if (err)
		return err;
	if (*i < min || *i > max)
		return "Value out of range";
	return NULL;
}

static char *set_int_0_to_9999(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 9999);
}

static char *set_int_1_to_65535(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 65535);
}

static char *set_int_0_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 0, 10);
}

static char *set_int_1_to_10(const char *arg, int *i)
{
	return set_int_range(arg, i, 1, 10);
}

static char *set_long_1_to_65535_or_neg1(const char * const arg, long * const i)
{
	const long min = 1, max = 65535;
	char * const err = opt_set_longval(arg, i);
	if (err) {
		return err;
	}
	if (*i != -1 && (*i < min || *i > max)) {
		return "Value out of range";
	}
	return NULL;
}

char *set_strdup(const char *arg, char **p)
{
	*p = strdup((char *)arg);
	return NULL;
}

#if BLKMAKER_VERSION > 1
static
char *set_b58addr(const char * const arg, bytes_t * const b)
{
	size_t scriptsz = blkmk_address_to_script(NULL, 0, arg);
	if (!scriptsz)
		return "Invalid address";
	char *script = malloc(scriptsz);
	if (blkmk_address_to_script(script, scriptsz, arg) != scriptsz) {
		free(script);
		return "Failed to convert address to script";
	}
	bytes_assimilate_raw(b, script, scriptsz, scriptsz);
	return NULL;
}
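
/* Illustrative note (not part of the original source): blkmk_address_to_script
 * converts a base58 address into the output script the generation transaction
 * should pay to; for a typical P2PKH address that is the 25-byte
 * OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG pattern. The
 * initial call with a NULL buffer only measures the required script size. */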
static char *set_generate_addr2(struct mining_goal_info *, const char *);

static
char *set_generate_addr(char *arg)
{
	char * const colon = strchr(arg, ':');
	struct mining_goal_info *goal;
	if (colon)
	{
		colon[0] = '\0';
		goal = get_mining_goal(arg);
		arg = &colon[1];
	}
	else
		goal = get_mining_goal("default");
	return set_generate_addr2(goal, arg);
}

static
char *set_generate_addr2(struct mining_goal_info * const goal, const char * const arg)
{
	bytes_t newscript = BYTES_INIT;
	char *estr = set_b58addr(arg, &newscript);
	if (estr)
	{
		bytes_free(&newscript);
		return estr;
	}
	if (!goal->generation_script)
	{
		goal->generation_script = malloc(sizeof(*goal->generation_script));
		bytes_init(goal->generation_script);
	}
	bytes_assimilate(goal->generation_script, &newscript);
	bytes_free(&newscript);
	return NULL;
}
#endif

static
char *set_quit_summary(const char * const arg)
{
	if (!(strcasecmp(arg, "none") && strcasecmp(arg, "no")))
		opt_quit_summary = BQS_NONE;
	else
	if (!(strcasecmp(arg, "devs") && strcasecmp(arg, "devices")))
		opt_quit_summary = BQS_DEVS;
	else
	if (!(strcasecmp(arg, "procs") && strcasecmp(arg, "processors") && strcasecmp(arg, "chips") && strcasecmp(arg, "cores")))
		opt_quit_summary = BQS_PROCS;
	else
	if (!(strcasecmp(arg, "detailed") && strcasecmp(arg, "detail") && strcasecmp(arg, "all")))
		opt_quit_summary = BQS_DETAILED;
	else
		return "Quit summary must be one of none/devs/procs/detailed";
	return NULL;
}

static void pdiff_target_leadzero(void *, double);

char *set_request_diff(const char *arg, float *p)
{
	unsigned char target[32];
	char *e = opt_set_floatval(arg, p);
	if (e)
		return e;

	request_bdiff = (double)*p * 0.9999847412109375;
	pdiff_target_leadzero(target, *p);
	request_target_str = malloc(65);
	bin2hex(request_target_str, target, 32);

	return NULL;
}
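
/* Illustrative note (not part of the original source): the magic constant is
 * exactly 65535/65536, roughly the ratio between the bitcoind difficulty-1
 * target (0xFFFF << 208) and the pool difficulty-1 target (2^224 - 1). It
 * converts a requested pool-style pdiff into the corresponding bitcoind-style
 * bdiff, so e.g. pdiff 1.0 becomes a bdiff of about 0.99998474. */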
#ifdef NEED_BFG_LOWL_VCOM
extern struct lowlevel_device_info *_vcom_devinfo_findorcreate(struct lowlevel_device_info **, const char *);

#ifdef WIN32
void _vcom_devinfo_scan_querydosdevice(struct lowlevel_device_info ** const devinfo_list)
{
	char dev[PATH_MAX];
	char *devp = dev;
	size_t bufLen = 0x100;
tryagain: ;
	char buf[bufLen];
	if (!QueryDosDevice(NULL, buf, bufLen)) {
		if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
			bufLen *= 2;
			applog(LOG_DEBUG, "QueryDosDevice returned insufficient buffer error; enlarging to %lx", (unsigned long)bufLen);
			goto tryagain;
		}
		applogr(, LOG_WARNING, "Error occurred trying to enumerate COM ports with QueryDosDevice");
	}
	size_t tLen;
	memcpy(devp, "\\\\.\\", 4);
	devp = &devp[4];
	for (char *t = buf; *t; t += tLen) {
		tLen = strlen(t) + 1;
		if (strncmp("COM", t, 3))
			continue;
		memcpy(devp, t, tLen);
		// NOTE: We depend on _vcom_devinfo_findorcreate to further check that there's a number (and only a number) on the end
		_vcom_devinfo_findorcreate(devinfo_list, dev);
	}
}
#else
void _vcom_devinfo_scan_lsdev(struct lowlevel_device_info ** const devinfo_list)
{
	char dev[PATH_MAX];
	char *devp = dev;
	DIR *D;
	struct dirent *de;
	const char devdir[] = "/dev";
	const size_t devdirlen = sizeof(devdir) - 1;
	char *devpath = devp;
	char *devfile = devpath + devdirlen + 1;

	D = opendir(devdir);
	if (!D)
		applogr(, LOG_DEBUG, "No /dev directory to look for VCOM devices in");
	memcpy(devpath, devdir, devdirlen);
	devpath[devdirlen] = '/';
	while ( (de = readdir(D)) ) {
		if (!strncmp(de->d_name, "cu.", 3)
		 // don't probe Bluetooth devices - causes bus errors and segfaults
		 && strncmp(de->d_name, "cu.Bluetooth", 12))
			goto trydev;
		if (strncmp(de->d_name, "tty", 3))
			continue;
		if (strncmp(&de->d_name[3], "USB", 3) && strncmp(&de->d_name[3], "ACM", 3))
			continue;
trydev:
		strcpy(devfile, de->d_name);
		_vcom_devinfo_findorcreate(devinfo_list, dev);
	}
	closedir(D);
}
#endif
#endif
static char *add_serial(const char *arg)
{
	string_elist_add(arg, &scan_devices);
	return NULL;
}

static
char *opt_string_elist_add(const char *arg, struct string_elist **elist)
{
	string_elist_add(arg, elist);
	return NULL;
}

bool get_intrange(const char *arg, int *val1, int *val2)
{
	// NOTE: This could be done with sscanf, but its %n is broken in strange ways on Windows
	char *p, *p2;
	*val1 = strtol(arg, &p, 0);
	if (arg == p)
		// Zero-length number, invalid
		return false;
	while (true)
	{
		if (!p[0])
		{
			*val2 = *val1;
			return true;
		}
		if (p[0] == '-')
			break;
		if (!isspace(p[0]))
			// Garbage, invalid
			return false;
		++p;
	}
	p2 = &p[1];
	*val2 = strtol(p2, &p, 0);
	if (p2 == p)
		// Zero-length ending number, invalid
		return false;
	while (true)
	{
		if (!p[0])
			return true;
		if (!isspace(p[0]))
			// Garbage, invalid
			return false;
		++p;
	}
}
static
void _test_intrange(const char *s, const int v[2])
{
	int a[2];
	if (!get_intrange(s, &a[0], &a[1]))
	{
		++unittest_failures;
		applog(LOG_ERR, "Test \"%s\" failed: returned false", s);
	}
	for (int i = 0; i < 2; ++i)
		if (unlikely(a[i] != v[i]))
		{
			++unittest_failures;
			applog(LOG_ERR, "Test \"%s\" failed: value %d should be %d but got %d", s, i, v[i], a[i]);
		}
}
#define _test_intrange(s, ...)  _test_intrange(s, (int[]){ __VA_ARGS__ })

static
void _test_intrange_fail(const char *s)
{
	int a[2];
	if (get_intrange(s, &a[0], &a[1]))
	{
		++unittest_failures;
		applog(LOG_ERR, "Test !\"%s\" failed: returned true with %d and %d", s, a[0], a[1]);
	}
}

static
void test_intrange()
{
	_test_intrange("-1--2", -1, -2);
	_test_intrange("-1-2", -1, 2);
	_test_intrange("1--2", 1, -2);
	_test_intrange("1-2", 1, 2);
	_test_intrange("111-222", 111, 222);
	_test_intrange(" 11 - 22 ", 11, 22);
	_test_intrange("+11-+22", 11, 22);
	_test_intrange("-1", -1, -1);
	_test_intrange_fail("all");
	_test_intrange_fail("1-");
	_test_intrange_fail("");
	_test_intrange_fail("1-54x");
}
static char *set_devices(char *arg)
{
	if (*arg) {
		if (*arg == '?') {
			opt_display_devs = true;
			return NULL;
		}
	} else
		return "Invalid device parameters";

	string_elist_add(arg, &opt_devices_enabled_list);
	return NULL;
}

static char *set_balance(enum pool_strategy *strategy)
{
	*strategy = POOL_BALANCE;
	return NULL;
}

static char *set_loadbalance(enum pool_strategy *strategy)
{
	*strategy = POOL_LOADBALANCE;
	return NULL;
}

static char *set_rotate(const char *arg, int *i)
{
	pool_strategy = POOL_ROTATE;
	return set_int_range(arg, i, 0, 9999);
}

static char *set_rr(enum pool_strategy *strategy)
{
	*strategy = POOL_ROUNDROBIN;
	return NULL;
}

static
char *set_benchmark_intense()
{
	opt_benchmark = true;
	opt_benchmark_intense = true;
	return NULL;
}

/* Detect that url is for a stratum protocol either via the presence of
 * stratum+tcp or by detecting a stratum server response */
bool detect_stratum(struct pool *pool, char *url)
{
	if (!extract_sockaddr(url, &pool->sockaddr_url, &pool->stratum_port))
		return false;

	if (!strncasecmp(url, "stratum+tcp://", 14)) {
		pool_set_uri(pool, strdup(url));
		pool->has_stratum = true;
		pool->stratum_url = pool->sockaddr_url;
		return true;
	}

	return false;
}
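
/* Illustrative example (not part of the original source): a URL such as
 * "stratum+tcp://pool.example.com:3333" is flagged as stratum immediately
 * from its scheme, while a bare "pool.example.com:8332" falls through here
 * and is prefixed with "http://" by setup_url() below, leaving any stratum
 * detection to the server's responses. */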
static struct pool *add_url(void)
{
	total_urls++;
	if (total_urls > total_pools)
		add_pool();
	return pools[total_urls - 1];
}

static void setup_url(struct pool *pool, char *arg)
{
	if (detect_stratum(pool, arg))
		return;

	opt_set_charp(arg, &pool->rpc_url);
	if (strncmp(arg, "http://", 7) &&
	    strncmp(arg, "https://", 8)) {
		const size_t L = strlen(arg);
		char *httpinput;

		httpinput = malloc(8 + L);
		if (!httpinput)
			quit(1, "Failed to malloc httpinput");
		sprintf(httpinput, "http://%s", arg);
		pool_set_uri(pool, httpinput);
	}
}

static char *set_url(char *arg)
{
	struct pool *pool = add_url();

	setup_url(pool, arg);
	return NULL;
}

static char *set_quota(char *arg)
{
	char *semicolon = strchr(arg, ';'), *url;
	int len, qlen, quota;
	struct pool *pool;

	if (!semicolon)
		return "No semicolon separated quota;URL pair found";
	len = strlen(arg);
	*semicolon = '\0';
	qlen = strlen(arg);
	if (!qlen)
		return "No parameter for quota found";
	len -= qlen + 1;
	if (len < 1)
		return "No parameter for URL found";
	quota = atoi(arg);
	if (quota < 0)
		return "Invalid negative parameter for quota set";
	url = arg + qlen + 1;
	pool = add_url();
	setup_url(pool, url);
	pool->quota = quota;
	applog(LOG_INFO, "Setting pool %d to quota %d", pool->pool_no, pool->quota);
	adjust_quota_gcd();

	return NULL;
}
static char *set_user(const char *arg)
{
	struct pool *pool;

	total_users++;
	if (total_users > total_pools)
		add_pool();

	pool = pools[total_users - 1];
	opt_set_charp(arg, &pool->rpc_user);

	return NULL;
}

static char *set_pass(const char *arg)
{
	struct pool *pool;

	total_passes++;
	if (total_passes > total_pools)
		add_pool();

	pool = pools[total_passes - 1];
	opt_set_charp(arg, &pool->rpc_pass);

	return NULL;
}

static char *set_userpass(const char *arg)
{
	struct pool *pool;
	char *updup;

	if (total_users != total_passes)
		return "User + pass options must be balanced before userpass";
	++total_users;
	++total_passes;
	if (total_users > total_pools)
		add_pool();

	pool = pools[total_users - 1];
	updup = strdup(arg);
	opt_set_charp(arg, &pool->rpc_userpass);
	pool->rpc_user = updup;
	pool->rpc_pass = strchr(updup, ':');
	if (pool->rpc_pass)
		pool->rpc_pass++[0] = '\0';
	else
		pool->rpc_pass = &updup[strlen(updup)];

	return NULL;
}
static char *set_cbcaddr(char *arg)
{
	struct pool *pool;
	char *p, *addr;
	bytes_t target_script = BYTES_INIT;

	if (!total_pools)
		return "Define pool first, then the --coinbase-check-addr list";
	pool = pools[total_pools - 1];

	/* NOTE: 'x' is a new prefix which can lead both mainnet and testnet addresses; we will
	 * need to support it later, but for now leave the code as it is.
	 *
	 * Regarding details of the address prefix 'x', check the below URL:
	 * https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format
	 */
	pool->cb_param.testnet = (arg[0] != '1' && arg[0] != '3' && arg[0] != 'x');

	for (; (addr = strtok_r(arg, ",", &p)); arg = NULL)
	{
		struct bytes_hashtbl *ah;
		if (set_b58addr(addr, &target_script))
			/* Don't bother to free memory since we are going to exit anyway */
			return "Invalid address in --coinbase-check-address list";
		HASH_FIND(hh, pool->cb_param.scripts, bytes_buf(&target_script), bytes_len(&target_script), ah);
		if (!ah)
		{
			/* Note: we have no good way to release the memory allocated below,
			 * since we can't be sure there are no references left to the pool
			 * struct when remove_pool() gets called.
			 *
			 * We just hope remove_pool() is not called many, many times during
			 * the whole running life of this program.
			 */
			ah = malloc(sizeof(*ah));
			bytes_init(&ah->b);
			bytes_assimilate(&ah->b, &target_script);
			HASH_ADD_KEYPTR(hh, pool->cb_param.scripts, bytes_buf(&ah->b), bytes_len(&ah->b), ah);
		}
	}
	bytes_free(&target_script);
	return NULL;
}

static char *set_cbctotal(const char *arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Define pool first, then the --coinbase-check-total argument";
	pool = pools[total_pools - 1];

	pool->cb_param.total = atoll(arg);
	if (pool->cb_param.total < 0)
		return "The total payout amount in coinbase must not be negative";
	return NULL;
}

static char *set_cbcperc(const char *arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Define pool first, then the --coinbase-check-percent argument";
	pool = pools[total_pools - 1];
	if (!pool->cb_param.scripts)
		return "Define --coinbase-check-addr list first, then the --coinbase-check-percent argument";

	pool->cb_param.perc = atof(arg) / 100;
	if (pool->cb_param.perc < 0.0 || pool->cb_param.perc > 1.0)
		return "The percentage should be between 0 and 100";
	return NULL;
}
static
const char *goal_set(struct mining_goal_info * const goal, const char * const optname, const char * const newvalue, bytes_t * const replybuf, enum bfg_set_device_replytype * const out_success)
{
	*out_success = SDR_ERR;
	if (!(strcasecmp(optname, "malgo") && strcasecmp(optname, "algo")))
	{
		if (!newvalue)
			return "Goal option 'malgo' requires a value (eg, SHA256d)";
		struct mining_algorithm * const new_malgo = mining_algorithm_by_alias(newvalue);
		if (!new_malgo)
			return "Unrecognised mining algorithm";
		goal_set_malgo(goal, new_malgo);
		goto success;
	}
#if BLKMAKER_VERSION > 1
	if (match_strtok("generate-to|generate-to-addr|generate-to-address|genaddress|genaddr|gen-address|gen-addr|generate-address|generate-addr|coinbase-addr|coinbase-address|coinbase-payout|cbaddress|cbaddr|cb-address|cb-addr|payout", "|", optname))
	{
		if (!newvalue)
			return "Missing value for 'generate-to' goal option";
		const char * const emsg = set_generate_addr2(goal, newvalue);
		if (emsg)
			return emsg;
		goto success;
	}
#endif
	*out_success = SDR_UNKNOWN;
	return "Unknown goal option";

success:
	*out_success = SDR_OK;
	return NULL;
}

// May leak replybuf if returning an error
static
const char *set_goal_params(struct mining_goal_info * const goal, char *arg)
{
	bytes_t replybuf = BYTES_INIT;
	for (char *param, *nextptr; (param = strtok_r(arg, ",", &nextptr)); arg = NULL)
	{
		char *val = strchr(param, '=');
		if (val)
			val++[0] = '\0';

		enum bfg_set_device_replytype success;
		const char * const emsg = goal_set(goal, param, val, &replybuf, &success);
		if (success != SDR_OK)
			return emsg ?: "Error setting goal param";
	}
	bytes_free(&replybuf);
	return NULL;
}

static
const char *set_pool_goal(const char * const arg)
{
	struct pool *pool;

	if (!total_pools)
		return "Usage of --pool-goal before pools are defined does not make sense";
	pool = pools[total_pools - 1];

	char *param = strchr(arg, ':');
	if (param)
		param++[0] = '\0';

	pool->goal = get_mining_goal(arg);
	if (param)
		return set_goal_params(pool->goal, param);

	return NULL;
}
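
/* Illustrative example (not part of the original source): assuming the option
 * is wired up as --pool-goal, an argument such as
 *   --pool-goal "scryptgoal:malgo=scrypt"
 * attaches the preceding pool to the goal named "scryptgoal" and sets that
 * goal's mining algorithm, while a bare "--pool-goal default" only selects
 * the goal. Goal options are comma-separated name=value pairs handled by
 * goal_set() above. */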
static char *set_pool_priority(const char *arg)
{
	struct pool *pool;
	if (!total_pools)
		return "Usage of --pool-priority before pools are defined does not make sense";
	pool = pools[total_pools - 1];
	opt_set_intval(arg, &pool->prio);
	return NULL;
}
static char *set_pool_proxy(const char *arg)
{
	struct pool *pool;
	if (!total_pools)
		return "Usage of --pool-proxy before pools are defined does not make sense";
	if (!our_curl_supports_proxy_uris())
		return "Your installed cURL library does not support proxy URIs. At least version 7.21.7 is required.";
	pool = pools[total_pools - 1];
	opt_set_charp(arg, &pool->rpc_proxy);
	return NULL;
}
static char *set_pool_force_rollntime(const char *arg)
{
	struct pool *pool;
	if (!total_pools)
		return "Usage of --force-rollntime before pools are defined does not make sense";
	pool = pools[total_pools - 1];
	opt_set_intval(arg, &pool->force_rollntime);
	return NULL;
}
static char *enable_debug(bool *flag)
{
	*flag = true;
	opt_debug_console = true;
	/* Turn on verbose output, too. */
	opt_log_output = true;
	return NULL;
}
static char *set_schedtime(const char *arg, struct schedtime *st)
{
	if (sscanf(arg, "%d:%d", &st->tm.tm_hour, &st->tm.tm_min) != 2)
	{
		if (strcasecmp(arg, "now"))
			return "Invalid time set, should be HH:MM";
	} else
		st->tm.tm_sec = 0;
	if (st->tm.tm_hour > 23 || st->tm.tm_min > 59 || st->tm.tm_hour < 0 || st->tm.tm_min < 0)
		return "Invalid time set.";
	st->enable = true;
	return NULL;
}
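/* Example: "--sched-start 06:30" parses to tm_hour=6, tm_min=30 and enables
 * the schedule; "now" is accepted even though the HH:MM scan fails.  Inputs
 * like "6pm" (scan failure) or "25:00" (range check) are rejected with an
 * error string.
 */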
static
char *set_log_file(char *arg)
{
	char *r = "";
	long int i = strtol(arg, &r, 10);
	int fd, stderr_fd = fileno(stderr);
	if ((!*r) && i >= 0 && i <= INT_MAX)
		fd = i;
	else
	if (!strcmp(arg, "-"))
	{
		fd = fileno(stdout);
		if (unlikely(fd == -1))
			return "Standard output missing for log-file";
	}
	else
	{
		fd = open(arg, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR);
		if (unlikely(fd == -1))
			return "Failed to open log-file";
	}
	close(stderr_fd);
	if (unlikely(-1 == dup2(fd, stderr_fd)))
		return "Failed to dup2 for log-file";
	close(fd);
	return NULL;
}
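/* Example: --log-file accepts three argument forms, tried in this order:
 *   --log-file 3        redirect stderr to the already-open descriptor 3
 *   --log-file -        redirect stderr to standard output
 *   --log-file out.log  append to a file (created mode 0600 if needed)
 * Whichever descriptor is chosen is then dup2()'d over stderr.
 */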
static
char *_bfgopt_set_file(const char *arg, FILE **F, const char *mode, const char *purpose)
{
	char *r = "";
	long int i = strtol(arg, &r, 10);
	static char *err = NULL;
	const size_t errbufsz = 0x100;
	free(err);
	err = NULL;
	if ((!*r) && i >= 0 && i <= INT_MAX) {
		*F = fdopen((int)i, mode);
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Failed to open fd %d for %s",
			         (int)i, purpose);
			return err;
		}
	} else if (!strcmp(arg, "-")) {
		*F = (mode[0] == 'a') ? stdout : stdin;
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Standard %sput missing for %s",
			         (mode[0] == 'a') ? "out" : "in", purpose);
			return err;
		}
	} else {
		*F = fopen(arg, mode);
		if (!*F)
		{
			err = malloc(errbufsz);
			snprintf(err, errbufsz, "Failed to open %s for %s",
			         arg, purpose);
			return err;
		}
	}
	return NULL;
}
static char *set_noncelog(char *arg)
{
	return _bfgopt_set_file(arg, &noncelog_file, "a", "nonce log");
}
static char *set_sharelog(char *arg)
{
	return _bfgopt_set_file(arg, &sharelog_file, "a", "share log");
}
static
void _add_set_device_option(const char * const func, const char * const buf)
{
	applog(LOG_DEBUG, "%s: Using --set-device %s", func, buf);
	string_elist_add(buf, &opt_set_device_list);
}
#define add_set_device_option(...) do{ \
	char _tmp1718[0x100]; \
	snprintf(_tmp1718, sizeof(_tmp1718), __VA_ARGS__); \
	_add_set_device_option(__func__, _tmp1718); \
}while(0)
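/* Example: add_set_device_option("all:temp-cutoff=%s", "95") formats
 * "all:temp-cutoff=95" into a 256-byte buffer and queues it on
 * opt_set_device_list, exactly as if the user had passed
 * "--set-device all:temp-cutoff=95" on the command line.
 */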
char *set_temp_cutoff(char *arg)
{
	if (strchr(arg, ','))
		return "temp-cutoff no longer supports comma-delimited syntax, use --set-device for better control";
	applog(LOG_WARNING, "temp-cutoff is deprecated! Use --set-device for better control");
	add_set_device_option("all:temp-cutoff=%s", arg);
	return NULL;
}
char *set_temp_target(char *arg)
{
	if (strchr(arg, ','))
		return "temp-target no longer supports comma-delimited syntax, use --set-device for better control";
	applog(LOG_WARNING, "temp-target is deprecated! Use --set-device for better control");
	add_set_device_option("all:temp-target=%s", arg);
	return NULL;
}
#ifdef USE_OPENCL
static
char *set_no_opencl_binaries(__maybe_unused void * const dummy)
{
	applog(LOG_WARNING, "The --no-opencl-binaries option is deprecated! Use --set-device OCL:binary=no");
	add_set_device_option("OCL:binary=no");
	return NULL;
}
#endif
static
char *disable_pool_redirect(__maybe_unused void * const dummy)
{
	opt_disable_client_reconnect = true;
	want_stratum = false;
	return NULL;
}
static char *set_api_allow(const char *arg)
{
	opt_set_charp(arg, &opt_api_allow);
	return NULL;
}
static char *set_api_groups(const char *arg)
{
	opt_set_charp(arg, &opt_api_groups);
	return NULL;
}
static char *set_api_description(const char *arg)
{
	opt_set_charp(arg, &opt_api_description);
	return NULL;
}
static char *set_api_mcast_des(const char *arg)
{
	opt_set_charp(arg, &opt_api_mcast_des);
	return NULL;
}
  1864. #ifdef USE_ICARUS
  1865. extern const struct bfg_set_device_definition icarus_set_device_funcs[];
  1866. static char *set_icarus_options(const char *arg)
  1867. {
  1868. if (strchr(arg, ','))
  1869. return "icarus-options no longer supports comma-delimited syntax, see README.FPGA for better control";
  1870. applog(LOG_WARNING, "icarus-options is deprecated! See README.FPGA for better control");
  1871. char *opts = strdup(arg), *argdup;
  1872. argdup = opts;
  1873. const struct bfg_set_device_definition *sdf = icarus_set_device_funcs;
  1874. const char *drivers[] = {"antminer", "cairnsmore", "erupter", "icarus"};
  1875. char *saveptr, *opt;
  1876. for (int i = 0; i < 4; ++i, ++sdf)
  1877. {
  1878. opt = strtok_r(opts, ":", &saveptr);
  1879. opts = NULL;
  1880. if (!opt)
  1881. break;
  1882. if (!opt[0])
  1883. continue;
  1884. for (int j = 0; j < 4; ++j)
  1885. add_set_device_option("%s:%s=%s", drivers[j], sdf->optname, opt);
  1886. }
  1887. free(argdup);
  1888. return NULL;
  1889. }
  1890. static char *set_icarus_timing(const char *arg)
  1891. {
  1892. if (strchr(arg, ','))
  1893. return "icarus-timing no longer supports comma-delimited syntax, see README.FPGA for better control";
  1894. applog(LOG_WARNING, "icarus-timing is deprecated! See README.FPGA for better control");
  1895. const char *drivers[] = {"antminer", "cairnsmore", "erupter", "icarus"};
  1896. for (int j = 0; j < 4; ++j)
  1897. add_set_device_option("%s:timing=%s", drivers[j], arg);
  1898. return NULL;
  1899. }
  1900. #endif
#ifdef USE_AVALON
extern const struct bfg_set_device_definition avalon_set_device_funcs[];
static char *set_avalon_options(const char *arg)
{
	if (strchr(arg, ','))
		return "avalon-options no longer supports comma-delimited syntax, see README.FPGA for better control";
	applog(LOG_WARNING, "avalon-options is deprecated! See README.FPGA for better control");
	char *opts = strdup(arg), *argdup;
	argdup = opts;
	const struct bfg_set_device_definition *sdf = avalon_set_device_funcs;
	char *saveptr, *opt;
	for (int i = 0; i < 5; ++i, ++sdf)
	{
		opt = strtok_r(opts, ":", &saveptr);
		opts = NULL;
		if (!opt)
			break;
		if (!opt[0])
			continue;
		add_set_device_option("avalon:%s=%s", sdf->optname, opt);
	}
	free(argdup);
	return NULL;
}
#endif
#ifdef USE_KLONDIKE
static char *set_klondike_options(const char *arg)
{
	int hashclock;
	double temptarget;
	switch (sscanf(arg, "%d:%lf", &hashclock, &temptarget))
	{
		default:
			return "Unrecognised --klondike-options";
		case 2:
			add_set_device_option("klondike:temp-target=%lf", temptarget);
			// fallthru
		case 1:
			add_set_device_option("klondike:clock=%d", hashclock);
	}
	applog(LOG_WARNING, "klondike-options is deprecated! Use --set-device for better control");
	return NULL;
}
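/* Example: "--klondike-options 282:60" queues both
 * "klondike:temp-target=60.000000" and "klondike:clock=282" (case 2 falls
 * through to case 1), while "--klondike-options 282" queues only the clock.
 */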
#endif
__maybe_unused
static char *set_null(const char __maybe_unused *arg)
{
	return NULL;
}
/* These options are available from config file or commandline */
static struct opt_table opt_config_table[] = {
#ifdef USE_CPUMINING
#ifdef USE_SHA256D
	OPT_WITH_ARG("--algo",
		set_algo, show_algo, &opt_algo,
		"Specify sha256 implementation for CPU mining:\n"
		"\tfastauto*\tQuick benchmark at startup to pick a working algorithm\n"
		"\tauto\t\tBenchmark at startup and pick fastest algorithm"
		"\n\tc\t\tLinux kernel sha256, implemented in C"
#ifdef WANT_SSE2_4WAY
		"\n\t4way\t\ttcatm's 4-way SSE2 implementation"
#endif
#ifdef WANT_VIA_PADLOCK
		"\n\tvia\t\tVIA padlock implementation"
#endif
		"\n\tcryptopp\tCrypto++ C/C++ implementation"
#ifdef WANT_CRYPTOPP_ASM32
		"\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
#endif
#ifdef WANT_X8632_SSE2
		"\n\tsse2_32\t\tSSE2 32 bit implementation for i386 machines"
#endif
#ifdef WANT_X8664_SSE2
		"\n\tsse2_64\t\tSSE2 64 bit implementation for x86_64 machines"
#endif
#ifdef WANT_X8664_SSE4
		"\n\tsse4_64\t\tSSE4.1 64 bit implementation for x86_64 machines"
#endif
#ifdef WANT_ALTIVEC_4WAY
		"\n\taltivec_4way\tAltivec implementation for PowerPC G4 and G5 machines"
#endif
	),
	OPT_WITH_ARG("-a",
		set_algo, show_algo, &opt_algo,
		opt_hidden),
#else
	// NOTE: Silently ignoring option, since it is plausible a non-SHA256d miner was using it just to skip benchmarking
	OPT_WITH_ARG("--algo|-a", arg_ignored, NULL, NULL, opt_hidden),
#endif  /* USE_SHA256D */
#endif  /* USE_CPUMINING */
  1991. OPT_WITH_ARG("--api-allow",
  1992. set_api_allow, NULL, NULL,
  1993. "Allow API access only to the given list of [G:]IP[/Prefix] addresses[/subnets]"),
  1994. OPT_WITH_ARG("--api-description",
  1995. set_api_description, NULL, NULL,
  1996. "Description placed in the API status header, default: BFGMiner version"),
  1997. OPT_WITH_ARG("--api-groups",
  1998. set_api_groups, NULL, NULL,
  1999. "API one letter groups G:cmd:cmd[,P:cmd:*...] defining the cmds a groups can use"),
  2000. OPT_WITHOUT_ARG("--api-listen",
  2001. opt_set_bool, &opt_api_listen,
  2002. "Enable API, default: disabled"),
  2003. OPT_WITHOUT_ARG("--api-mcast",
  2004. opt_set_bool, &opt_api_mcast,
  2005. "Enable API Multicast listener, default: disabled"),
  2006. OPT_WITH_ARG("--api-mcast-addr",
  2007. opt_set_charp, opt_show_charp, &opt_api_mcast_addr,
  2008. "API Multicast listen address"),
  2009. OPT_WITH_ARG("--api-mcast-code",
  2010. opt_set_charp, opt_show_charp, &opt_api_mcast_code,
  2011. "Code expected in the API Multicast message, don't use '-'"),
  2012. OPT_WITH_ARG("--api-mcast-des",
  2013. set_api_mcast_des, NULL, NULL,
  2014. "Description appended to the API Multicast reply, default: ''"),
  2015. OPT_WITH_ARG("--api-mcast-port",
  2016. set_int_1_to_65535, opt_show_intval, &opt_api_mcast_port,
  2017. "API Multicast listen port"),
  2018. OPT_WITHOUT_ARG("--api-network",
  2019. opt_set_bool, &opt_api_network,
  2020. "Allow API (if enabled) to listen on/for any address, default: only 127.0.0.1"),
  2021. OPT_WITH_ARG("--api-port",
  2022. set_int_1_to_65535, opt_show_intval, &opt_api_port,
  2023. "Port number of miner API"),
#ifdef HAVE_ADL
	OPT_WITHOUT_ARG("--auto-fan",
		opt_set_bool, &opt_autofan,
		opt_hidden),
	OPT_WITHOUT_ARG("--auto-gpu",
		opt_set_bool, &opt_autoengine,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--balance",
		set_balance, &pool_strategy,
		"Change multipool strategy from failover to even share balance"),
	OPT_WITHOUT_ARG("--benchmark",
		opt_set_bool, &opt_benchmark,
		"Run BFGMiner in benchmark mode - produces no shares"),
	OPT_WITHOUT_ARG("--benchmark-intense",
		set_benchmark_intense, &opt_benchmark_intense,
		"Run BFGMiner in intensive benchmark mode - produces no shares"),
#if defined(USE_BITFORCE)
	OPT_WITHOUT_ARG("--bfl-range",
		opt_set_bool, &opt_bfl_noncerange,
		"Use nonce range on bitforce devices if supported"),
#endif
#ifdef HAVE_CHROOT
	OPT_WITH_ARG("--chroot-dir",
		opt_set_charp, NULL, &chroot_dir,
		"Chroot to a directory right after startup"),
#endif
	OPT_WITH_ARG("--cmd-idle",
		opt_set_charp, NULL, &cmd_idle,
		"Execute a command when a device is allowed to be idle (rest or wait)"),
	OPT_WITH_ARG("--cmd-sick",
		opt_set_charp, NULL, &cmd_sick,
		"Execute a command when a device is declared sick"),
	OPT_WITH_ARG("--cmd-dead",
		opt_set_charp, NULL, &cmd_dead,
		"Execute a command when a device is declared dead"),
#if BLKMAKER_VERSION > 0
	OPT_WITH_ARG("--coinbase-sig",
		set_strdup, NULL, &opt_coinbase_sig,
		"Set coinbase signature when possible"),
	OPT_WITH_ARG("--coinbase|--cbsig|--cb-sig|--cb|--prayer",
		set_strdup, NULL, &opt_coinbase_sig,
		opt_hidden),
#endif
#ifdef HAVE_CURSES
	OPT_WITHOUT_ARG("--compact",
		opt_set_bool, &opt_compact,
		"Use compact display without per device statistics"),
#endif
#ifdef USE_CPUMINING
	OPT_WITH_ARG("--cpu-threads",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		"Number of miner CPU threads"),
	OPT_WITH_ARG("-t",
		force_nthreads_int, opt_show_intval, &opt_n_threads,
		opt_hidden),
#endif
  2081. OPT_WITHOUT_ARG("--debug|-D",
  2082. enable_debug, &opt_debug,
  2083. "Enable debug output"),
  2084. OPT_WITHOUT_ARG("--debuglog",
  2085. opt_set_bool, &opt_debug,
  2086. "Enable debug logging"),
  2087. OPT_WITHOUT_ARG("--device-protocol-dump",
  2088. opt_set_bool, &opt_dev_protocol,
  2089. "Verbose dump of device protocol-level activities"),
  2090. OPT_WITH_ARG("--device|-d",
  2091. set_devices, NULL, NULL,
  2092. "Enable only devices matching pattern (default: all)"),
  2093. OPT_WITHOUT_ARG("--disable-rejecting",
  2094. opt_set_bool, &opt_disable_pool,
  2095. "Automatically disable pools that continually reject shares"),
  2096. #ifdef USE_LIBMICROHTTPD
  2097. OPT_WITH_ARG("--http-port",
  2098. opt_set_intval, opt_show_intval, &httpsrv_port,
  2099. "Port number to listen on for HTTP getwork miners (-1 means disabled)"),
  2100. #endif
  2101. OPT_WITH_ARG("--expiry",
  2102. set_int_0_to_9999, opt_show_intval, &opt_expiry,
  2103. "Upper bound on how many seconds after getting work we consider a share from it stale (w/o longpoll active)"),
  2104. OPT_WITH_ARG("-E",
  2105. set_int_0_to_9999, opt_show_intval, &opt_expiry,
  2106. opt_hidden),
  2107. OPT_WITH_ARG("--expiry-lp",
  2108. set_int_0_to_9999, opt_show_intval, &opt_expiry_lp,
  2109. "Upper bound on how many seconds after getting work we consider a share from it stale (with longpoll active)"),
  2110. OPT_WITHOUT_ARG("--failover-only",
  2111. opt_set_bool, &opt_fail_only,
  2112. "Don't leak work to backup pools when primary pool is lagging"),
  2113. OPT_WITH_ARG("--failover-switch-delay",
  2114. set_int_1_to_65535, opt_show_intval, &opt_fail_switch_delay,
  2115. "Delay in seconds before switching back to a failed pool"),
  2116. #ifdef USE_FPGA
  2117. OPT_WITHOUT_ARG("--force-dev-init",
  2118. opt_set_bool, &opt_force_dev_init,
  2119. "Always initialize devices when possible (such as bitstream uploads to some FPGAs)"),
  2120. #endif
  2121. #if BLKMAKER_VERSION > 1
  2122. OPT_WITH_ARG("--generate-to",
  2123. set_generate_addr, NULL, NULL,
  2124. "Set an address to generate to for solo mining"),
  2125. OPT_WITH_ARG("--generate-to-addr|--generate-to-address|--genaddress|--genaddr|--gen-address|--gen-addr|--generate-address|--generate-addr|--coinbase-addr|--coinbase-address|--coinbase-payout|--cbaddress|--cbaddr|--cb-address|--cb-addr|--payout",
  2126. set_generate_addr, NULL, NULL,
  2127. opt_hidden),
  2128. #endif
#ifdef USE_OPENCL
	OPT_WITH_ARG("--gpu-dyninterval",
		set_int_1_to_65535, opt_show_intval, &opt_dynamic_interval,
		opt_hidden),
	OPT_WITH_ARG("--gpu-platform",
		set_int_0_to_9999, opt_show_intval, &opt_platform_id,
		"Select OpenCL platform ID to use for GPU mining"),
	OPT_WITH_ARG("--gpu-threads|-g",
		set_gpu_threads, opt_show_intval, &opt_g_threads,
		opt_hidden),
#ifdef HAVE_ADL
	OPT_WITH_ARG("--gpu-engine",
		set_gpu_engine, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-fan",
		set_gpu_fan, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-map",
		set_gpu_map, NULL, NULL,
		"Map OpenCL to ADL device order manually, paired CSV (e.g. 1:0,2:1 maps OpenCL 1 to ADL 0, 2 to 1)"),
	OPT_WITH_ARG("--gpu-memclock",
		set_gpu_memclock, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-memdiff",
		set_gpu_memdiff, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--gpu-powertune",
		set_gpu_powertune, NULL, NULL,
		opt_hidden),
	OPT_WITHOUT_ARG("--gpu-reorder",
		opt_set_bool, &opt_reorder,
		"Attempt to reorder GPU devices according to PCI Bus ID"),
	OPT_WITH_ARG("--gpu-vddc",
		set_gpu_vddc, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_SCRYPT
	OPT_WITH_ARG("--lookup-gap",
		set_lookup_gap, NULL, NULL,
		opt_hidden),
#endif
	OPT_WITH_ARG("--intensity|-I",
		set_intensity, NULL, NULL,
		opt_hidden),
#endif
#if defined(USE_OPENCL) || defined(USE_MODMINER) || defined(USE_X6500) || defined(USE_ZTEX)
	OPT_WITH_ARG("--kernel-path",
		opt_set_charp, opt_show_charp, &opt_kernel_path,
		"Specify a path to where bitstream and kernel files are"),
	OPT_WITH_ARG("-K",
		opt_set_charp, opt_show_charp, &opt_kernel_path,
		opt_hidden),
#endif
#ifdef USE_OPENCL
	OPT_WITH_ARG("--kernel|-k",
		set_kernel, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_ICARUS
	OPT_WITH_ARG("--icarus-options",
		set_icarus_options, NULL, NULL,
		opt_hidden),
	OPT_WITH_ARG("--icarus-timing",
		set_icarus_timing, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_AVALON
	OPT_WITH_ARG("--avalon-options",
		set_avalon_options, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_KLONDIKE
	OPT_WITH_ARG("--klondike-options",
		set_klondike_options, NULL, NULL,
		"Set klondike options clock:temptarget"),
#endif
	OPT_WITHOUT_ARG("--load-balance",
		set_loadbalance, &pool_strategy,
		"Change multipool strategy from failover to quota based balance"),
	OPT_WITH_ARG("--log|-l",
		set_int_0_to_9999, opt_show_intval, &opt_log_interval,
		"Interval in seconds between log output"),
	OPT_WITH_ARG("--log-file|-L",
		set_log_file, NULL, NULL,
		"Append log file for output messages"),
	OPT_WITH_ARG("--logfile",
		set_log_file, NULL, NULL,
		opt_hidden),
	OPT_WITHOUT_ARG("--log-microseconds",
		opt_set_bool, &opt_log_microseconds,
		"Include microseconds in log output"),
#if defined(unix) || defined(__APPLE__)
	OPT_WITH_ARG("--monitor|-m",
		opt_set_charp, NULL, &opt_stderr_cmd,
		"Use custom pipe cmd for output messages"),
#endif // defined(unix)
	OPT_WITHOUT_ARG("--net-delay",
		opt_set_bool, &opt_delaynet,
		"Impose small delays in networking to avoid overloading slow routers"),
	OPT_WITHOUT_ARG("--no-adl",
		opt_set_bool, &opt_noadl,
#ifdef HAVE_ADL
		"Disable the ATI display library used for monitoring and setting GPU parameters"
#else
		opt_hidden
#endif
	),
	OPT_WITHOUT_ARG("--no-gbt",
		opt_set_invbool, &want_gbt,
		"Disable getblocktemplate support"),
	OPT_WITHOUT_ARG("--no-getwork",
		opt_set_invbool, &want_getwork,
		"Disable getwork support"),
  2242. OPT_WITHOUT_ARG("--no-hotplug",
  2243. #ifdef HAVE_BFG_HOTPLUG
  2244. opt_set_invbool, &opt_hotplug,
  2245. "Disable hotplug detection"
  2246. #else
  2247. set_null, &opt_hotplug,
  2248. opt_hidden
  2249. #endif
  2250. ),
  2251. OPT_WITHOUT_ARG("--no-local-bitcoin",
  2252. #if BLKMAKER_VERSION > 1
  2253. opt_set_invbool, &opt_load_bitcoin_conf,
  2254. "Disable adding pools for local bitcoin RPC servers"),
  2255. #else
  2256. set_null, NULL, opt_hidden),
  2257. #endif
  2258. OPT_WITHOUT_ARG("--no-longpoll",
  2259. opt_set_invbool, &want_longpoll,
  2260. "Disable X-Long-Polling support"),
  2261. OPT_WITHOUT_ARG("--no-pool-disable",
  2262. opt_set_invbool, &opt_disable_pool,
  2263. opt_hidden),
  2264. OPT_WITHOUT_ARG("--no-client-reconnect",
  2265. opt_set_invbool, &opt_disable_client_reconnect,
  2266. opt_hidden),
  2267. OPT_WITHOUT_ARG("--no-pool-redirect",
  2268. disable_pool_redirect, NULL,
  2269. "Ignore pool requests to redirect to another server"),
  2270. OPT_WITHOUT_ARG("--no-restart",
  2271. opt_set_invbool, &opt_restart,
  2272. "Do not attempt to restart devices that hang"
  2273. ),
  2274. OPT_WITHOUT_ARG("--no-show-processors",
  2275. opt_set_invbool, &opt_show_procs,
  2276. opt_hidden),
  2277. OPT_WITHOUT_ARG("--no-show-procs",
  2278. opt_set_invbool, &opt_show_procs,
  2279. opt_hidden),
  2280. OPT_WITHOUT_ARG("--no-stratum",
  2281. opt_set_invbool, &want_stratum,
  2282. "Disable Stratum detection"),
  2283. OPT_WITHOUT_ARG("--no-submit-stale",
  2284. opt_set_invbool, &opt_submit_stale,
  2285. "Don't submit shares if they are detected as stale"),
  2286. #ifdef USE_OPENCL
  2287. OPT_WITHOUT_ARG("--no-opencl-binaries",
  2288. set_no_opencl_binaries, NULL,
  2289. opt_hidden),
  2290. #endif
  2291. OPT_WITHOUT_ARG("--no-unicode",
  2292. #ifdef USE_UNICODE
  2293. opt_set_invbool, &use_unicode,
  2294. "Don't use Unicode characters in TUI"
  2295. #else
  2296. set_null, &use_unicode,
  2297. opt_hidden
  2298. #endif
  2299. ),
  2300. OPT_WITH_ARG("--noncelog",
  2301. set_noncelog, NULL, NULL,
  2302. "Create log of all nonces found"),
  2303. OPT_WITH_ARG("--pass|-p",
  2304. set_pass, NULL, NULL,
  2305. "Password for bitcoin JSON-RPC server"),
  2306. OPT_WITHOUT_ARG("--per-device-stats",
  2307. opt_set_bool, &want_per_device_stats,
  2308. "Force verbose mode and output per-device statistics"),
  2309. OPT_WITH_ARG("--userpass|-O",
  2310. set_userpass, NULL, NULL,
  2311. "Username:Password pair for bitcoin JSON-RPC server"),
  2312. OPT_WITH_ARG("--pool-goal",
  2313. set_pool_goal, NULL, NULL,
  2314. "Named goal for the previous-defined pool"),
  2315. OPT_WITH_ARG("--pool-priority",
  2316. set_pool_priority, NULL, NULL,
  2317. "Priority for just the previous-defined pool"),
  2318. OPT_WITH_ARG("--pool-proxy|-x",
  2319. set_pool_proxy, NULL, NULL,
  2320. "Proxy URI to use for connecting to just the previous-defined pool"),
  2321. OPT_WITH_ARG("--force-rollntime", // NOTE: must be after --pass for config file ordering
  2322. set_pool_force_rollntime, NULL, NULL,
  2323. opt_hidden),
  2324. OPT_WITHOUT_ARG("--protocol-dump|-P",
  2325. opt_set_bool, &opt_protocol,
  2326. "Verbose dump of protocol-level activities"),
  2327. OPT_WITH_ARG("--queue|-Q",
  2328. set_int_0_to_9999, opt_show_intval, &opt_queue,
  2329. "Minimum number of work items to have queued (0+)"),
  2330. OPT_WITHOUT_ARG("--quiet|-q",
  2331. opt_set_bool, &opt_quiet,
  2332. "Disable logging output, display status and errors"),
  2333. OPT_WITHOUT_ARG("--quiet-work-updates|--quiet-work-update",
  2334. opt_set_bool, &opt_quiet_work_updates,
  2335. opt_hidden),
  2336. OPT_WITH_ARG("--quit-summary",
  2337. set_quit_summary, NULL, NULL,
  2338. "Summary printed when you quit: none/devs/procs/detailed"),
  2339. OPT_WITH_ARG("--quota|-U",
  2340. set_quota, NULL, NULL,
  2341. "quota;URL combination for server with load-balance strategy quotas"),
  2342. OPT_WITHOUT_ARG("--real-quiet",
  2343. opt_set_bool, &opt_realquiet,
  2344. "Disable all output"),
  2345. OPT_WITH_ARG("--request-diff",
  2346. set_request_diff, opt_show_floatval, &request_pdiff,
  2347. "Request a specific difficulty from pools"),
  2348. OPT_WITH_ARG("--retries",
  2349. opt_set_intval, opt_show_intval, &opt_retries,
  2350. "Number of times to retry failed submissions before giving up (-1 means never)"),
  2351. OPT_WITH_ARG("--retry-pause",
  2352. set_null, NULL, NULL,
  2353. opt_hidden),
  2354. OPT_WITH_ARG("--rotate",
  2355. set_rotate, opt_show_intval, &opt_rotate_period,
  2356. "Change multipool strategy from failover to regularly rotate at N minutes"),
  2357. OPT_WITHOUT_ARG("--round-robin",
  2358. set_rr, &pool_strategy,
  2359. "Change multipool strategy from failover to round robin on failure"),
  2360. OPT_WITH_ARG("--scan|-S",
  2361. add_serial, NULL, NULL,
  2362. "Configure how to scan for mining devices"),
  2363. OPT_WITH_ARG("--scan-device|--scan-serial|--devscan",
  2364. add_serial, NULL, NULL,
  2365. opt_hidden),
  2366. OPT_WITH_ARG("--scan-time",
  2367. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2368. "Upper bound on time spent scanning current work, in seconds"),
  2369. OPT_WITH_ARG("-s",
  2370. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2371. opt_hidden),
  2372. OPT_WITH_ARG("--scantime",
  2373. set_int_0_to_9999, opt_show_intval, &opt_scantime,
  2374. opt_hidden),
  2375. OPT_WITH_ARG("--sched-start",
  2376. set_schedtime, NULL, &schedstart,
  2377. "Set a time of day in HH:MM to start mining (a once off without a stop time)"),
  2378. OPT_WITH_ARG("--sched-stop",
  2379. set_schedtime, NULL, &schedstop,
  2380. "Set a time of day in HH:MM to stop mining (will quit without a start time)"),
  2381. #ifdef USE_SCRYPT
  2382. OPT_WITHOUT_ARG("--scrypt",
  2383. set_malgo_scrypt, NULL,
  2384. "Use the scrypt algorithm for mining (non-bitcoin)"),
  2385. #endif
  2386. OPT_WITH_ARG("--set-device|--set",
  2387. opt_string_elist_add, NULL, &opt_set_device_list,
  2388. "Set default parameters on devices; eg"
  2389. ", NFY:osc6_bits=50"
  2390. ", bfl:voltage=<value>"
  2391. ", compac:clock=<value>"
  2392. ),
#if defined(USE_SCRYPT) && defined(USE_OPENCL)
	OPT_WITH_ARG("--shaders",
		set_shaders, NULL, NULL,
		opt_hidden),
#endif
#ifdef HAVE_PWD_H
	OPT_WITH_ARG("--setuid",
		opt_set_charp, NULL, &opt_setuid,
		"Username of an unprivileged user to run as"),
#endif
	OPT_WITH_ARG("--sharelog",
		set_sharelog, NULL, NULL,
		"Append share log to file"),
	OPT_WITH_ARG("--shares",
		opt_set_floatval, NULL, &opt_shares,
		"Quit after mining 2^32 * N hashes worth of shares (default: unlimited)"),
	OPT_WITHOUT_ARG("--show-processors",
		opt_set_bool, &opt_show_procs,
		"Show per processor statistics in summary"),
	OPT_WITHOUT_ARG("--show-procs",
		opt_set_bool, &opt_show_procs,
		opt_hidden),
	OPT_WITH_ARG("--skip-security-checks",
		set_int_0_to_9999, NULL, &opt_skip_checks,
		"Skip security checks sometimes to save bandwidth; only check 1/<arg>th of the time (default: never skip)"),
	OPT_WITH_ARG("--socks-proxy",
		opt_set_charp, NULL, &opt_socks_proxy,
		"Set socks proxy (host:port)"),
#ifdef USE_LIBEVENT
	OPT_WITH_ARG("--stratum-port",
		set_long_1_to_65535_or_neg1, opt_show_longval, &stratumsrv_port,
		"Port number to listen on for stratum miners (-1 means disabled)"),
#endif
	OPT_WITHOUT_ARG("--submit-stale",
		opt_set_bool, &opt_submit_stale,
		opt_hidden),
	OPT_WITH_ARG("--submit-threads",
		opt_set_intval, opt_show_intval, &opt_submit_threads,
		"Minimum number of concurrent share submissions (default: 64)"),
#ifdef HAVE_SYSLOG_H
	OPT_WITHOUT_ARG("--syslog",
		opt_set_bool, &use_syslog,
		"Use system log for output messages (default: standard error)"),
#endif
	OPT_WITH_ARG("--temp-cutoff",
		set_temp_cutoff, NULL, &opt_cutofftemp,
		opt_hidden),
	OPT_WITH_ARG("--temp-hysteresis",
		set_int_1_to_10, opt_show_intval, &opt_hysteresis,
		"Set how much the temperature can fluctuate outside limits when automanaging speeds"),
#ifdef HAVE_ADL
	OPT_WITH_ARG("--temp-overheat",
		set_temp_overheat, opt_show_intval, &opt_overheattemp,
		opt_hidden),
#endif
	OPT_WITH_ARG("--temp-target",
		set_temp_target, NULL, NULL,
		opt_hidden),
	OPT_WITHOUT_ARG("--text-only|-T",
		opt_set_invbool, &use_curses,
#ifdef HAVE_CURSES
		"Disable ncurses formatted screen output"
#else
		opt_hidden
#endif
	),
#if defined(USE_SCRYPT) && defined(USE_OPENCL)
	OPT_WITH_ARG("--thread-concurrency",
		set_thread_concurrency, NULL, NULL,
		opt_hidden),
#endif
#ifdef USE_UNICODE
	OPT_WITHOUT_ARG("--unicode",
		opt_set_bool, &use_unicode,
		"Use Unicode characters in TUI"),
#endif
	OPT_WITH_ARG("--url|-o",
		set_url, NULL, NULL,
		"URL for bitcoin JSON-RPC server"),
	OPT_WITH_ARG("--user|-u",
		set_user, NULL, NULL,
		"Username for bitcoin JSON-RPC server"),
#ifdef USE_OPENCL
	OPT_WITH_ARG("--vectors|-v",
		set_vector, NULL, NULL,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--verbose",
		opt_set_bool, &opt_log_output,
		"Log verbose output to stderr as well as status output"),
	OPT_WITHOUT_ARG("--verbose-work-updates|--verbose-work-update",
		opt_set_invbool, &opt_quiet_work_updates,
		opt_hidden),
	OPT_WITHOUT_ARG("--weighed-stats",
		opt_set_bool, &opt_weighed_stats,
		"Display statistics weighed to difficulty 1"),
#ifdef USE_OPENCL
	OPT_WITH_ARG("--worksize|-w",
		set_worksize, NULL, NULL,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--unittest",
		opt_set_bool, &opt_unittest, opt_hidden),
  2496. OPT_WITH_ARG("--coinbase-check-addr",
  2497. set_cbcaddr, NULL, NULL,
  2498. "A list of address to check against in coinbase payout list received from the previous-defined pool, separated by ','"),
  2499. OPT_WITH_ARG("--cbcheck-addr|--cbc-addr|--cbcaddr",
  2500. set_cbcaddr, NULL, NULL,
  2501. opt_hidden),
  2502. OPT_WITH_ARG("--coinbase-check-total",
  2503. set_cbctotal, NULL, NULL,
  2504. "The least total payout amount expected in coinbase received from the previous-defined pool"),
  2505. OPT_WITH_ARG("--cbcheck-total|--cbc-total|--cbctotal",
  2506. set_cbctotal, NULL, NULL,
  2507. opt_hidden),
  2508. OPT_WITH_ARG("--coinbase-check-percent",
  2509. set_cbcperc, NULL, NULL,
  2510. "The least benefit percentage expected for the sum of addr(s) listed in --cbaddr argument for previous-defined pool"),
  2511. OPT_WITH_ARG("--cbcheck-percent|--cbc-percent|--cbcpercent|--cbcperc",
  2512. set_cbcperc, NULL, NULL,
  2513. opt_hidden),
  2514. OPT_WITHOUT_ARG("--worktime",
  2515. opt_set_bool, &opt_worktime,
  2516. "Display extra work time debug information"),
  2517. OPT_WITH_ARG("--pools",
  2518. opt_set_bool, NULL, NULL, opt_hidden),
  2519. OPT_ENDTABLE
  2520. };
static char *load_config(const char *arg, void __maybe_unused *unused);
static char *parse_config(json_t *config, bool fileconf, int * const fileconf_load_p)
{
	static char err_buf[200];
	struct opt_table *opt;
	json_t *val;
	if (fileconf && !*fileconf_load_p)
		*fileconf_load_p = 1;
	for (opt = opt_config_table; opt->type != OPT_END; opt++) {
		char *p, *name, *sp;
		/* We don't handle subtables. */
		assert(!(opt->type & OPT_SUBTABLE));
		if (!opt->names)
			continue;
		/* Pull apart the option name(s). */
		name = strdup(opt->names);
		for (p = strtok_r(name, "|", &sp); p; p = strtok_r(NULL, "|", &sp)) {
			char *err = "Invalid value";
			/* Ignore short options. */
			if (p[1] != '-')
				continue;
			val = json_object_get(config, p+2);
			if (!val)
				continue;
			if (opt->type & OPT_HASARG) {
				if (json_is_string(val)) {
					err = opt->cb_arg(json_string_value(val),
					                  opt->u.arg);
				} else if (json_is_number(val)) {
					char buf[256], *p, *q;
					snprintf(buf, 256, "%f", json_number_value(val));
					if ( (p = strchr(buf, '.')) ) {
						// Trim /\.0*$/ to work properly with integer-only arguments
						q = p;
						while (*(++q) == '0') {}
						if (*q == '\0')
							*p = '\0';
					}
					err = opt->cb_arg(buf, opt->u.arg);
				} else if (json_is_array(val)) {
					int n, size = json_array_size(val);
					err = NULL;
					for (n = 0; n < size && !err; n++) {
						if (json_is_string(json_array_get(val, n)))
							err = opt->cb_arg(json_string_value(json_array_get(val, n)), opt->u.arg);
						else if (json_is_object(json_array_get(val, n)))
							err = parse_config(json_array_get(val, n), false, fileconf_load_p);
					}
				}
			} else if (opt->type & OPT_NOARG) {
				if (json_is_true(val))
					err = opt->cb(opt->u.arg);
				else if (json_is_boolean(val)) {
					if (opt->cb == (void*)opt_set_bool)
						err = opt_set_invbool(opt->u.arg);
					else if (opt->cb == (void*)opt_set_invbool)
						err = opt_set_bool(opt->u.arg);
				}
			}
			if (err) {
				/* Allow invalid values to be in configuration
				 * file, just skipping over them provided the
				 * JSON is still valid after that. */
				if (fileconf) {
					applog(LOG_ERR, "Invalid config option %s: %s", p, err);
					*fileconf_load_p = -1;
				} else {
					snprintf(err_buf, sizeof(err_buf), "Parsing JSON option %s: %s",
					         p, err);
					free(name);
					return err_buf;
				}
			}
		}
		free(name);
	}
	val = json_object_get(config, JSON_INCLUDE_CONF);
	if (val && json_is_string(val))
		return load_config(json_string_value(val), NULL);
	return NULL;
}
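/* Example: a config file maps long option names (without the leading dashes)
 * to JSON values.  A minimal illustrative file (pool URLs are hypothetical):
 *   {
 *     "api-listen": true,
 *     "api-port": 4028,
 *     "scan-time": 60,
 *     "url": ["http://pool1.example.com:8332", "http://pool2.example.com:8332"]
 *   }
 * Strings go to the option callback as-is; numbers are printed with "%f" and
 * the trailing ".000000" trimmed so integer-only options accept them; arrays
 * invoke the callback once per element; and false on a boolean option calls
 * the inverse setter (opt_set_bool <-> opt_set_invbool).
 */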
struct bfg_loaded_configfile *bfg_loaded_configfiles;
char conf_web1[] = "http://";
char conf_web2[] = "https://";
static char *load_web_config(const char *arg)
{
	json_t *val;
	CURL *curl;
	struct bfg_loaded_configfile *cfginfo;
	curl = curl_easy_init();
	if (unlikely(!curl))
		quithere(1, "CURL initialisation failed");
	val = json_web_config(curl, arg);
	curl_easy_cleanup(curl);
	if (!val || !json_is_object(val))
		return JSON_WEB_ERROR;
	cfginfo = malloc(sizeof(*cfginfo));
	*cfginfo = (struct bfg_loaded_configfile){
		.filename = strdup(arg),
	};
	LL_APPEND(bfg_loaded_configfiles, cfginfo);
	config_loaded = true;
	return parse_config(val, true, &cfginfo->fileconf_load);
}
static char *load_config(const char *arg, void __maybe_unused *unused)
{
	json_error_t err;
	json_t *config;
	char *json_error;
	size_t siz;
	struct bfg_loaded_configfile *cfginfo;
	if (strncasecmp(arg, conf_web1, sizeof(conf_web1)-1) == 0 ||
	    strncasecmp(arg, conf_web2, sizeof(conf_web2)-1) == 0)
		return load_web_config(arg);
	cfginfo = malloc(sizeof(*cfginfo));
	*cfginfo = (struct bfg_loaded_configfile){
		.filename = strdup(arg),
	};
	LL_APPEND(bfg_loaded_configfiles, cfginfo);
	if (++include_count > JSON_MAX_DEPTH)
		return JSON_MAX_DEPTH_ERR;
#if JANSSON_MAJOR_VERSION > 1
	config = json_load_file(arg, 0, &err);
#else
	config = json_load_file(arg, &err);
#endif
	if (!json_is_object(config)) {
		siz = JSON_LOAD_ERROR_LEN + strlen(arg) + strlen(err.text);
		json_error = malloc(siz);
		if (!json_error)
			quit(1, "Malloc failure in json error");
		snprintf(json_error, siz, JSON_LOAD_ERROR, arg, err.text);
		return json_error;
	}
	config_loaded = true;
	/* Parse the config now, so we can override it. That can keep pointers
	 * so don't free config object. */
	return parse_config(config, true, &cfginfo->fileconf_load);
}
static
bool _load_default_configs(const char * const filepath, void * __maybe_unused userp)
{
	bool * const found_defcfg_p = userp;
	*found_defcfg_p = true;
	load_config(filepath, NULL);
	// Regardless of status of loading the config file, we should continue loading other defaults
	return false;
}
static void load_default_config(void)
{
	bool found_defcfg = false;
	appdata_file_call("BFGMiner", def_conf, _load_default_configs, &found_defcfg);
	if (!found_defcfg)
	{
		// No BFGMiner config, try Cgminer's...
		appdata_file_call("cgminer", "cgminer.conf", _load_default_configs, &found_defcfg);
	}
}
extern const char *opt_argv0;
static
void bfg_versioninfo(void)
{
	puts(packagename);
	printf(" Lowlevel:%s\n", BFG_LOWLLIST);
	printf(" Drivers:%s\n", BFG_DRIVERLIST);
	printf(" Algorithms:%s\n", BFG_ALGOLIST);
	printf(" Options:%s\n", BFG_OPTLIST);
}
static char *opt_verusage_and_exit(const char *extra)
{
	bfg_versioninfo();
	printf("%s", opt_usage(opt_argv0, extra));
	fflush(stdout);
	exit(0);
}
static
const char *my_opt_version_and_exit(void)
{
	bfg_versioninfo();
	fflush(stdout);
	exit(0);
}
/* These options are parsed before anything else */
static struct opt_table opt_early_table[] = {
	// Default config is loaded in command line order, like a regular config
	OPT_EARLY_WITH_ARG("--config|-c|--default-config",
		set_bool_ignore_arg, NULL, &config_loaded,
		opt_hidden),
	OPT_EARLY_WITHOUT_ARG("--no-config|--no-default-config",
		opt_set_bool, &config_loaded,
		"Inhibit loading default config file"),
	OPT_ENDTABLE
};
/* These options are available from commandline only */
static struct opt_table opt_cmdline_table[] = {
	OPT_WITH_ARG("--config|-c",
		load_config, NULL, NULL,
		"Load a JSON-format configuration file\n"
		"See example.conf for an example configuration."),
	OPT_EARLY_WITHOUT_ARG("--no-config",
		opt_set_bool, &config_loaded,
		opt_hidden),
	OPT_EARLY_WITHOUT_ARG("--no-default-config",
		opt_set_bool, &config_loaded,
		"Inhibit loading default config file"),
	OPT_WITHOUT_ARG("--default-config",
		load_default_config, NULL,
		"Always load the default config file"),
	OPT_WITHOUT_ARG("--help|-h",
		opt_verusage_and_exit, NULL,
		"Print this message"),
#ifdef USE_OPENCL
	OPT_WITHOUT_ARG("--ndevs|-n",
		print_ndevs_and_exit, &nDevs,
		opt_hidden),
#endif
	OPT_WITHOUT_ARG("--version|-V",
		my_opt_version_and_exit, NULL,
		"Display version and exit"),
	OPT_ENDTABLE
};
static bool jobj_binary(const json_t *obj, const char *key,
                        void *buf, size_t buflen, bool required)
{
	const char *hexstr;
	json_t *tmp;
	tmp = json_object_get(obj, key);
	if (unlikely(!tmp)) {
		if (unlikely(required))
			applog(LOG_ERR, "JSON key '%s' not found", key);
		return false;
	}
	hexstr = json_string_value(tmp);
	if (unlikely(!hexstr)) {
		applog(LOG_ERR, "JSON key '%s' is not a string", key);
		return false;
	}
	if (!hex2bin(buf, hexstr, buflen))
		return false;
	return true;
}
static void calc_midstate(struct work *work)
{
	union {
		unsigned char c[64];
		uint32_t i[16];
	} data;
	swap32yes(&data.i[0], work->data, 16);
	sha256_ctx ctx;
	sha256_init(&ctx);
	sha256_update(&ctx, data.c, 64);
	memcpy(work->midstate, ctx.h, sizeof(work->midstate));
	swap32tole(work->midstate, work->midstate, 8);
}
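/* The midstate is the SHA-256 internal state after compressing the first 64
 * of the 80 header bytes (version, prevhash, and most of the merkle root).
 * A device holding the midstate only needs the final 16 bytes (merkle tail,
 * ntime, nbits, nonce) per hash attempt, which is why only one 64-byte
 * sha256_update() call is made above.
 */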
static
struct bfg_tmpl_ref *tmpl_makeref(blktemplate_t * const tmpl)
{
	struct bfg_tmpl_ref * const tr = malloc(sizeof(*tr));
	*tr = (struct bfg_tmpl_ref){
		.tmpl = tmpl,
		.refcount = 1,
	};
	mutex_init(&tr->mutex);
	return tr;
}
static
void tmpl_incref(struct bfg_tmpl_ref * const tr)
{
	mutex_lock(&tr->mutex);
	++tr->refcount;
	mutex_unlock(&tr->mutex);
}
void tmpl_decref(struct bfg_tmpl_ref * const tr)
{
	mutex_lock(&tr->mutex);
	bool free_tmpl = !--tr->refcount;
	mutex_unlock(&tr->mutex);
	if (free_tmpl)
	{
		blktmpl_free(tr->tmpl);
		mutex_destroy(&tr->mutex);
		free(tr);
	}
}
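/* Example lifecycle: tmpl_makeref() returns a reference with refcount 1
 * (owned by the caller); each additional struct work sharing the template
 * takes tmpl_incref(), and clean_work() releases it via tmpl_decref().  The
 * final tmpl_decref() frees the block template, the mutex, and the ref.
 */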
static struct work *make_work(void)
{
	struct work *work = calloc(1, sizeof(struct work));
	if (unlikely(!work))
		quit(1, "Failed to calloc work in make_work");
	cg_wlock(&control_lock);
	work->id = total_work++;
	cg_wunlock(&control_lock);
	return work;
}
/* This is the central place all work that is about to be retired should be
 * cleaned to remove any dynamically allocated arrays within the struct */
void clean_work(struct work *work)
{
	free(work->job_id);
	bytes_free(&work->nonce2);
	free(work->nonce1);
	if (work->device_data_free_func)
		work->device_data_free_func(work);
	if (work->tr)
		tmpl_decref(work->tr);
	memset(work, 0, sizeof(struct work));
}
/* All dynamically allocated work structs should be freed here to not leak any
 * ram from arrays allocated within the work struct */
void free_work(struct work *work)
{
	clean_work(work);
	free(work);
}
const char *bfg_workpadding_bin = "\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x02\0\0";
#define workpadding_bin bfg_workpadding_bin
static const size_t block_info_str_sz = 3 /* ... */ + 16 /* block hash segment */ + 1;
static
void block_info_str(char * const out, const struct block_info * const blkinfo)
{
	unsigned char hash_swap[32];
	swap256(hash_swap, blkinfo->prevblkhash);
	swap32tole(hash_swap, hash_swap, 32 / 4);
	memset(out, '.', 3);
	// FIXME: The block number will overflow this sometime around AD 2025-2027
	if (blkinfo->height > 0 && blkinfo->height < 1000000)
	{
		bin2hex(&out[3], &hash_swap[0x1c], 4);
		snprintf(&out[11], block_info_str_sz-11, " #%6u", blkinfo->height);
	}
	else
		bin2hex(&out[3], &hash_swap[0x18], 8);
}
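/* Example output (hash digits illustrative): with a known height the string
 * looks like "...1a2b3c4d #350000" (three dots, four prevhash bytes as hex,
 * then the height); otherwise it falls back to eight hex-encoded bytes of
 * the byte-reversed previous-block hash, e.g. "...1a2b3c4d5e6f7a8b".
 */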
#ifdef HAVE_CURSES
static void update_block_display(bool);
#endif
// Must only be called with ch_lock held!
static
void __update_block_title(struct mining_goal_info * const goal)
{
	struct blockchain_info * const blkchain = goal->blkchain;
	if (!goal->current_goal_detail)
		goal->current_goal_detail = malloc(block_info_str_sz);
	block_info_str(goal->current_goal_detail, blkchain->currentblk);
#ifdef HAVE_CURSES
	update_block_display(false);
#endif
}
static struct block_info *block_exists(const struct blockchain_info *, const void *);
static
void have_block_height(struct mining_goal_info * const goal, const void * const prevblkhash, uint32_t blkheight)
{
	struct blockchain_info * const blkchain = goal->blkchain;
	struct block_info * const blkinfo = block_exists(blkchain, prevblkhash);
	if ((!blkinfo) || blkinfo->height)
		return;
	uint32_t block_id = ((uint32_t*)prevblkhash)[0];
	applog(LOG_DEBUG, "Learned that block id %08" PRIx32 " is height %" PRIu32, (uint32_t)be32toh(block_id), blkheight);
	cg_wlock(&ch_lock);
	blkinfo->height = blkheight;
	if (blkinfo == blkchain->currentblk)
	{
		blkchain->currentblk_subsidy = 5000000000LL >> (blkheight / 210000);
		__update_block_title(goal);
	}
	cg_wunlock(&ch_lock);
}
static
void pool_set_opaque(struct pool *pool, bool opaque)
{
	if (pool->swork.opaque == opaque)
		return;
	pool->swork.opaque = opaque;
	if (opaque)
		applog(LOG_WARNING, "Pool %u is hiding block contents from us",
		       pool->pool_no);
	else
		applog(LOG_NOTICE, "Pool %u now providing block contents to us",
		       pool->pool_no);
}
bool pool_may_redirect_to(struct pool * const pool, const char * const uri)
{
	if (uri_get_param_bool(pool->rpc_url, "redirect", false))
		return true;
	return match_domains(pool->rpc_url, strlen(pool->rpc_url), uri, strlen(uri));
}
void pool_check_coinbase(struct pool * const pool, const uint8_t * const cbtxn, const size_t cbtxnsz)
{
	if (uri_get_param_bool(pool->rpc_url, "skipcbcheck", false))
	{}
	else
	if (!check_coinbase(cbtxn, cbtxnsz, &pool->cb_param))
	{
		if (pool->enabled == POOL_ENABLED)
		{
			applog(LOG_ERR, "Pool %d misbehaving (%s), disabling!", pool->pool_no, "coinbase check");
			disable_pool(pool, POOL_MISBEHAVING);
		}
	}
	else
	if (pool->enabled == POOL_MISBEHAVING)
	{
		applog(LOG_NOTICE, "Pool %d no longer misbehaving, re-enabling!", pool->pool_no);
		enable_pool(pool);
	}
}
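/* Example: when a pool's coinbase transaction fails check_coinbase() against
 * the --coinbase-check-* parameters, the pool is disabled as
 * POOL_MISBEHAVING; a later coinbase that passes re-enables it automatically.
 * Setting the "skipcbcheck" parameter on the pool URI bypasses the check.
 */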
void set_simple_ntime_roll_limit(struct ntime_roll_limits * const nrl, const uint32_t ntime_base, const int ntime_roll, const struct timeval * const tvp_ref)
{
	const int offsets = max(ntime_roll, 60);
	*nrl = (struct ntime_roll_limits){
		.min = ntime_base,
		.max = ntime_base + ntime_roll,
		.tv_ref = *tvp_ref,
		.minoff = -offsets,
		.maxoff = offsets,
	};
}
void work_set_simple_ntime_roll_limit(struct work * const work, const int ntime_roll, const struct timeval * const tvp_ref)
{
	set_simple_ntime_roll_limit(&work->ntime_roll_limits, upk_u32be(work->data, 0x44), ntime_roll, tvp_ref);
}
int work_ntime_range(struct work * const work, const struct timeval * const tvp_earliest, const struct timeval * const tvp_latest, const int desired_roll)
{
	const struct ntime_roll_limits * const nrl = &work->ntime_roll_limits;
	const uint32_t ref_ntime = work_get_ntime(work);
	const int earliest_elapsed = timer_elapsed(&nrl->tv_ref, tvp_earliest);
	const int latest_elapsed = timer_elapsed(&nrl->tv_ref, tvp_latest);
	// minimum ntime is the latest possible result (add a second to spare) adjusted for minimum offset (or fixed minimum ntime)
	uint32_t min_ntime = max(nrl->min, ref_ntime + latest_elapsed+1 + nrl->minoff);
	// maximum ntime is the earliest possible result adjusted for maximum offset (or fixed maximum ntime)
	uint32_t max_ntime = min(nrl->max, ref_ntime + earliest_elapsed + nrl->maxoff);
	if (max_ntime < min_ntime)
		return -1;
	if (max_ntime - min_ntime > desired_roll)
	{
		// Adjust min_ntime upward for accuracy, when possible
		const int mid_elapsed = ((latest_elapsed - earliest_elapsed) / 2) + earliest_elapsed;
		uint32_t ideal_ntime = ref_ntime + mid_elapsed;
		if (ideal_ntime > min_ntime)
			min_ntime = min(ideal_ntime, max_ntime - desired_roll);
	}
	work_set_ntime(work, min_ntime);
	return max_ntime - min_ntime;
}
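/* Worked example (illustrative numbers): say ref_ntime=1000, tv_ref is 5s
 * before tvp_earliest, tvp_latest is 2s after tvp_earliest, minoff=-60,
 * maxoff=+60, and nrl->min/max are not binding.  Then:
 *   min_ntime = 1000 + 7 + 1 - 60 = 948
 *   max_ntime = 1000 + 5 + 60    = 1065
 * The span (117) exceeds a desired_roll of 30, so min_ntime is raised toward
 * the midpoint estimate 1000 + 6 = 1006 (capped at max_ntime - 30 = 1035);
 * ntime is set to 1006 and the function returns 1065 - 1006 = 59.
 */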
#if BLKMAKER_VERSION > 1
static
bool goal_has_at_least_one_getcbaddr(const struct mining_goal_info * const goal)
{
	for (int i = 0; i < total_pools; ++i)
	{
		struct pool * const pool = pools[i];
		if (uri_get_param_bool(pool->rpc_url, "getcbaddr", false))
			return true;
	}
	return false;
}
static
void refresh_bitcoind_address(struct mining_goal_info * const goal, const bool fresh)
{
	struct blockchain_info * const blkchain = goal->blkchain;
	if (!goal_has_at_least_one_getcbaddr(goal))
		return;
	char getcbaddr_req[60];
	CURL *curl = NULL;
	json_t *json, *j2;
	const char *s, *s2;
	bytes_t newscript = BYTES_INIT;
	snprintf(getcbaddr_req, sizeof(getcbaddr_req), "{\"method\":\"get%saddress\",\"id\":0,\"params\":[\"BFGMiner\"]}", fresh ? "new" : "account");
	for (int i = 0; i < total_pools; ++i)
	{
		struct pool * const pool = pools[i];
		if (!uri_get_param_bool(pool->rpc_url, "getcbaddr", false))
			continue;
		if (pool->goal != goal)
			continue;
		applog(LOG_DEBUG, "Refreshing coinbase address from pool %d", pool->pool_no);
		if (!curl)
		{
			curl = curl_easy_init();
			if (unlikely(!curl))
			{
				applogfail(LOG_ERR, "curl_easy_init");
				break;
			}
		}
		json = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, getcbaddr_req, false, false, NULL, pool, true);
		if (unlikely((!json) || !json_is_null( (j2 = json_object_get(json, "error")) )))
		{
			const char *estrc;
			char *estr = NULL;
			if (!(json && j2))
				estrc = NULL;
			else
			{
				estrc = json_string_value(j2);
				if (!estrc)
					estrc = estr = json_dumps_ANY(j2, JSON_ENSURE_ASCII | JSON_SORT_KEYS);
			}
			applog(LOG_WARNING, "Error %cetting coinbase address from pool %d: %s", 'g', pool->pool_no, estrc);
			free(estr);
			json_decref(json);
			continue;
		}
		s = bfg_json_obj_string(json, "result", NULL);
		if (unlikely(!s))
		{
			applog(LOG_WARNING, "Error %cetting coinbase address from pool %d: %s", 'g', pool->pool_no, "(return value was not a String)");
			json_decref(json);
			continue;
		}
		s2 = set_b58addr(s, &newscript);
		if (unlikely(s2))
		{
			applog(LOG_WARNING, "Error %cetting coinbase address from pool %d: %s", 's', pool->pool_no, s2);
			json_decref(json);
			continue;
		}
		cg_ilock(&control_lock);
		if (goal->generation_script)
		{
			if (bytes_eq(&newscript, goal->generation_script))
			{
				cg_iunlock(&control_lock);
				applog(LOG_DEBUG, "Pool %d returned coinbase address already in use (%s)", pool->pool_no, s);
				json_decref(json);
				break;
			}
			cg_ulock(&control_lock);
		}
		else
		{
			cg_ulock(&control_lock);
			goal->generation_script = malloc(sizeof(*goal->generation_script));
			bytes_init(goal->generation_script);
		}
		bytes_assimilate(goal->generation_script, &newscript);
		coinbase_script_block_id = blkchain->currentblk->block_id;
		cg_wunlock(&control_lock);
		applog(LOG_NOTICE, "Now using coinbase address %s, provided by pool %d", s, pool->pool_no);
		json_decref(json);
		break;
	}
	bytes_free(&newscript);
	if (curl)
		curl_easy_cleanup(curl);
}
#endif
#define GBT_XNONCESZ (sizeof(uint32_t))
#if BLKMAKER_VERSION > 6
#define blkmk_append_coinbase_safe(tmpl, append, appendsz) \
	blkmk_append_coinbase_safe2(tmpl, append, appendsz, GBT_XNONCESZ, false)
#endif
static bool work_decode(struct pool *pool, struct work *work, json_t *val)
{
	json_t *res_val = json_object_get(val, "result");
	json_t *tmp_val;
	bool ret = false;
	struct timeval tv_now;
	if (unlikely(detect_algo == 1)) {
		json_t *tmp = json_object_get(res_val, "algorithm");
		const char *v = tmp ? json_string_value(tmp) : "";
		if (strncasecmp(v, "scrypt", 6))
			detect_algo = 2;
	}
	timer_set_now(&tv_now);
	if (work->tr)
	{
		blktemplate_t * const tmpl = work->tr->tmpl;
		const char *err = blktmpl_add_jansson(tmpl, res_val, tv_now.tv_sec);
		if (err) {
			applog(LOG_ERR, "blktmpl error: %s", err);
			return false;
		}
		work->rolltime = blkmk_time_left(tmpl, tv_now.tv_sec);
#if BLKMAKER_VERSION > 1
		struct mining_goal_info * const goal = pool->goal;
		const uint32_t tmpl_block_id = ((uint32_t*)tmpl->prevblk)[0];
		if ((!tmpl->cbtxn) && coinbase_script_block_id != tmpl_block_id)
			refresh_bitcoind_address(goal, false);
		if (goal->generation_script)
		{
			bool newcb;
#if BLKMAKER_VERSION > 2
			blkmk_init_generation2(tmpl, bytes_buf(goal->generation_script), bytes_len(goal->generation_script), &newcb);
#else
			newcb = !tmpl->cbtxn;
			blkmk_init_generation(tmpl, bytes_buf(goal->generation_script), bytes_len(goal->generation_script));
#endif
			if (newcb)
			{
				ssize_t ae = blkmk_append_coinbase_safe(tmpl, &template_nonce, sizeof(template_nonce));
				if (ae < (ssize_t)sizeof(template_nonce))
					applog(LOG_WARNING, "Cannot append template-nonce to coinbase on pool %u (%"PRId64") - you might be wasting hashing!", work->pool->pool_no, (int64_t)ae);
				++template_nonce;
			}
		}
#endif
#if BLKMAKER_VERSION > 0
		{
			ssize_t ae = blkmk_append_coinbase_safe(tmpl, opt_coinbase_sig, 101);
			static bool appenderr = false;
			if (ae <= 0) {
				if (opt_coinbase_sig) {
					applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Cannot append coinbase signature at all on pool %u (%"PRId64")", pool->pool_no, (int64_t)ae);
					appenderr = true;
				}
			} else if (ae >= 3 || opt_coinbase_sig) {
				const char *cbappend = opt_coinbase_sig;
				const char * const full = bfgminer_name_space_ver;
				char *need_free = NULL;
				if (!cbappend) {
					// NOTE: full is a pointer, so use strlen rather than sizeof for its length
					if ((size_t)ae >= strlen(full))
						cbappend = full;
					else if ((size_t)ae >= sizeof(PACKAGE) - 1)
					{
						const char *pos = strchr(full, '-');
						const size_t sz = pos ? (size_t)(pos - full) : 0;
						if (pos && (size_t)ae > sz)
						{
							cbappend = need_free = malloc(sz + 1);
							memcpy(need_free, full, sz);
							need_free[sz] = '\0';
						}
						else
							cbappend = PACKAGE;
					}
					else
						cbappend = "BFG";
				}
  3150. size_t cbappendsz = strlen(cbappend);
  3151. static bool truncatewarning = false;
  3152. if (cbappendsz <= (size_t)ae) {
  3153. if (cbappendsz < (size_t)ae)
  3154. // If we have space, include the trailing \0
  3155. ++cbappendsz;
  3156. ae = cbappendsz;
  3157. truncatewarning = false;
  3158. } else {
  3159. char *tmp = malloc(ae + 1);
  3160. memcpy(tmp, opt_coinbase_sig, ae);
  3161. tmp[ae] = '\0';
  3162. applog((truncatewarning ? LOG_DEBUG : LOG_WARNING),
  3163. "Pool %u truncating appended coinbase signature at %"PRId64" bytes: %s(%s)",
  3164. pool->pool_no, (int64_t)ae, tmp, &opt_coinbase_sig[ae]);
  3165. free(tmp);
  3166. truncatewarning = true;
  3167. }
  3168. ae = blkmk_append_coinbase_safe(tmpl, cbappend, ae);
  3169. free(need_free);
  3170. if (ae <= 0) {
  3171. applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Error appending coinbase signature (%"PRId64")", (int64_t)ae);
  3172. appenderr = true;
  3173. } else
  3174. appenderr = false;
  3175. }
  3176. }
  3177. #endif
  3178. if (blkmk_get_data(tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
  3179. return false;
  3180. swap32yes(work->data, work->data, 80 / 4);
  3181. memcpy(&work->data[80], workpadding_bin, 48);
  3182. work->ntime_roll_limits = (struct ntime_roll_limits){
  3183. .min = tmpl->mintime,
  3184. .max = tmpl->maxtime,
  3185. .tv_ref = tv_now,
  3186. .minoff = tmpl->mintimeoff,
  3187. .maxoff = tmpl->maxtimeoff,
  3188. };
  3189. const struct blktmpl_longpoll_req *lp;
  3190. mutex_lock(&pool->pool_lock);
  3191. if ((lp = blktmpl_get_longpoll(tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) {
  3192. free(pool->lp_id);
  3193. pool->lp_id = strdup(lp->id);
  3194. #if 0 /* This just doesn't work :( */
  3195. curl_socket_t sock = pool->lp_socket;
  3196. if (sock != CURL_SOCKET_BAD) {
  3197. pool->lp_socket = CURL_SOCKET_BAD;
  3198. applog(LOG_WARNING, "Pool %u long poll request hanging, reconnecting", pool->pool_no);
  3199. shutdown(sock, SHUT_RDWR);
  3200. }
  3201. #endif
  3202. }
  3203. mutex_unlock(&pool->pool_lock);
  3204. }
  3205. else
  3206. if (unlikely(!jobj_binary(res_val, "data", work->data, sizeof(work->data), true))) {
  3207. applog(LOG_ERR, "JSON inval data");
  3208. return false;
  3209. }
  3210. else
  3211. work_set_simple_ntime_roll_limit(work, 0, &tv_now);
  3212. if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) {
  3213. // Calculate it ourselves
  3214. applog(LOG_DEBUG, "Calculating midstate locally");
  3215. calc_midstate(work);
  3216. }
  3217. if (unlikely(!jobj_binary(res_val, "target", work->target, sizeof(work->target), true))) {
  3218. applog(LOG_ERR, "JSON inval target");
  3219. return false;
  3220. }
3221. if (work->tr)
3222. { // GBT supplies the target in big-endian byte order; reverse it into the little-endian form used internally
  3223. for (size_t i = 0; i < sizeof(work->target) / 2; ++i)
  3224. {
  3225. int p = (sizeof(work->target) - 1) - i;
  3226. unsigned char c = work->target[i];
  3227. work->target[i] = work->target[p];
  3228. work->target[p] = c;
  3229. }
  3230. }
  3231. if ( (tmp_val = json_object_get(res_val, "height")) ) {
  3232. struct mining_goal_info * const goal = pool->goal;
  3233. uint32_t blkheight = json_number_value(tmp_val);
  3234. const void * const prevblkhash = &work->data[4];
  3235. have_block_height(goal, prevblkhash, blkheight);
  3236. }
  3237. memset(work->hash, 0, sizeof(work->hash));
  3238. work->tv_staged = tv_now;
  3239. #if BLKMAKER_VERSION > 6
  3240. if (work->tr)
  3241. {
  3242. blktemplate_t * const tmpl = work->tr->tmpl;
  3243. uint8_t buf[80];
  3244. int16_t expire;
  3245. uint8_t *cbtxn;
  3246. size_t cbtxnsz;
  3247. size_t cbextranonceoffset;
  3248. int branchcount;
  3249. libblkmaker_hash_t *branches;
  3250. if (blkmk_get_mdata(tmpl, buf, sizeof(buf), tv_now.tv_sec, &expire, &cbtxn, &cbtxnsz, &cbextranonceoffset, &branchcount, &branches, GBT_XNONCESZ, false))
  3251. {
  3252. struct stratum_work * const swork = &pool->swork;
  3253. const size_t branchdatasz = branchcount * 0x20;
  3254. pool_check_coinbase(pool, cbtxn, cbtxnsz);
  3255. cg_wlock(&pool->data_lock);
  3256. if (swork->tr)
  3257. tmpl_decref(swork->tr);
  3258. swork->tr = work->tr;
  3259. tmpl_incref(swork->tr);
  3260. bytes_assimilate_raw(&swork->coinbase, cbtxn, cbtxnsz, cbtxnsz);
  3261. swork->nonce2_offset = cbextranonceoffset;
  3262. bytes_assimilate_raw(&swork->merkle_bin, branches, branchdatasz, branchdatasz);
  3263. swork->merkles = branchcount;
  3264. swap32yes(swork->header1, &buf[0], 36 / 4);
  3265. swork->ntime = le32toh(*(uint32_t *)(&buf[68]));
  3266. swork->tv_received = tv_now;
  3267. swap32yes(swork->diffbits, &buf[72], 4 / 4);
  3268. memcpy(swork->target, work->target, sizeof(swork->target));
  3269. free(swork->job_id);
  3270. swork->job_id = NULL;
  3271. swork->clean = true;
  3272. swork->work_restart_id = pool->work_restart_id;
  3273. // FIXME: Do something with expire
  3274. pool->nonce2sz = swork->n2size = GBT_XNONCESZ;
  3275. pool->nonce2 = 0;
  3276. cg_wunlock(&pool->data_lock);
  3277. }
  3278. else
  3279. applog(LOG_DEBUG, "blkmk_get_mdata failed for pool %u", pool->pool_no);
  3280. }
  3281. #endif // BLKMAKER_VERSION > 6
  3282. pool_set_opaque(pool, !work->tr);
  3283. ret = true;
  3284. return ret;
  3285. }
3286. /* Returns true if the pool supports local work generation. */
  3287. static bool pool_localgen(struct pool *pool)
  3288. {
  3289. return (pool->last_work_copy || pool->has_stratum);
  3290. }
  3291. int dev_from_id(int thr_id)
  3292. {
  3293. struct cgpu_info *cgpu = get_thr_cgpu(thr_id);
  3294. return cgpu->device_id;
  3295. }
  3296. /* Create an exponentially decaying average over the opt_log_interval */
  3297. void decay_time(double *f, double fadd, double fsecs)
  3298. {
  3299. double ftotal, fprop;
  3300. fprop = 1.0 - 1 / (exp(fsecs / (double)opt_log_interval));
  3301. ftotal = 1.0 + fprop;
  3302. *f += (fadd * fprop);
  3303. *f /= ftotal;
  3304. }
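/* Illustrative sketch (hypothetical helper, not part of the original file):
 * one decay_time() step, assuming the default opt_log_interval of 5 seconds. */
static __maybe_unused
void decay_time_example(void)
{
	double avg = 0.0;
	/* fprop = 1 - 1/exp(5/5) ~= 0.632 and ftotal ~= 1.632, so a first
	 * sample of 100 leaves avg at 100 * 0.632 / 1.632 ~= 38.7 */
	decay_time(&avg, 100.0, 5.0);
}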
  3305. static
  3306. int __total_staged(const bool include_spares)
  3307. {
  3308. int tot = HASH_COUNT(staged_work);
  3309. if (!include_spares)
  3310. tot -= staged_spare;
  3311. return tot;
  3312. }
  3313. static int total_staged(const bool include_spares)
  3314. {
  3315. int ret;
  3316. mutex_lock(stgd_lock);
  3317. ret = __total_staged(include_spares);
  3318. mutex_unlock(stgd_lock);
  3319. return ret;
  3320. }
  3321. #ifdef HAVE_CURSES
  3322. WINDOW *mainwin, *statuswin, *logwin;
  3323. #endif
  3324. double total_secs = 1.0;
  3325. #ifdef HAVE_CURSES
  3326. static char statusline[256];
3327. /* statusy is the row the status window is allowed to extend down to when the full layout won't fit at startup */
  3328. static int statusy;
  3329. static int devsummaryYOffset;
  3330. static int total_lines;
  3331. #endif
  3332. #ifdef USE_OPENCL
  3333. struct cgpu_info gpus[MAX_GPUDEVICES]; /* Maximum number apparently possible */
  3334. #endif
  3335. struct cgpu_info *cpus;
  3336. bool _bfg_console_cancel_disabled;
  3337. int _bfg_console_prev_cancelstate;
  3338. #ifdef HAVE_CURSES
  3339. #define lock_curses() bfg_console_lock()
  3340. #define unlock_curses() bfg_console_unlock()
  3341. static bool curses_active_locked(void)
  3342. {
  3343. bool ret;
  3344. lock_curses();
  3345. ret = curses_active;
  3346. if (!ret)
  3347. unlock_curses();
  3348. return ret;
  3349. }
  3350. // Cancellable getch
  3351. int my_cancellable_getch(void)
  3352. {
  3353. // This only works because the macro only hits direct getch() calls
  3354. typedef int (*real_getch_t)(void);
  3355. const real_getch_t real_getch = __real_getch;
  3356. int type, rv;
  3357. bool sct;
  3358. sct = !pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &type);
  3359. rv = real_getch();
  3360. if (sct)
  3361. pthread_setcanceltype(type, &type);
  3362. return rv;
  3363. }
  3364. #ifdef PDCURSES
  3365. static
  3366. int bfg_wresize(WINDOW *win, int lines, int columns)
  3367. {
  3368. int rv = wresize(win, lines, columns);
  3369. int x, y;
  3370. getyx(win, y, x);
  3371. if (unlikely(y >= lines || x >= columns))
  3372. {
  3373. if (y >= lines)
  3374. y = lines - 1;
  3375. if (x >= columns)
  3376. x = columns - 1;
  3377. wmove(win, y, x);
  3378. }
  3379. return rv;
  3380. }
  3381. #else
  3382. # define bfg_wresize wresize
  3383. #endif
  3384. #endif
  3385. void tailsprintf(char *buf, size_t bufsz, const char *fmt, ...)
  3386. {
  3387. va_list ap;
  3388. size_t presz = strlen(buf);
  3389. va_start(ap, fmt);
  3390. vsnprintf(&buf[presz], bufsz - presz, fmt, ap);
  3391. va_end(ap);
  3392. }
  3393. double stats_elapsed(struct cgminer_stats *stats)
  3394. {
  3395. struct timeval now;
  3396. double elapsed;
  3397. if (stats->start_tv.tv_sec == 0)
  3398. elapsed = total_secs;
  3399. else {
  3400. cgtime(&now);
  3401. elapsed = tdiff(&now, &stats->start_tv);
  3402. }
  3403. if (elapsed < 1.0)
  3404. elapsed = 1.0;
  3405. return elapsed;
  3406. }
  3407. bool drv_ready(struct cgpu_info *cgpu)
  3408. {
  3409. switch (cgpu->status) {
  3410. case LIFE_INIT:
  3411. case LIFE_DEAD2:
  3412. return false;
  3413. default:
  3414. return true;
  3415. }
  3416. }
  3417. double cgpu_utility(struct cgpu_info *cgpu)
  3418. {
  3419. double dev_runtime = cgpu_runtime(cgpu);
  3420. return cgpu->utility = cgpu->accepted / dev_runtime * 60;
  3421. }
  3422. #define suffix_string(val, buf, bufsiz, sigdigits) do{ \
  3423. _Static_assert(sigdigits == 0, "suffix_string only supported with sigdigits==0"); \
  3424. format_unit3(buf, bufsiz, FUP_DIFF, "", H2B_SHORTV, val, -1); \
  3425. }while(0)
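/* Usage sketch (illustrative): suffix_string() now only supports
 * sigdigits == 0 and routes through the FUP_DIFF path of format_unit3, e.g.
 *   char buf[ALLOC_H2B_SHORTV];
 *   suffix_string(16384, buf, sizeof(buf), 0);
 * leaves "16.4k" in buf (H2B_SHORTV omits the space for the base unit). */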
  3426. static float
  3427. utility_to_hashrate(double utility)
  3428. {
  3429. return utility * 0x4444444;
  3430. }
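/* Note (added commentary): utility is accepted shares per minute at
 * difficulty 1, and a diff-1 share represents 2^32 expected hashes, so the
 * hashrate in H/s is utility * 2^32 / 60; 0x4444444 = 71582788 ~= 2^32/60. */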
3431. static const char *_unitchar = "pn\xb5m kMGTPEZY?";
  3432. static const int _unitbase = 4;
  3433. static
  3434. void pick_unit(float hashrate, unsigned char *unit)
  3435. {
  3436. unsigned char i;
  3437. if (hashrate == 0 || !isfinite(hashrate))
  3438. {
  3439. if (*unit < _unitbase)
  3440. *unit = _unitbase;
  3441. return;
  3442. }
  3443. hashrate *= 1e12;
  3444. for (i = 0; i < *unit; ++i)
  3445. hashrate /= 1e3;
  3446. // 1000 but with tolerance for floating-point rounding, avoid showing "1000.0"
  3447. while (hashrate >= 999.95)
  3448. {
  3449. hashrate /= 1e3;
  3450. if (likely(_unitchar[*unit] != '?'))
  3451. ++*unit;
  3452. }
  3453. }
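/* Worked example (illustrative): pick_unit() takes a rate in H/s, rescales
 * it to picohashes, then steps through _unitchar until the mantissa drops
 * below ~1000.  For 12.5e9 H/s: 12.5e9 * 1e12 = 1.25e22, and seven /1e3
 * steps leave 12.5 with *unit == 7, i.e. 'G' for gigahashes. */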
  3454. #define hashrate_pick_unit(hashrate, unit) pick_unit(hashrate, unit)
  3455. enum h2bs_fmt {
  3456. H2B_NOUNIT, // "xxx.x"
  3457. H2B_SHORT, // "xxx.xMH/s"
  3458. H2B_SPACED, // "xxx.x MH/s"
  3459. H2B_SHORTV, // Like H2B_SHORT, but omit space for base unit
  3460. };
  3461. enum bfu_floatprec {
  3462. FUP_INTEGER,
  3463. FUP_HASHES,
  3464. FUP_BTC,
  3465. FUP_DIFF,
  3466. };
  3467. static
  3468. int format_unit3(char *buf, size_t sz, enum bfu_floatprec fprec, const char *measurement, enum h2bs_fmt fmt, float hashrate, signed char unitin)
  3469. {
  3470. char *s = buf;
  3471. unsigned char prec, i, unit;
  3472. int rv = 0;
  3473. if (unitin == -1)
  3474. {
  3475. unit = 0;
  3476. hashrate_pick_unit(hashrate, &unit);
  3477. }
  3478. else
  3479. unit = unitin;
  3480. hashrate *= 1e12;
  3481. for (i = 0; i < unit; ++i)
  3482. hashrate /= 1000;
  3483. switch (fprec)
  3484. {
  3485. case FUP_HASHES:
  3486. // 100 but with tolerance for floating-point rounding, max "99.99" then "100.0"
3487. if (hashrate >= 99.995 || unit < 2)  // the unit<2 check exercised by test_decimal_width below
  3488. prec = 1;
  3489. else
  3490. prec = 2;
  3491. _SNP("%5.*f", prec, hashrate);
  3492. break;
  3493. case FUP_INTEGER:
  3494. _SNP("%3d", (int)hashrate);
  3495. break;
  3496. case FUP_BTC:
  3497. if (hashrate >= 99.995)
  3498. prec = 0;
  3499. else
  3500. prec = 2;
  3501. _SNP("%5.*f", prec, hashrate);
  3502. break;
  3503. case FUP_DIFF:
  3504. if (unit > _unitbase)
  3505. _SNP("%.3g", hashrate);
  3506. else
  3507. _SNP("%u", (unsigned int)hashrate);
  3508. }
  3509. if (fmt != H2B_NOUNIT)
  3510. {
  3511. char uc[3] = {_unitchar[unit], '\0'};
  3512. switch (fmt) {
  3513. case H2B_SPACED:
  3514. _SNP(" ");
  3515. default:
  3516. break;
  3517. case H2B_SHORTV:
  3518. if (isspace(uc[0]))
  3519. uc[0] = '\0';
  3520. }
  3521. if (uc[0] == '\xb5')
  3522. // Convert to UTF-8
  3523. snprintf(uc, sizeof(uc), "%s", U8_MICRO);
  3524. _SNP("%s%s", uc, measurement);
  3525. }
  3526. return rv;
  3527. }
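/* Output sketch (illustrative) for format_unit3() fed 12.5e9 H/s with a
 * caller-supplied measurement of "h/s":
 *   H2B_NOUNIT -> "12.50"       (value columns only)
 *   H2B_SHORT  -> "12.50Gh/s"
 *   H2B_SPACED -> "12.50 Gh/s"
 *   H2B_SHORTV -> as H2B_SHORT, but a blank base unit is dropped entirely */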
  3528. #define format_unit2(buf, sz, floatprec, measurement, fmt, n, unit) \
  3529. format_unit3(buf, sz, floatprec ? FUP_HASHES : FUP_INTEGER, measurement, fmt, n, unit)
  3530. static
  3531. char *_multi_format_unit(char **buflist, size_t *bufszlist, bool floatprec, const char *measurement, enum h2bs_fmt fmt, const char *delim, int count, const float *numbers, bool isarray)
  3532. {
  3533. unsigned char unit = 0;
  3534. bool allzero = true;
  3535. int i;
  3536. size_t delimsz = 0;
  3537. char *buf = buflist[0];
  3538. size_t bufsz = bufszlist[0];
  3539. size_t itemwidth = (floatprec ? 5 : 3);
  3540. if (!isarray)
  3541. delimsz = strlen(delim);
  3542. for (i = 0; i < count; ++i)
  3543. if (numbers[i] != 0)
  3544. {
  3545. pick_unit(numbers[i], &unit);
  3546. allzero = false;
  3547. }
  3548. if (allzero)
  3549. unit = _unitbase;
  3550. --count;
  3551. for (i = 0; i < count; ++i)
  3552. {
  3553. format_unit2(buf, bufsz, floatprec, NULL, H2B_NOUNIT, numbers[i], unit);
  3554. if (isarray)
  3555. {
  3556. buf = buflist[i + 1];
  3557. bufsz = bufszlist[i + 1];
  3558. }
  3559. else
  3560. {
  3561. buf += itemwidth;
  3562. bufsz -= itemwidth;
  3563. if (delimsz > bufsz)
  3564. delimsz = bufsz;
  3565. memcpy(buf, delim, delimsz);
  3566. buf += delimsz;
  3567. bufsz -= delimsz;
  3568. }
  3569. }
  3570. // Last entry has the unit
  3571. format_unit2(buf, bufsz, floatprec, measurement, fmt, numbers[count], unit);
  3572. return buflist[0];
  3573. }
  3574. #define multi_format_unit2(buf, bufsz, floatprec, measurement, fmt, delim, count, ...) _multi_format_unit((char *[]){buf}, (size_t[]){bufsz}, floatprec, measurement, fmt, delim, count, (float[]){ __VA_ARGS__ }, false)
  3575. #define multi_format_unit_array2(buflist, bufszlist, floatprec, measurement, fmt, count, ...) (void)_multi_format_unit(buflist, bufszlist, floatprec, measurement, fmt, NULL, count, (float[]){ __VA_ARGS__ }, true)
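/* Usage sketch (illustrative, mirroring the bandwidth display in
 * curses_print_status() below): all values share one unit, chosen to fit the
 * largest of them:
 *   char bwstr[(ALLOC_H2B_SHORT*2)+3+1];
 *   multi_format_unit2(bwstr, sizeof(bwstr), false, "B/s", H2B_SHORT,
 *                      "/", 2, 1400.0, 350.0);
 * yields "  1/  0kB/s" - 350 B/s rounds down to 0 in the shared 'k' unit. */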
  3576. static
  3577. int percentf3(char * const buf, size_t sz, double p, const double t)
  3578. {
  3579. char *s = buf;
  3580. int rv = 0;
  3581. if (!p)
  3582. _SNP("none");
  3583. else
  3584. if (t <= p)
  3585. _SNP("100%%");
  3586. else
  3587. {
  3588. p /= t;
  3589. if (p < 0.00995) // 0.01 but with tolerance for floating-point rounding, max ".99%"
  3590. _SNP(".%02.0f%%", p * 10000); // ".01%"
  3591. else
  3592. if (p < 0.0995) // 0.1 but with tolerance for floating-point rounding, max "9.9%"
  3593. _SNP("%.1f%%", p * 100); // "9.1%"
  3594. else
  3595. _SNP("%3.0f%%", p * 100); // " 99%"
  3596. }
  3597. return rv;
  3598. }
  3599. #define percentf4(buf, bufsz, p, t) percentf3(buf, bufsz, p, p + t)
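/* Output sketch (illustrative): percentf3() always produces four visible
 * columns so status lines stay aligned (verified by test_decimal_width):
 *   p = 0       -> "none"
 *   p >= t      -> "100%"
 *   p/t = 0.005 -> ".50%"
 *   p/t = 0.05  -> "5.0%"
 *   p/t = 0.5   -> " 50%" */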
  3600. static
3601. void test_decimal_width(void)
  3602. {
  3603. // The pipe character at end of each line should perfectly line up
  3604. char printbuf[512];
  3605. char testbuf1[64];
  3606. char testbuf2[64];
  3607. char testbuf3[64];
  3608. char testbuf4[64];
  3609. double testn;
  3610. int width;
  3611. int saved;
  3612. // Hotspots around 0.1 and 0.01
  3613. saved = -1;
  3614. for (testn = 0.09; testn <= 0.11; testn += 0.000001) {
  3615. percentf3(testbuf1, sizeof(testbuf1), testn, 1.0);
  3616. percentf3(testbuf2, sizeof(testbuf2), testn, 10.0);
  3617. width = snprintf(printbuf, sizeof(printbuf), "%10g %s %s |", testn, testbuf1, testbuf2);
  3618. if (unlikely((saved != -1) && (width != saved))) {
  3619. ++unittest_failures;
  3620. applog(LOG_ERR, "Test width mismatch in percentf3! %d not %d at %10g", width, saved, testn);
  3621. applog(LOG_ERR, "%s", printbuf);
  3622. }
  3623. saved = width;
  3624. }
  3625. // Hotspot around 100 (but test this in several units because format_unit2 also has unit<2 check)
  3626. saved = -1;
  3627. for (testn = 99.0; testn <= 101.0; testn += 0.0001) {
  3628. format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn , -1);
  3629. format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1);
  3630. format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1);
  3631. snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s |", testn, testbuf1, testbuf2, testbuf3);
  3632. width = utf8_strlen(printbuf);
  3633. if (unlikely((saved != -1) && (width != saved))) {
  3634. ++unittest_failures;
  3635. applog(LOG_ERR, "Test width mismatch in format_unit2! %d not %d at %10g", width, saved, testn);
  3636. applog(LOG_ERR, "%s", printbuf);
  3637. }
  3638. saved = width;
  3639. }
  3640. // Hotspot around unit transition boundary in pick_unit
  3641. saved = -1;
  3642. for (testn = 999.0; testn <= 1001.0; testn += 0.0001) {
  3643. format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn , -1);
  3644. format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1);
  3645. format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1);
  3646. format_unit2(testbuf4, sizeof(testbuf4), true, "x", H2B_SHORT, testn * 1e9, -1);
  3647. snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s %s |", testn, testbuf1, testbuf2, testbuf3, testbuf4);
  3648. width = utf8_strlen(printbuf);
  3649. if (unlikely((saved != -1) && (width != saved))) {
  3650. ++unittest_failures;
  3651. applog(LOG_ERR, "Test width mismatch in pick_unit! %d not %d at %10g", width, saved, testn);
  3652. applog(LOG_ERR, "%s", printbuf);
  3653. }
  3654. saved = width;
  3655. }
  3656. }
  3657. #ifdef HAVE_CURSES
  3658. static void adj_width(int var, int *length);
  3659. #endif
  3660. #ifdef HAVE_CURSES
  3661. static int awidth = 1, rwidth = 1, swidth = 1, hwwidth = 1;
  3662. static
  3663. void format_statline(char *buf, size_t bufsz, const char *cHr, const char *aHr, const char *uHr, int accepted, int rejected, int stale, double wnotaccepted, double waccepted, int hwerrs, double bad_diff1, double allnonces)
  3664. {
  3665. char rejpcbuf[6];
  3666. char bnbuf[6];
  3667. adj_width(accepted, &awidth);
  3668. adj_width(rejected, &rwidth);
  3669. adj_width(stale, &swidth);
  3670. adj_width(hwerrs, &hwwidth);
  3671. percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted);
  3672. percentf3(bnbuf, sizeof(bnbuf), bad_diff1, allnonces);
  3673. tailsprintf(buf, bufsz, "%s/%s/%s | A:%*d R:%*d+%*d(%s) HW:%*d/%s",
  3674. cHr, aHr, uHr,
  3675. awidth, accepted,
  3676. rwidth, rejected,
  3677. swidth, stale,
  3678. rejpcbuf,
  3679. hwwidth, hwerrs,
  3680. bnbuf
  3681. );
  3682. }
  3683. static
  3684. const char *pool_proto_str(const struct pool * const pool)
  3685. {
  3686. if (pool->idle)
  3687. return "Dead ";
  3688. if (pool->has_stratum)
  3689. return "Strtm";
  3690. if (pool->lp_url && pool->proto != pool->lp_proto)
  3691. return "Mixed";
  3692. switch (pool->proto)
  3693. {
  3694. case PLP_GETBLOCKTEMPLATE:
  3695. return " GBT ";
  3696. case PLP_GETWORK:
  3697. return "GWork";
  3698. default:
  3699. return "Alive";
  3700. }
  3701. }
  3702. #endif
  3703. static inline
  3704. void temperature_column(char *buf, size_t bufsz, bool maybe_unicode, const float * const temp)
  3705. {
  3706. if (!(use_unicode && have_unicode_degrees))
  3707. maybe_unicode = false;
  3708. if (temp && *temp > 0.)
  3709. if (maybe_unicode)
  3710. snprintf(buf, bufsz, "%4.1f"U8_DEGREE"C", *temp);
  3711. else
  3712. snprintf(buf, bufsz, "%4.1fC", *temp);
  3713. else
  3714. {
  3715. if (temp)
  3716. snprintf(buf, bufsz, " ");
  3717. if (maybe_unicode)
  3718. tailsprintf(buf, bufsz, " ");
  3719. }
  3720. tailsprintf(buf, bufsz, " | ");
  3721. }
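/* Column sketch (illustrative): for a given mode every branch above emits
 * the same visible width, e.g. "42.5C | ", "42.5°C | " with unicode degrees,
 * or the equivalent run of spaces when no sensor reading is available. */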
  3722. void get_statline3(char *buf, size_t bufsz, struct cgpu_info *cgpu, bool for_curses, bool opt_show_procs)
  3723. {
  3724. #ifndef HAVE_CURSES
  3725. assert(for_curses == false);
  3726. #endif
  3727. struct device_drv *drv = cgpu->drv;
  3728. enum h2bs_fmt hashrate_style = for_curses ? H2B_SHORT : H2B_SPACED;
  3729. char cHr[ALLOC_H2B_NOUNIT+1], aHr[ALLOC_H2B_NOUNIT+1], uHr[max(ALLOC_H2B_SHORT, ALLOC_H2B_SPACED)+3+1];
  3730. char rejpcbuf[6];
  3731. char bnbuf[6];
  3732. double dev_runtime;
  3733. if (!opt_show_procs)
  3734. cgpu = cgpu->device;
  3735. dev_runtime = cgpu_runtime(cgpu);
  3736. double rolling, mhashes;
  3737. int accepted, rejected, stale;
  3738. double waccepted;
  3739. double wnotaccepted;
  3740. int hwerrs;
  3741. double bad_diff1, good_diff1;
  3742. rolling = mhashes = waccepted = wnotaccepted = 0;
  3743. accepted = rejected = stale = hwerrs = bad_diff1 = good_diff1 = 0;
  3744. {
  3745. struct cgpu_info *slave = cgpu;
  3746. for (int i = 0; i < cgpu->procs; ++i, (slave = slave->next_proc))
  3747. {
  3748. slave->utility = slave->accepted / dev_runtime * 60;
  3749. slave->utility_diff1 = slave->diff_accepted / dev_runtime * 60;
  3750. rolling += drv->get_proc_rolling_hashrate ? drv->get_proc_rolling_hashrate(slave) : slave->rolling;
  3751. mhashes += slave->total_mhashes;
  3752. if (opt_weighed_stats)
  3753. {
  3754. accepted += slave->diff_accepted;
  3755. rejected += slave->diff_rejected;
  3756. stale += slave->diff_stale;
  3757. }
  3758. else
  3759. {
  3760. accepted += slave->accepted;
  3761. rejected += slave->rejected;
  3762. stale += slave->stale;
  3763. }
  3764. waccepted += slave->diff_accepted;
  3765. wnotaccepted += slave->diff_rejected + slave->diff_stale;
  3766. hwerrs += slave->hw_errors;
  3767. bad_diff1 += slave->bad_diff1;
  3768. good_diff1 += slave->diff1;
  3769. if (opt_show_procs)
  3770. break;
  3771. }
  3772. }
  3773. double wtotal = (waccepted + wnotaccepted);
  3774. multi_format_unit_array2(
  3775. ((char*[]){cHr, aHr, uHr}),
  3776. ((size_t[]){sizeof(cHr), sizeof(aHr), sizeof(uHr)}),
  3777. true, "h/s", hashrate_style,
  3778. 3,
  3779. 1e6*rolling,
  3780. 1e6*mhashes / dev_runtime,
  3781. utility_to_hashrate(good_diff1 * (wtotal ? (waccepted / wtotal) : 1) * 60 / dev_runtime));
  3782. // Processor representation
  3783. #ifdef HAVE_CURSES
  3784. if (for_curses)
  3785. {
  3786. if (opt_show_procs)
  3787. snprintf(buf, bufsz, " %*s: ", -(5 + max_lpdigits), cgpu->proc_repr);
  3788. else
  3789. snprintf(buf, bufsz, " %s: ", cgpu->dev_repr);
  3790. }
  3791. else
  3792. #endif
  3793. {
  3794. if (opt_show_procs)
  3795. snprintf(buf, bufsz, "%*s ", -(5 + max_lpdigits), cgpu->proc_repr_ns);
  3796. else
  3797. snprintf(buf, bufsz, "%-5s ", cgpu->dev_repr_ns);
  3798. }
  3799. if (include_serial_in_statline && cgpu->dev_serial)
  3800. tailsprintf(buf, bufsz, "[serial=%s] ", cgpu->dev_serial);
  3801. if (unlikely(cgpu->status == LIFE_INIT))
  3802. {
  3803. tailsprintf(buf, bufsz, "Initializing...");
  3804. return;
  3805. }
  3806. {
  3807. const size_t bufln = strlen(buf);
  3808. const size_t abufsz = (bufln >= bufsz) ? 0 : (bufsz - bufln);
  3809. if (likely(cgpu->status != LIFE_DEAD2) && drv->override_statline_temp2 && drv->override_statline_temp2(buf, bufsz, cgpu, opt_show_procs))
  3810. temperature_column(&buf[bufln], abufsz, for_curses, NULL);
  3811. else
  3812. {
  3813. float temp = cgpu->temp;
  3814. if (!opt_show_procs)
  3815. {
  3816. // Find the highest temperature of all processors
  3817. struct cgpu_info *proc = cgpu;
  3818. for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc))
  3819. if (proc->temp > temp)
  3820. temp = proc->temp;
  3821. }
  3822. temperature_column(&buf[bufln], abufsz, for_curses, &temp);
  3823. }
  3824. }
  3825. #ifdef HAVE_CURSES
  3826. if (for_curses)
  3827. {
  3828. const char *cHrStatsOpt[] = {AS_BAD("DEAD "), AS_BAD("SICK "), "OFF ", AS_BAD("REST "), AS_BAD(" ERR "), AS_BAD("WAIT "), cHr};
  3829. const char *cHrStats;
  3830. int cHrStatsI = (sizeof(cHrStatsOpt) / sizeof(*cHrStatsOpt)) - 1;
  3831. bool all_dead = true, all_off = true, all_rdrv = true;
  3832. struct cgpu_info *proc = cgpu;
  3833. for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc))
  3834. {
  3835. switch (cHrStatsI) {
3836. default: // the cases below deliberately fall through, cascading from least to most severe status
  3837. if (proc->status == LIFE_WAIT)
  3838. cHrStatsI = 5;
  3839. case 5:
  3840. if (proc->deven == DEV_RECOVER_ERR)
  3841. cHrStatsI = 4;
  3842. case 4:
  3843. if (proc->deven == DEV_RECOVER)
  3844. cHrStatsI = 3;
  3845. case 3:
  3846. if (proc->status == LIFE_SICK || proc->status == LIFE_DEAD || proc->status == LIFE_DEAD2)
  3847. {
  3848. cHrStatsI = 1;
  3849. all_off = false;
  3850. }
  3851. else
  3852. {
  3853. if (likely(proc->deven == DEV_ENABLED))
  3854. all_off = false;
  3855. if (proc->deven != DEV_RECOVER_DRV)
  3856. all_rdrv = false;
  3857. }
  3858. case 1:
  3859. break;
  3860. }
  3861. if (likely(proc->status != LIFE_DEAD && proc->status != LIFE_DEAD2))
  3862. all_dead = false;
  3863. if (opt_show_procs)
  3864. break;
  3865. }
  3866. if (unlikely(all_dead))
  3867. cHrStatsI = 0;
  3868. else
  3869. if (unlikely(all_off))
  3870. cHrStatsI = 2;
  3871. cHrStats = cHrStatsOpt[cHrStatsI];
  3872. if (cHrStatsI == 2 && all_rdrv)
  3873. cHrStats = " RST ";
  3874. format_statline(buf, bufsz,
  3875. cHrStats,
  3876. aHr, uHr,
  3877. accepted, rejected, stale,
  3878. wnotaccepted, waccepted,
  3879. hwerrs,
  3880. bad_diff1, bad_diff1 + good_diff1);
  3881. }
  3882. else
  3883. #endif
  3884. {
  3885. percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted);
  3886. percentf4(bnbuf, sizeof(bnbuf), bad_diff1, good_diff1);
  3887. tailsprintf(buf, bufsz, "%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s",
  3888. opt_log_interval,
  3889. cHr, aHr, uHr,
  3890. accepted,
  3891. rejected,
  3892. stale,
  3893. rejpcbuf,
  3894. hwerrs,
  3895. bnbuf
  3896. );
  3897. }
  3898. }
  3899. #define get_statline(buf, bufsz, cgpu) get_statline3(buf, bufsz, cgpu, false, opt_show_procs)
  3900. #define get_statline2(buf, bufsz, cgpu, for_curses) get_statline3(buf, bufsz, cgpu, for_curses, opt_show_procs)
  3901. static void text_print_status(int thr_id)
  3902. {
  3903. struct cgpu_info *cgpu;
  3904. char logline[256];
  3905. cgpu = get_thr_cgpu(thr_id);
  3906. if (cgpu) {
  3907. get_statline(logline, sizeof(logline), cgpu);
  3908. printf("\n%s\r", logline);
  3909. fflush(stdout);
  3910. }
  3911. }
  3912. #ifdef HAVE_CURSES
  3913. static int attr_bad = A_BOLD;
  3914. #ifdef WIN32
  3915. #define swprintf snwprintf
  3916. #endif
  3917. static
  3918. void bfg_waddstr(WINDOW *win, const char *s)
  3919. {
  3920. const char *p = s;
  3921. int32_t w;
  3922. int wlen;
  3923. unsigned char stop_ascii = (use_unicode ? '|' : 0x80);
  3924. while (true)
  3925. {
  3926. while (likely(p[0] == '\n' || (p[0] >= 0x20 && p[0] < stop_ascii)))
  3927. {
  3928. // Printable ASCII
  3929. ++p;
  3930. }
  3931. if (p != s)
  3932. waddnstr(win, s, p - s);
  3933. w = utf8_decode(p, &wlen);
  3934. s = p += wlen;
  3935. switch(w)
  3936. {
  3937. // NOTE: U+F000-U+F7FF are reserved for font hacks
  3938. case '\0':
  3939. return;
  3940. case 0xb5: // micro symbol
  3941. w = unicode_micro;
  3942. goto default_addch;
  3943. case 0xf000: // "bad" off
  3944. wattroff(win, attr_bad);
  3945. break;
  3946. case 0xf001: // "bad" on
  3947. wattron(win, attr_bad);
  3948. break;
  3949. #ifdef USE_UNICODE
  3950. case '|':
  3951. wadd_wch(win, WACS_VLINE);
  3952. break;
  3953. #endif
  3954. case 0x2500: // BOX DRAWINGS LIGHT HORIZONTAL
  3955. case 0x2534: // BOX DRAWINGS LIGHT UP AND HORIZONTAL
  3956. if (!use_unicode)
  3957. {
  3958. waddch(win, '-');
  3959. break;
  3960. }
  3961. #ifdef USE_UNICODE
  3962. wadd_wch(win, (w == 0x2500) ? WACS_HLINE : WACS_BTEE);
  3963. break;
  3964. #endif
  3965. case 0x2022:
  3966. if (w > WCHAR_MAX || !iswprint(w))
  3967. w = '*';
  3968. default:
  3969. default_addch:
  3970. if (w > WCHAR_MAX || !(iswprint(w) || w == '\n'))
  3971. {
  3972. #if REPLACEMENT_CHAR <= WCHAR_MAX
  3973. if (iswprint(REPLACEMENT_CHAR))
  3974. w = REPLACEMENT_CHAR;
  3975. else
  3976. #endif
  3977. w = '?';
  3978. }
  3979. {
  3980. #ifdef USE_UNICODE
  3981. wchar_t wbuf[0x10];
  3982. int wbuflen = sizeof(wbuf) / sizeof(*wbuf);
  3983. wbuflen = swprintf(wbuf, wbuflen, L"%lc", (wint_t)w);
  3984. waddnwstr(win, wbuf, wbuflen);
  3985. #else
  3986. wprintw(win, "%lc", (wint_t)w);
  3987. #endif
  3988. }
  3989. }
  3990. }
  3991. }
  3992. static inline
  3993. void bfg_hline(WINDOW *win, int y)
  3994. {
  3995. int maxx, __maybe_unused maxy;
  3996. getmaxyx(win, maxy, maxx);
  3997. #ifdef USE_UNICODE
  3998. if (use_unicode)
  3999. mvwhline_set(win, y, 0, WACS_HLINE, maxx);
  4000. else
  4001. #endif
  4002. mvwhline(win, y, 0, '-', maxx);
  4003. }
  4004. static
  4005. int bfg_win_linelen(WINDOW * const win)
  4006. {
  4007. int maxx;
  4008. int __maybe_unused y;
  4009. getmaxyx(win, y, maxx);
  4010. return maxx;
  4011. }
4012. // Fill to end of line with spaces using the current attributes (i.e. paints the background, unlike a true clear)
  4013. static
  4014. void bfg_wspctoeol(WINDOW * const win, const int offset)
  4015. {
  4016. int x, maxx;
  4017. int __maybe_unused y;
  4018. getmaxyx(win, y, maxx);
  4019. getyx(win, y, x);
  4020. const int space_count = (maxx - x) - offset;
  4021. // Check for negative - terminal too narrow
  4022. if (space_count <= 0)
  4023. return;
  4024. char buf[space_count];
  4025. memset(buf, ' ', space_count);
  4026. waddnstr(win, buf, space_count);
  4027. }
  4028. static int menu_attr = A_REVERSE;
  4029. #define CURBUFSIZ 256
  4030. #define cg_mvwprintw(win, y, x, fmt, ...) do { \
  4031. char tmp42[CURBUFSIZ]; \
  4032. snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \
  4033. wmove(win, y, x); \
  4034. bfg_waddstr(win, tmp42); \
  4035. } while (0)
  4036. #define cg_wprintw(win, fmt, ...) do { \
  4037. char tmp42[CURBUFSIZ]; \
  4038. snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \
  4039. bfg_waddstr(win, tmp42); \
  4040. } while (0)
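/* Note (added commentary): both wrappers format into a CURBUFSIZ (256 byte)
 * scratch buffer and render via bfg_waddstr(), so every curses write gets
 * the same UTF-8 and attribute handling and is bounded against overflow. */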
  4041. static
  4042. void update_block_display_line(const int blky, struct mining_goal_info *goal)
  4043. {
  4044. struct blockchain_info * const blkchain = goal->blkchain;
  4045. struct block_info * const blkinfo = blkchain->currentblk;
  4046. double income;
  4047. char incomestr[ALLOC_H2B_SHORT+6+1];
  4048. if (blkinfo->height)
  4049. {
  4050. income = goal->diff_accepted * 3600 * blkchain->currentblk_subsidy / total_secs / goal->current_diff;
  4051. format_unit3(incomestr, sizeof(incomestr), FUP_BTC, "BTC/hr", H2B_SHORT, income/1e8, -1);
  4052. }
  4053. else
  4054. strcpy(incomestr, "?");
  4055. int linelen = bfg_win_linelen(statuswin);
  4056. wmove(statuswin, blky, 0);
  4057. bfg_waddstr(statuswin, " Block");
  4058. if (!goal->is_default)
  4059. linelen -= strlen(goal->name) + 1;
  4060. linelen -= 6; // " Block"
  4061. if (blkinfo->height && blkinfo->height < 1000000)
  4062. {
  4063. cg_wprintw(statuswin, " #%6u", blkinfo->height);
  4064. linelen -= 8;
  4065. }
  4066. bfg_waddstr(statuswin, ":");
  4067. if (linelen > 55)
  4068. bfg_waddstr(statuswin, " ");
  4069. if (linelen >= 65)
  4070. bfg_waddstr(statuswin, "...");
  4071. {
  4072. char hexpbh[0x11];
  4073. if (!(blkinfo->height && blkinfo->height < 1000000))
  4074. {
  4075. bin2hex(hexpbh, &blkinfo->prevblkhash[4], 4);
  4076. bfg_waddstr(statuswin, hexpbh);
  4077. }
  4078. bin2hex(hexpbh, &blkinfo->prevblkhash[0], 4);
  4079. bfg_waddstr(statuswin, hexpbh);
  4080. }
  4081. if (linelen >= 55)
  4082. bfg_waddstr(statuswin, " ");
  4083. cg_wprintw(statuswin, " Diff:%s", goal->current_diff_str);
  4084. if (linelen >= 69)
  4085. bfg_waddstr(statuswin, " ");
  4086. cg_wprintw(statuswin, "(%s) ", goal->net_hashrate);
  4087. if (linelen >= 62)
  4088. {
  4089. if (linelen >= 69)
  4090. bfg_waddstr(statuswin, " ");
  4091. bfg_waddstr(statuswin, "Started:");
  4092. }
  4093. else
  4094. bfg_waddstr(statuswin, "S:");
  4095. if (linelen >= 69)
  4096. bfg_waddstr(statuswin, " ");
  4097. bfg_waddstr(statuswin, blkchain->currentblk_first_seen_time_str);
  4098. if (linelen >= 69)
  4099. bfg_waddstr(statuswin, " ");
  4100. cg_wprintw(statuswin, " I:%s", incomestr);
  4101. if (!goal->is_default)
  4102. cg_wprintw(statuswin, " %s", goal->name);
  4103. wclrtoeol(statuswin);
  4104. }
  4105. static bool pool_actively_in_use(const struct pool *, const struct pool *);
  4106. static
  4107. void update_block_display(const bool within_console_lock)
  4108. {
  4109. struct mining_goal_info *goal, *tmpgoal;
  4110. int blky = 3, i, total_found_goals = 0;
  4111. if (!within_console_lock)
  4112. if (!curses_active_locked())
  4113. return;
  4114. HASH_ITER(hh, mining_goals, goal, tmpgoal)
  4115. {
  4116. for (i = 0; i < total_pools; ++i)
  4117. {
  4118. struct pool * const pool = pools[i];
  4119. if (pool->goal == goal && pool_actively_in_use(pool, NULL))
  4120. break;
  4121. }
  4122. if (i >= total_pools)
  4123. // no pools using this goal, so it's probably stale anyway
  4124. continue;
  4125. update_block_display_line(blky++, goal);
  4126. ++total_found_goals;
  4127. }
  4128. // We cannot do resizing if called within someone else's console lock
  4129. if (within_console_lock)
  4130. return;
  4131. bfg_console_unlock();
  4132. if (total_found_goals != active_goals)
  4133. {
  4134. active_goals = total_found_goals;
  4135. devcursor = 7 + active_goals;
  4136. switch_logsize();
  4137. }
  4138. }
  4139. static bool pool_unworkable(const struct pool *);
  4140. /* Must be called with curses mutex lock held and curses_active */
  4141. static void curses_print_status(const int ts)
  4142. {
  4143. struct pool *pool = currentpool;
  4144. struct timeval now, tv;
  4145. float efficiency;
  4146. int logdiv;
  4147. efficiency = total_bytes_xfer ? total_diff_accepted * 2048. / total_bytes_xfer : 0.0;
  4148. wattron(statuswin, attr_title);
  4149. const int linelen = bfg_win_linelen(statuswin);
  4150. int titlelen = 1 + strlen(PACKAGE) + 1 + strlen(bfgminer_ver) + 3 + 21 + 3 + 19;
  4151. cg_mvwprintw(statuswin, 0, 0, " " PACKAGE " ");
  4152. if (titlelen + 17 < linelen)
  4153. cg_wprintw(statuswin, "version ");
  4154. cg_wprintw(statuswin, "%s - ", bfgminer_ver);
  4155. if (titlelen + 9 < linelen)
  4156. cg_wprintw(statuswin, "Started: ");
  4157. else
  4158. if (titlelen + 7 <= linelen)
  4159. cg_wprintw(statuswin, "Start: ");
  4160. cg_wprintw(statuswin, "%s", datestamp);
  4161. timer_set_now(&now);
  4162. {
  4163. unsigned int days, hours;
  4164. div_t d;
  4165. timersub(&now, &miner_started, &tv);
  4166. d = div(tv.tv_sec, 86400);
  4167. days = d.quot;
  4168. d = div(d.rem, 3600);
  4169. hours = d.quot;
  4170. d = div(d.rem, 60);
  4171. cg_wprintw(statuswin, " - [%3u day%c %02d:%02d:%02d]"
  4172. , days
  4173. , (days == 1) ? ' ' : 's'
  4174. , hours
  4175. , d.quot
  4176. , d.rem
  4177. );
  4178. }
  4179. bfg_wspctoeol(statuswin, 0);
  4180. wattroff(statuswin, attr_title);
  4181. wattron(statuswin, menu_attr);
  4182. wmove(statuswin, 1, 0);
  4183. bfg_waddstr(statuswin, " [M]anage devices [P]ool management [S]ettings [D]isplay options ");
  4184. bfg_wspctoeol(statuswin, 14);
  4185. bfg_waddstr(statuswin, "[H]elp [Q]uit ");
  4186. wattroff(statuswin, menu_attr);
  4187. if ((pool_strategy == POOL_LOADBALANCE || pool_strategy == POOL_BALANCE) && enabled_pools > 1) {
  4188. char poolinfo[20], poolinfo2[20];
  4189. int poolinfooff = 0, poolinfo2off, workable_pools = 0;
  4190. double lowdiff = DBL_MAX, highdiff = -1;
  4191. struct pool *lowdiff_pool = pools[0], *highdiff_pool = pools[0];
  4192. time_t oldest_work_restart = time(NULL) + 1;
  4193. struct pool *oldest_work_restart_pool = pools[0];
  4194. for (int i = 0; i < total_pools; ++i)
  4195. {
  4196. if (pool_unworkable(pools[i]))
  4197. continue;
  4198. // NOTE: Only set pool var when it's workable; if only one is, it gets used by single-pool code
  4199. pool = pools[i];
  4200. ++workable_pools;
  4201. if (poolinfooff < sizeof(poolinfo))
  4202. poolinfooff += snprintf(&poolinfo[poolinfooff], sizeof(poolinfo) - poolinfooff, "%u,", pool->pool_no);
  4203. struct cgminer_pool_stats * const pool_stats = &pool->cgminer_pool_stats;
  4204. if (pool_stats->last_diff < lowdiff)
  4205. {
  4206. lowdiff = pool_stats->last_diff;
  4207. lowdiff_pool = pool;
  4208. }
  4209. if (pool_stats->last_diff > highdiff)
  4210. {
  4211. highdiff = pool_stats->last_diff;
  4212. highdiff_pool = pool;
  4213. }
  4214. if (oldest_work_restart >= pool->work_restart_time)
  4215. {
  4216. oldest_work_restart = pool->work_restart_time;
  4217. oldest_work_restart_pool = pool;
  4218. }
  4219. }
  4220. if (unlikely(!workable_pools))
  4221. goto no_workable_pools;
  4222. if (workable_pools == 1)
  4223. goto one_workable_pool;
  4224. poolinfo2off = snprintf(poolinfo2, sizeof(poolinfo2), "%u (", workable_pools);
  4225. if (poolinfooff > sizeof(poolinfo2) - poolinfo2off - 1)
  4226. snprintf(&poolinfo2[poolinfo2off], sizeof(poolinfo2) - poolinfo2off, "%.*s...)", (int)(sizeof(poolinfo2) - poolinfo2off - 5), poolinfo);
  4227. else
  4228. snprintf(&poolinfo2[poolinfo2off], sizeof(poolinfo2) - poolinfo2off, "%.*s)%*s", (int)(poolinfooff - 1), poolinfo, (int)(sizeof(poolinfo2)), "");
  4229. cg_mvwprintw(statuswin, 2, 0, " Pools: %s Diff:%s%s%s %c LU:%s",
  4230. poolinfo2,
  4231. lowdiff_pool->diff,
  4232. (lowdiff == highdiff) ? "" : "-",
  4233. (lowdiff == highdiff) ? "" : highdiff_pool->diff,
  4234. pool->goal->have_longpoll ? '+' : '-',
  4235. oldest_work_restart_pool->work_restart_timestamp);
  4236. }
  4237. else
  4238. if (pool_unworkable(pool))
  4239. {
  4240. no_workable_pools: ;
  4241. wattron(statuswin, attr_bad);
  4242. cg_mvwprintw(statuswin, 2, 0, " (all pools are dead) ");
  4243. wattroff(statuswin, attr_bad);
  4244. }
  4245. else
  4246. {
  4247. one_workable_pool: ;
  4248. char pooladdr[19];
  4249. {
  4250. const char *rawaddr = pool->sockaddr_url;
  4251. BFGINIT(rawaddr, pool->rpc_url);
  4252. size_t pooladdrlen = strlen(rawaddr);
  4253. if (pooladdrlen > 20)
  4254. snprintf(pooladdr, sizeof(pooladdr), "...%s", &rawaddr[pooladdrlen - (sizeof(pooladdr) - 4)]);
  4255. else
  4256. snprintf(pooladdr, sizeof(pooladdr), "%*s", -(int)(sizeof(pooladdr) - 1), rawaddr);
  4257. }
  4258. cg_mvwprintw(statuswin, 2, 0, " Pool%2u: %s Diff:%s %c%s LU:%s User:%s",
  4259. pool->pool_no, pooladdr, pool->diff,
  4260. pool->goal->have_longpoll ? '+' : '-', pool_proto_str(pool),
  4261. pool->work_restart_timestamp,
  4262. pool->rpc_user);
  4263. }
  4264. wclrtoeol(statuswin);
  4265. update_block_display(true);
  4266. char bwstr[(ALLOC_H2B_SHORT*2)+3+1];
  4267. cg_mvwprintw(statuswin, devcursor - 4, 0, " ST:%d F:%d NB:%d AS:%d BW:[%s] E:%.2f BS:%s",
  4268. ts,
  4269. total_go + total_ro,
  4270. new_blocks,
  4271. total_submitting,
  4272. multi_format_unit2(bwstr, sizeof(bwstr),
  4273. false, "B/s", H2B_SHORT, "/", 2,
  4274. (float)(total_bytes_rcvd / total_secs),
  4275. (float)(total_bytes_sent / total_secs)),
  4276. efficiency,
  4277. best_share);
  4278. wclrtoeol(statuswin);
  4279. mvwaddstr(statuswin, devcursor - 3, 0, " ");
  4280. bfg_waddstr(statuswin, statusline);
  4281. wclrtoeol(statuswin);
  4282. int devdiv = devcursor - 2;
  4283. logdiv = statusy - 1;
  4284. bfg_hline(statuswin, devdiv);
  4285. bfg_hline(statuswin, logdiv);
  4286. #ifdef USE_UNICODE
  4287. if (use_unicode)
  4288. {
  4289. int offset = 8 /* device */ + 5 /* temperature */ + 1 /* padding space */;
  4290. if (opt_show_procs && !opt_compact)
  4291. offset += max_lpdigits; // proc letter(s)
  4292. if (have_unicode_degrees)
  4293. ++offset; // degrees symbol
  4294. mvwadd_wch(statuswin, devdiv, offset, WACS_PLUS);
  4295. mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE);
  4296. offset += 24; // hashrates etc
  4297. mvwadd_wch(statuswin, devdiv, offset, WACS_PLUS);
  4298. mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE);
  4299. }
  4300. #endif
  4301. }
  4302. static void adj_width(int var, int *length)
  4303. {
  4304. if ((int)(log10(var) + 1) > *length)
  4305. (*length)++;
  4306. }
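/* Behaviour sketch (illustrative): adj_width() only ever widens a column,
 * and by at most one digit per call, so the layout ratchets as counters pass
 * each power of ten - e.g. *length goes 1 -> 2 once var first reaches 10. */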
  4307. static int dev_width;
  4308. static void curses_print_devstatus(struct cgpu_info *cgpu)
  4309. {
  4310. char logline[256];
  4311. int ypos;
  4312. if (opt_compact)
  4313. return;
  4314. /* Check this isn't out of the window size */
  4315. if (opt_show_procs)
  4316. ypos = cgpu->cgminer_id;
  4317. else
  4318. {
  4319. if (cgpu->proc_id)
  4320. return;
  4321. ypos = cgpu->device_line_id;
  4322. }
  4323. ypos += devsummaryYOffset;
  4324. if (ypos < 0)
  4325. return;
  4326. ypos += devcursor - 1;
  4327. if (ypos >= statusy - 1)
  4328. return;
  4329. if (wmove(statuswin, ypos, 0) == ERR)
  4330. return;
  4331. get_statline2(logline, sizeof(logline), cgpu, true);
  4332. if (selecting_device && (opt_show_procs ? (selected_device == cgpu->cgminer_id) : (devices[selected_device]->device == cgpu)))
  4333. wattron(statuswin, A_REVERSE);
  4334. bfg_waddstr(statuswin, logline);
  4335. wattroff(statuswin, A_REVERSE);
  4336. wclrtoeol(statuswin);
  4337. }
  4338. static
  4339. void _refresh_devstatus(const bool already_have_lock) {
  4340. if ((!opt_compact) && (already_have_lock || curses_active_locked())) {
  4341. int i;
  4342. if (unlikely(!total_devices))
  4343. {
  4344. const int ypos = devcursor - 1;
  4345. if (ypos < statusy - 1 && wmove(statuswin, ypos, 0) != ERR)
  4346. {
  4347. wattron(statuswin, attr_bad);
  4348. bfg_waddstr(statuswin, "NO DEVICES FOUND: Press 'M' and '+' to add");
  4349. wclrtoeol(statuswin);
  4350. wattroff(statuswin, attr_bad);
  4351. }
  4352. }
  4353. for (i = 0; i < total_devices; i++)
  4354. curses_print_devstatus(get_devices(i));
  4355. touchwin(statuswin);
  4356. wrefresh(statuswin);
  4357. if (!already_have_lock)
  4358. unlock_curses();
  4359. }
  4360. }
  4361. #define refresh_devstatus() _refresh_devstatus(false)
  4362. #endif
  4363. static void print_status(int thr_id)
  4364. {
  4365. if (!curses_active)
  4366. text_print_status(thr_id);
  4367. }
  4368. #ifdef HAVE_CURSES
  4369. static
  4370. bool set_statusy(int maxy)
  4371. {
  4372. if (loginput_size)
  4373. {
  4374. maxy -= loginput_size;
  4375. if (maxy < 0)
  4376. maxy = 0;
  4377. }
  4378. if (logstart < maxy)
  4379. maxy = logstart;
  4380. if (statusy == maxy)
  4381. return false;
  4382. statusy = maxy;
  4383. logcursor = statusy;
  4384. return true;
  4385. }
  4386. /* Check for window resize. Called with curses mutex locked */
  4387. static inline void change_logwinsize(void)
  4388. {
  4389. int x, y, logx, logy;
  4390. getmaxyx(mainwin, y, x);
  4391. if (x < 80 || y < 25)
  4392. return;
  4393. if (y > statusy + 2 && statusy < logstart) {
  4394. set_statusy(y - 2);
  4395. mvwin(logwin, logcursor, 0);
  4396. bfg_wresize(statuswin, statusy, x);
  4397. }
  4398. y -= logcursor;
  4399. getmaxyx(logwin, logy, logx);
  4400. /* Detect screen size change */
  4401. if (x != logx || y != logy)
  4402. bfg_wresize(logwin, y, x);
  4403. }
  4404. static void check_winsizes(void)
  4405. {
  4406. if (!use_curses)
  4407. return;
  4408. if (curses_active_locked()) {
  4409. int y, x;
  4410. x = getmaxx(statuswin);
  4411. if (set_statusy(LINES - 2))
  4412. {
  4413. erase();
  4414. bfg_wresize(statuswin, statusy, x);
  4415. getmaxyx(mainwin, y, x);
  4416. y -= logcursor;
  4417. bfg_wresize(logwin, y, x);
  4418. mvwin(logwin, logcursor, 0);
  4419. }
  4420. unlock_curses();
  4421. }
  4422. }
  4423. static int device_line_id_count;
  4424. static void switch_logsize(void)
  4425. {
  4426. if (curses_active_locked()) {
  4427. if (opt_compact) {
  4428. logstart = devcursor - 1;
  4429. logcursor = logstart + 1;
  4430. } else {
  4431. total_lines = (opt_show_procs ? total_devices : device_line_id_count) ?: 1;
  4432. logstart = devcursor + total_lines;
  4433. logcursor = logstart;
  4434. }
  4435. unlock_curses();
  4436. }
  4437. check_winsizes();
  4438. update_block_display(false);
  4439. }
  4440. /* For mandatory printing when mutex is already locked */
  4441. void _wlog(const char *str)
  4442. {
  4443. static bool newline;
4444. size_t end = strlen(str) - 1;  // NOTE: assumes str is never empty
  4445. if (newline)
  4446. bfg_waddstr(logwin, "\n");
  4447. if (str[end] == '\n')
  4448. {
  4449. char *s;
  4450. newline = true;
  4451. s = alloca(end + 1);
  4452. memcpy(s, str, end);
  4453. s[end] = '\0';
  4454. str = s;
  4455. }
  4456. else
  4457. newline = false;
  4458. bfg_waddstr(logwin, str);
  4459. }
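/* Behaviour note (added commentary): _wlog() defers a trailing newline - it
 * is recorded in the static flag and emitted at the start of the next call -
 * so a trailing '\n' never leaves the bottom line of logwin blank. */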
  4460. /* Mandatory printing */
  4461. void _wlogprint(const char *str)
  4462. {
  4463. if (curses_active_locked()) {
  4464. _wlog(str);
  4465. unlock_curses();
  4466. }
  4467. }
  4468. #endif
  4469. #ifdef HAVE_CURSES
  4470. bool _log_curses_only(int prio, const char *datetime, const char *str)
  4471. {
  4472. bool high_prio;
  4473. high_prio = (prio == LOG_WARNING || prio == LOG_ERR);
  4474. if (curses_active)
  4475. {
  4476. if (!loginput_size || high_prio) {
  4477. wlog(" %s %s\n", datetime, str);
  4478. if (high_prio) {
  4479. touchwin(logwin);
  4480. wrefresh(logwin);
  4481. }
  4482. }
  4483. return true;
  4484. }
  4485. return false;
  4486. }
  4487. void clear_logwin(void)
  4488. {
  4489. if (curses_active_locked()) {
  4490. wclear(logwin);
  4491. unlock_curses();
  4492. }
  4493. }
  4494. void logwin_update(void)
  4495. {
  4496. if (curses_active_locked()) {
  4497. touchwin(logwin);
  4498. wrefresh(logwin);
  4499. unlock_curses();
  4500. }
  4501. }
  4502. #endif
  4503. void enable_pool(struct pool * const pool)
  4504. {
  4505. if (pool->enabled != POOL_ENABLED) {
  4506. mutex_lock(&lp_lock);
  4507. enabled_pools++;
  4508. pool->enabled = POOL_ENABLED;
  4509. pthread_cond_broadcast(&lp_cond);
  4510. mutex_unlock(&lp_lock);
  4511. if (pool->prio < current_pool()->prio)
  4512. switch_pools(pool);
  4513. }
  4514. }
  4515. void manual_enable_pool(struct pool * const pool)
  4516. {
  4517. pool->failover_only = false;
  4518. BFGINIT(pool->quota, 1);
  4519. enable_pool(pool);
  4520. }
  4521. void disable_pool(struct pool * const pool, const enum pool_enable enable_status)
  4522. {
  4523. if (pool->enabled == POOL_DISABLED)
  4524. /* had been manually disabled before */
  4525. return;
  4526. if (pool->enabled != POOL_ENABLED)
  4527. {
  4528. /* has been programmatically disabled already, just change to the new status directly */
  4529. pool->enabled = enable_status;
  4530. return;
  4531. }
  4532. /* Fall into the lock area */
  4533. mutex_lock(&lp_lock);
  4534. --enabled_pools;
  4535. pool->enabled = enable_status;
  4536. mutex_unlock(&lp_lock);
  4537. if (pool == current_pool())
  4538. switch_pools(NULL);
  4539. }
  4540. static
  4541. void share_result_msg(const struct work *work, const char *disp, const char *reason, bool resubmit, const char *worktime) {
  4542. struct cgpu_info *cgpu;
  4543. const struct mining_algorithm * const malgo = work_mining_algorithm(work);
  4544. const unsigned char *hashpart = &work->hash[0x1c - malgo->ui_skip_hash_bytes];
  4545. char shrdiffdisp[ALLOC_H2B_SHORTV];
  4546. const double tgtdiff = work->work_difficulty;
  4547. char tgtdiffdisp[ALLOC_H2B_SHORTV];
  4548. char where[20];
  4549. cgpu = get_thr_cgpu(work->thr_id);
  4550. suffix_string(work->share_diff, shrdiffdisp, sizeof(shrdiffdisp), 0);
  4551. suffix_string(tgtdiff, tgtdiffdisp, sizeof(tgtdiffdisp), 0);
  4552. if (total_pools > 1)
  4553. snprintf(where, sizeof(where), " pool %d", work->pool->pool_no);
  4554. else
  4555. where[0] = '\0';
  4556. applog(LOG_NOTICE, "%s %02x%02x%02x%02x %"PRIprepr"%s Diff %s/%s%s %s%s",
  4557. disp,
  4558. (unsigned)hashpart[3], (unsigned)hashpart[2], (unsigned)hashpart[1], (unsigned)hashpart[0],
  4559. cgpu->proc_repr,
  4560. where,
  4561. shrdiffdisp, tgtdiffdisp,
  4562. reason,
  4563. resubmit ? "(resubmit)" : "",
  4564. worktime
  4565. );
  4566. }
  4567. static bool test_work_current(struct work *);
  4568. static void _submit_work_async(struct work *);
  4569. static
  4570. void maybe_local_submit(const struct work *work)
  4571. {
  4572. #if BLKMAKER_VERSION > 3
  4573. if (unlikely(work->block && work->tr))
  4574. {
  4575. // This is a block with a full template (GBT)
  4576. // Regardless of the result, submit to local bitcoind(s) as well
  4577. struct work *work_cp;
  4578. for (int i = 0; i < total_pools; ++i)
  4579. {
  4580. if (!uri_get_param_bool(pools[i]->rpc_url, "allblocks", false))
  4581. continue;
  4582. applog(LOG_DEBUG, "Attempting submission of full block to pool %d", pools[i]->pool_no);
  4583. work_cp = copy_work(work);
  4584. work_cp->pool = pools[i];
  4585. work_cp->do_foreign_submit = true;
  4586. _submit_work_async(work_cp);
  4587. }
  4588. }
  4589. #endif
  4590. }
  4591. static
  4592. json_t *extract_reject_reason_j(json_t * const val, json_t *res, json_t * const err, const struct work * const work)
  4593. {
  4594. if (json_is_string(res))
  4595. return res;
  4596. if ( (res = json_object_get(val, "reject-reason")) )
  4597. return res;
  4598. if (work->stratum && err && json_is_array(err) && json_array_size(err) >= 2 && (res = json_array_get(err, 1)) && json_is_string(res))
  4599. return res;
  4600. return NULL;
  4601. }
  4602. static
  4603. const char *extract_reject_reason(json_t * const val, json_t *res, json_t * const err, const struct work * const work)
  4604. {
  4605. json_t * const j = extract_reject_reason_j(val, res, err, work);
  4606. return j ? json_string_value(j) : NULL;
  4607. }
  4608. static
  4609. int put_in_parens(char * const buf, const size_t bufsz, const char * const s)
  4610. {
  4611. if (!s)
  4612. {
  4613. if (bufsz)
  4614. buf[0] = '\0';
  4615. return 0;
  4616. }
  4617. int p = snprintf(buf, bufsz, " (%s", s);
  4618. if (p >= bufsz - 1)
  4619. p = bufsz - 2;
  4620. strcpy(&buf[p], ")");
  4621. return p + 1;
  4622. }
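/* Usage sketch (illustrative): put_in_parens() wraps a reject reason for
 * display, truncating while preserving the closing parenthesis; e.g.
 * s = "stale" gives buf = " (stale)" and returns 8, the result's length. */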
  4623. /* Theoretically threads could race when modifying accepted and
  4624. * rejected values but the chance of two submits completing at the
  4625. * same time is zero so there is no point adding extra locking */
  4626. static void
  4627. share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
  4628. /*char *hashshow,*/ bool resubmit, char *worktime)
  4629. {
  4630. struct pool *pool = work->pool;
  4631. struct cgpu_info *cgpu;
  4632. cgpu = get_thr_cgpu(work->thr_id);
  4633. if ((json_is_null(err) || !err) && (json_is_null(res) || json_is_true(res))) {
  4634. struct mining_goal_info * const goal = pool->goal;
  4635. mutex_lock(&stats_lock);
  4636. cgpu->accepted++;
  4637. total_accepted++;
  4638. pool->accepted++;
  4639. cgpu->diff_accepted += work->work_difficulty;
  4640. total_diff_accepted += work->work_difficulty;
  4641. pool->diff_accepted += work->work_difficulty;
  4642. goal->diff_accepted += work->work_difficulty;
  4643. mutex_unlock(&stats_lock);
  4644. pool->seq_rejects = 0;
  4645. cgpu->last_share_pool = pool->pool_no;
  4646. cgpu->last_share_pool_time = time(NULL);
  4647. cgpu->last_share_diff = work->work_difficulty;
  4648. pool->last_share_time = cgpu->last_share_pool_time;
  4649. pool->last_share_diff = work->work_difficulty;
  4650. applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
  4651. if (!QUIET) {
  4652. share_result_msg(work, "Accepted", "", resubmit, worktime);
  4653. }
  4654. sharelog("accept", work);
  4655. if (opt_shares && total_diff_accepted >= opt_shares) {
  4656. applog(LOG_WARNING, "Successfully mined %g accepted shares as requested and exiting.", opt_shares);
  4657. kill_work();
  4658. return;
  4659. }
  4660. /* Detect if a pool that has been temporarily disabled for
  4661. * continually rejecting shares has started accepting shares.
  4662. * This will only happen with the work returned from a
  4663. * longpoll */
  4664. if (unlikely(pool->enabled == POOL_REJECTING)) {
  4665. applog(LOG_WARNING, "Rejecting pool %d now accepting shares, re-enabling!", pool->pool_no);
  4666. enable_pool(pool);
  4667. }
  4668. if (unlikely(work->block)) {
  4669. // Force moving on to this new block :)
  4670. struct work fakework;
  4671. memset(&fakework, 0, sizeof(fakework));
  4672. fakework.pool = work->pool;
  4673. // Copy block version, bits, and time from share
  4674. memcpy(&fakework.data[ 0], &work->data[ 0], 4);
  4675. memcpy(&fakework.data[68], &work->data[68], 8);
  4676. // Set prevblock to winning hash (swap32'd)
  4677. swap32yes(&fakework.data[4], &work->hash[0], 32 / 4);
  4678. test_work_current(&fakework);
  4679. }
  4680. }
  4681. else
  4682. if (!hash_target_check(work->hash, work->target))
  4683. {
  4684. // This was submitted despite failing the proper target
  4685. // Quietly ignore the reject
  4686. char reason[32];
  4687. put_in_parens(reason, sizeof(reason), extract_reject_reason(val, res, err, work));
  4688. applog(LOG_DEBUG, "Share above target rejected%s by pool %u as expected, ignoring", reason, pool->pool_no);
  4689. // Stratum error 23 is "low difficulty share", which suggests this pool tracks job difficulty correctly.
  4690. // Therefore, we disable retrodiff if it was enabled-by-default.
  4691. if (pool->pool_diff_effective_retroactively == BTS_UNKNOWN) {
  4692. json_t *errnum;
  4693. if (work->stratum && err && json_is_array(err) && json_array_size(err) >= 1 && (errnum = json_array_get(err, 0)) && json_is_number(errnum) && ((int)json_number_value(errnum)) == 23) {
  4694. applog(LOG_DEBUG, "Disabling retroactive difficulty adjustments for pool %u", pool->pool_no);
  4695. pool->pool_diff_effective_retroactively = false;
  4696. }
  4697. }
  4698. } else {
  4699. mutex_lock(&stats_lock);
  4700. cgpu->rejected++;
  4701. total_rejected++;
  4702. pool->rejected++;
  4703. cgpu->diff_rejected += work->work_difficulty;
  4704. total_diff_rejected += work->work_difficulty;
  4705. pool->diff_rejected += work->work_difficulty;
  4706. pool->seq_rejects++;
  4707. mutex_unlock(&stats_lock);
  4708. applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
  4709. if (!QUIET) {
  4710. char disposition[36] = "reject";
  4711. char reason[32];
  4712. const char *reasontmp = extract_reject_reason(val, res, err, work);
  4713. int n = put_in_parens(reason, sizeof(reason), reasontmp);
  4714. if (reason[0])
  4715. snprintf(&disposition[6], sizeof(disposition) - 6, ":%.*s", n - 3, &reason[2]);
  4716. share_result_msg(work, "Rejected", reason, resubmit, worktime);
  4717. sharelog(disposition, work);
  4718. }
  4719. /* Once we have more than a nominal amount of sequential rejects,
  4720. * at least 10 and more than 3 mins at the current utility,
  4721. * disable the pool because some pool error is likely to have
  4722. * ensued. Do not do this if we know the share just happened to
  4723. * be stale due to networking delays.
  4724. */
  4725. if (pool->seq_rejects > 10 && !work->stale && opt_disable_pool && enabled_pools > 1) {
  4726. double utility = total_accepted / total_secs * 60;
  4727. if (pool->seq_rejects > utility * 3) {
  4728. applog(LOG_WARNING, "Pool %d rejected %d sequential shares, disabling!",
  4729. pool->pool_no, pool->seq_rejects);
  4730. disable_pool(pool, POOL_REJECTING);
  4731. pool->seq_rejects = 0;
  4732. }
  4733. }
  4734. }
  4735. maybe_local_submit(work);
  4736. }
static char *submit_upstream_work_request(struct work *work)
{
	char *hexstr = NULL;
	char *s, *sd;
	struct pool *pool = work->pool;

	if (work->tr)
	{
		blktemplate_t * const tmpl = work->tr->tmpl;
		json_t *req;
		unsigned char data[80];

		swap32yes(data, work->data, 80 / 4);
#if BLKMAKER_VERSION > 6
		if (work->stratum) {
			req = blkmk_submitm_jansson(tmpl, data, bytes_buf(&work->nonce2), bytes_len(&work->nonce2), le32toh(*((uint32_t*)&work->data[76])), work->do_foreign_submit);
		} else
#endif
#if BLKMAKER_VERSION > 3
		if (work->do_foreign_submit)
			req = blkmk_submit_foreign_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
		else
#endif
			req = blkmk_submit_jansson(tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76])));
		s = json_dumps(req, 0);
		json_decref(req);
		sd = malloc(161);
		bin2hex(sd, data, 80);
	} else {
		/* build hex string */
		hexstr = malloc((sizeof(work->data) * 2) + 1);
		bin2hex(hexstr, work->data, sizeof(work->data));

		/* build JSON-RPC request */
		s = strdup("{\"method\": \"getwork\", \"params\": [ \"");
		s = realloc_strcat(s, hexstr);
		s = realloc_strcat(s, "\" ], \"id\":1}");

		free(hexstr);
		sd = s;
	}

	applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
	if (work->tr)
		free(sd);
	else
		s = realloc_strcat(s, "\n");

	return s;
}
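/* For illustration: in the getwork case above, the request ends up as a
 * single JSON-RPC line, roughly
 *
 *   {"method": "getwork", "params": [ "<hex of the 128-byte work->data>" ], "id":1}
 *
 * i.e. the full work->data buffer is hex-encoded into the params array. The
 * GBT (work->tr) case instead delegates request construction to
 * libblkmaker's blkmk_submit*_jansson() helpers. */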
static bool submit_upstream_work_completed(struct work *work, bool resubmit, struct timeval *ptv_submit, json_t *val) {
	json_t *res, *err;
	bool rc = false;
	int thr_id = work->thr_id;
	struct pool *pool = work->pool;
	struct timeval tv_submit_reply;
	time_t ts_submit_reply;
	char worktime[200] = "";

	cgtime(&tv_submit_reply);
	ts_submit_reply = time(NULL);

	if (unlikely(!val)) {
		applog(LOG_INFO, "submit_upstream_work json_rpc_call failed");
		if (!pool_tset(pool, &pool->submit_fail)) {
			total_ro++;
			pool->remotefail_occasions++;
			applog(LOG_WARNING, "Pool %d communication failure, caching submissions", pool->pool_no);
		}
		goto out;
	} else if (pool_tclear(pool, &pool->submit_fail))
		applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);

	res = json_object_get(val, "result");
	err = json_object_get(val, "error");

	if (!QUIET) {
		if (opt_worktime) {
			char workclone[20];
			struct tm _tm;
			struct tm *tm, tm_getwork, tm_submit_reply;
			tm = &_tm;
			double getwork_time = tdiff((struct timeval *)&(work->tv_getwork_reply),
						(struct timeval *)&(work->tv_getwork));
			double getwork_to_work = tdiff((struct timeval *)&(work->tv_work_start),
						(struct timeval *)&(work->tv_getwork_reply));
			double work_time = tdiff((struct timeval *)&(work->tv_work_found),
						(struct timeval *)&(work->tv_work_start));
			double work_to_submit = tdiff(ptv_submit,
						(struct timeval *)&(work->tv_work_found));
			double submit_time = tdiff(&tv_submit_reply, ptv_submit);
			int diffplaces = 3;

			localtime_r(&work->ts_getwork, tm);
			memcpy(&tm_getwork, tm, sizeof(struct tm));
			localtime_r(&ts_submit_reply, tm);
			memcpy(&tm_submit_reply, tm, sizeof(struct tm));

			if (work->clone) {
				snprintf(workclone, sizeof(workclone), "C:%1.3f",
					tdiff((struct timeval *)&(work->tv_cloned),
					(struct timeval *)&(work->tv_getwork_reply)));
			}
			else
				strcpy(workclone, "O");

			if (work->work_difficulty < 1)
				diffplaces = 6;

			const struct mining_algorithm * const malgo = work_mining_algorithm(work);
			const uint8_t * const prevblkhash = &work->data[4];
			snprintf(worktime, sizeof(worktime),
				" <-%08lx.%08lx M:%c D:%1.*f G:%02d:%02d:%02d:%1.3f %s (%1.3f) W:%1.3f (%1.3f) S:%1.3f R:%02d:%02d:%02d",
				(unsigned long)be32toh(((uint32_t *)prevblkhash)[7 - malgo->worktime_skip_prevblk_u32]),
				(unsigned long)be32toh(((uint32_t *)prevblkhash)[6 - malgo->worktime_skip_prevblk_u32]),
				work->getwork_mode, diffplaces, work->work_difficulty,
				tm_getwork.tm_hour, tm_getwork.tm_min,
				tm_getwork.tm_sec, getwork_time, workclone,
				getwork_to_work, work_time, work_to_submit, submit_time,
				tm_submit_reply.tm_hour, tm_submit_reply.tm_min,
				tm_submit_reply.tm_sec);
		}
	}

	share_result(val, res, err, work, resubmit, worktime);

	if (!opt_realquiet)
		print_status(thr_id);
	if (!want_per_device_stats) {
		char logline[256];
		struct cgpu_info *cgpu;

		cgpu = get_thr_cgpu(thr_id);
		get_statline(logline, sizeof(logline), cgpu);
		applog(LOG_INFO, "%s", logline);
	}

	json_decref(val);

	rc = true;
out:
	return rc;
}
/* Specifies whether we can use this pool for work or not. */
static bool pool_unworkable(const struct pool * const pool)
{
	if (pool->idle)
		return true;
	if (pool->enabled != POOL_ENABLED)
		return true;
	if (pool->has_stratum && !pool->stratum_active)
		return true;
	return false;
}
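/* Note the distinction from pool_unusable() further below: pool_unusable()
 * only asks whether a pool may be switched to (enabled and not idle), while
 * pool_unworkable() additionally treats a stratum pool whose connection is
 * not currently active as unable to supply work right now. */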
static struct pool *priority_pool(int);
static bool pool_unusable(struct pool *);

static
bool pool_actively_desired(const struct pool * const pool, const struct pool *cp)
{
	if (pool->enabled != POOL_ENABLED)
		return false;
	if (pool_strategy == POOL_LOADBALANCE && pool->quota)
		return true;
	if (pool_strategy == POOL_BALANCE && !pool->failover_only)
		return true;
	if (!cp)
		cp = current_pool();
	if (pool == cp)
		return true;

	// If we are the highest-priority workable pool for a given algorithm, we are needed
	struct mining_algorithm * const malgo = pool->goal->malgo;
	for (int i = 0; i < total_pools; ++i)
	{
		struct pool * const other_pool = priority_pool(i);
		if (other_pool == pool)
			return true;
		if (pool_unusable(other_pool))
			continue;
		if (other_pool->goal->malgo == malgo)
			break;
	}
	return false;
}

static
bool pool_actively_in_use(const struct pool * const pool, const struct pool *cp)
{
	return (!pool_unworkable(pool)) && pool_actively_desired(pool, cp);
}

static
bool pool_supports_block_change_notification(struct pool * const pool)
{
	return pool->has_stratum || pool->lp_url;
}

static
bool pool_has_active_block_change_notification(struct pool * const pool)
{
	return pool->stratum_active || pool->lp_active;
}

static struct pool *_select_longpoll_pool(struct pool *, bool(*)(struct pool *));
#define select_longpoll_pool(pool)  _select_longpoll_pool(pool, pool_supports_block_change_notification)
#define pool_active_lp_pool(pool)   _select_longpoll_pool(pool, pool_has_active_block_change_notification)
/* In balanced mode, the amount of diff1 solutions per pool is monitored as a
 * rolling average per 10 minutes and if pools start getting more, it biases
 * away from them to distribute work evenly. The share count is reset to the
 * rolling average every 10 minutes to not send all work to one pool after it
 * has been disabled/out for an extended period. */
static
struct pool *select_balanced(struct pool *cp, struct mining_algorithm * const malgo)
{
	int i, lowest = cp->shares;
	struct pool *ret = cp, *failover_pool = NULL;

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (malgo && pool->goal->malgo != malgo)
			continue;
		if (pool_unworkable(pool))
			continue;
		if (pool->failover_only)
		{
			BFGINIT(failover_pool, pool);
			continue;
		}
		if (pool->shares < lowest) {
			lowest = pool->shares;
			ret = pool;
		}
	}

	if (malgo && ret->goal->malgo != malgo)
		// Yes, we want failover_pool even if it's NULL
		ret = failover_pool;
	else
	if (pool_unworkable(ret) && failover_pool)
		ret = failover_pool;

	if (ret)
		++ret->shares;
	return ret;
}
static
struct pool *select_loadbalance(struct mining_algorithm * const malgo)
{
	static int rotating_pool = 0;
	struct pool *pool;
	bool avail = false;
	int tested, i, rpsave;

	for (i = 0; i < total_pools; i++) {
		struct pool *tp = pools[i];

		if (tp->quota_used < tp->quota_gcd) {
			avail = true;
			break;
		}
	}

	/* There are no pools with quota, so reset them. */
	if (!avail) {
		for (i = 0; i < total_pools; i++)
		{
			struct pool * const tp = pools[i];
			tp->quota_used -= tp->quota_gcd;
		}
		if (++rotating_pool >= total_pools)
			rotating_pool = 0;
	}

	/* Try to find the first pool in the rotation that is usable */
	// Look for the lowest integer quota_used / quota_gcd in case we are imbalanced by algorithm demands
	struct pool *pool_lowest = NULL;
	int lowest = INT_MAX;
	rpsave = rotating_pool;
	for (tested = 0; tested < total_pools; ++tested)
	{
		pool = pools[rotating_pool];
		if (malgo && pool->goal->malgo != malgo)
			goto continue_tested;
		if (pool->quota_used < pool->quota_gcd)
		{
			++pool->quota_used;
			if (!pool_unworkable(pool))
				goto out;

			/* Failover-only flag for load-balance means distribute
			 * unused quota to priority pool 0. */
			if (opt_fail_only)
				priority_pool(0)->quota_used--;
		}
		if (malgo)
		{
			const int count = pool->quota_used / pool->quota_gcd;
			if (count < lowest)
			{
				pool_lowest = pool;
				lowest = count;
			}
		}
continue_tested: ;
		if (++rotating_pool >= total_pools)
			rotating_pool = 0;
	}
	// Even if pool_lowest is NULL, we want to return that to indicate failure
	// Note it isn't possible to get here if !malgo
	pool = pool_lowest;

out: ;
	// Restore rotating_pool static, so malgo searches don't affect the usual load balancing
	if (malgo)
		rotating_pool = rpsave;
	return pool;
}
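/* Rough quota example: with two pools carrying quotas 2 and 1, quota_gcd
 * works out to 2 and 1 respectively (each pool's quota divided by the GCD of
 * all quotas), so a full rotation hands out two work items from the first
 * pool for every one from the second before quota_used is wound back down
 * and the cycle repeats. */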
static
struct pool *select_failover(struct mining_algorithm * const malgo)
{
	int i;

	for (i = 0; i < total_pools; i++) {
		struct pool *tp = priority_pool(i);

		if (malgo && tp->goal->malgo != malgo)
			continue;
		if (!pool_unusable(tp)) {
			return tp;
		}
	}
	return NULL;
}
static bool pool_active(struct pool *, bool pinging);
static void pool_died(struct pool *);

/* Select any active pool in a rotating fashion when loadbalance is chosen if
 * it has any quota left. */
static inline struct pool *select_pool(bool lagging, struct mining_algorithm * const malgo)
{
	struct pool *pool = NULL, *cp;

retry:
	cp = current_pool();

	if (pool_strategy == POOL_BALANCE) {
		pool = select_balanced(cp, malgo);
		if ((!pool) || pool_unworkable(pool))
			goto simple_failover;
		goto out;
	}

	if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only)) {
		if (malgo && cp->goal->malgo != malgo)
			goto simple_failover;
		pool = cp;
		goto out;
	} else
		pool = select_loadbalance(malgo);

simple_failover:
	/* If there are no alive pools with quota, choose according to
	 * priority. */
	if (!pool) {
		pool = select_failover(malgo);
	}

	/* If still nothing is usable, use the current pool */
	if (!pool)
	{
		if (malgo && cp->goal->malgo != malgo)
		{
			applog(LOG_DEBUG, "Failed to select pool for specific mining algorithm '%s'", malgo->name);
			return NULL;
		}
		pool = cp;
	}

out:
	if (!pool_actively_in_use(pool, cp))
	{
		if (!pool_active(pool, false))
		{
			pool_died(pool);
			goto retry;
		}
		pool_tclear(pool, &pool->idle);
	}
	applog(LOG_DEBUG, "Selecting pool %d for %s%swork", pool->pool_no, malgo ? malgo->name : "", malgo ? " " : "");
	return pool;
}
static double DIFFEXACTONE = 26959946667150639794667015087019630673637144422540572481103610249215.0;

double target_diff(const unsigned char *target)
{
	double targ = 0;
	signed int i;

	for (i = 31; i >= 0; --i)
		targ = (targ * 0x100) + target[i];

	return DIFFEXACTONE / (targ ?: 1);
}
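/* Worked example: DIFFEXACTONE is 2^224 - 1, i.e. the 256-bit value
 * 0x00000000ffffffff...ffff. The loop above reads the 32-byte target as a
 * little-endian integer (most significant byte last), so the standard
 * pool-difficulty-1 share target -- 28 bytes of 0xff below 4 zero bytes --
 * evaluates to 2^224 - 1 and yields a difficulty of 1.0. A target half that
 * size yields difficulty 2.0, and so on. The GNU `?: 1` shorthand guards
 * against division by zero for an all-zero target. */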
/*
 * Calculate the work share difficulty
 */
static void calc_diff(struct work *work, int known)
{
	struct cgminer_pool_stats *pool_stats = &(work->pool->cgminer_pool_stats);
	double difficulty;

	if (!known) {
		work->work_difficulty = target_diff(work->target);
	} else
		work->work_difficulty = known;

	difficulty = work->work_difficulty;
	pool_stats->last_diff = difficulty;
	suffix_string(difficulty, work->pool->diff, sizeof(work->pool->diff), 0);

	if (difficulty == pool_stats->min_diff)
		pool_stats->min_diff_count++;
	else if (difficulty < pool_stats->min_diff || pool_stats->min_diff == 0) {
		pool_stats->min_diff = difficulty;
		pool_stats->min_diff_count = 1;
	}

	if (difficulty == pool_stats->max_diff)
		pool_stats->max_diff_count++;
	else if (difficulty > pool_stats->max_diff) {
		pool_stats->max_diff = difficulty;
		pool_stats->max_diff_count = 1;
	}
}
static void gen_stratum_work(struct pool *, struct work *);
static void pool_update_work_restart_time(struct pool *);
static void restart_threads(void);

static uint32_t benchmark_blkhdr[20];
static const int benchmark_update_interval = 1;

static
void *benchmark_intense_work_update_thread(void *userp)
{
	pthread_detach(pthread_self());
	RenameThread("benchmark-intense");
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	struct pool * const pool = userp;
	struct stratum_work * const swork = &pool->swork;
	uint8_t * const blkhdr = swork->header1;

	while (true)
	{
		sleep(benchmark_update_interval);

		cg_wlock(&pool->data_lock);
		// Treat header1 as a 36-byte big-endian counter; bump it so each update produces a new job
		for (int i = 36; --i >= 0; )
			if (++blkhdr[i])
				break;
		cg_wunlock(&pool->data_lock);

		struct work *work = make_work();
		gen_stratum_work(pool, work);
		pool->swork.work_restart_id = ++pool->work_restart_id;
		pool_update_work_restart_time(pool);
		test_work_current(work);
		free_work(work);
		restart_threads();
	}
	return NULL;
}
static
void setup_benchmark_pool()
{
	struct pool *pool;

	want_longpoll = false;
	// Temporarily disable opt_benchmark to avoid auto-removal
	opt_benchmark = false;
	pool = add_pool();
	opt_benchmark = true;

	pool->rpc_url = malloc(255);
	strcpy(pool->rpc_url, "Benchmark");
	pool_set_uri(pool, pool->rpc_url);
	pool->rpc_user = pool->rpc_url;
	pool->rpc_pass = pool->rpc_url;
	enable_pool(pool);
	pool->idle = false;
	successful_connect = true;

	{
		uint32_t * const blkhdr = benchmark_blkhdr;
		blkhdr[2] = htobe32(1);
		blkhdr[17] = htobe32(0x7fffffff);  // timestamp
		blkhdr[18] = htobe32(0x1700ffff);  // "bits"
	}

	{
		struct stratum_work * const swork = &pool->swork;
		const int branchcount = 15;  // 1 MB block
		const size_t branchdatasz = branchcount * 0x20;
		const size_t coinbase_sz = (opt_benchmark_intense ? 250 : 6) * 1024;
		bytes_resize(&swork->coinbase, coinbase_sz);
		memset(bytes_buf(&swork->coinbase), '\xff', coinbase_sz);
		swork->nonce2_offset = 0;
		bytes_resize(&swork->merkle_bin, branchdatasz);
		memset(bytes_buf(&swork->merkle_bin), '\xff', branchdatasz);
		swork->merkles = branchcount;
		swork->header1[0] = '\xff';
		memset(&swork->header1[1], '\0', 34);
		swork->header1[35] = '\x01';
		swork->ntime = 0x7fffffff;
		timer_unset(&swork->tv_received);
		memcpy(swork->diffbits, "\x17\0\xff\xff", 4);
		const struct mining_goal_info * const goal = get_mining_goal("default");
		const struct mining_algorithm * const malgo = goal->malgo;
		set_target_to_pdiff(swork->target, malgo->reasonable_low_nonce_diff);
		pool->nonce2sz = swork->n2size = GBT_XNONCESZ;
		pool->nonce2 = 0;
	}

	if (opt_benchmark_intense)
	{
		pthread_t pth;
		if (unlikely(pthread_create(&pth, NULL, benchmark_intense_work_update_thread, pool)))
			applog(LOG_WARNING, "Failed to start benchmark intense work update thread");
	}
}
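/* Sizing notes for the synthetic job above: 15 merkle branches correspond to
 * a tree of up to 2^15 = 32768 transactions (each branch contributing
 * 0x20 = 32 bytes of hash data), which comfortably covers the "1 MB block"
 * the comment refers to. In intense mode the 250 KB dummy coinbase makes
 * each job regeneration hash a realistically large amount of data; the
 * 0x1700ffff "bits" and 0x7fffffff ntime are simply fixed placeholders. */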
void get_benchmark_work(struct work *work, bool use_swork)
{
	if (use_swork)
	{
		struct timeval tv_now;
		timer_set_now(&tv_now);
		gen_stratum_work(pools[0], work);
		work->getwork_mode = GETWORK_MODE_BENCHMARK;
		work_set_simple_ntime_roll_limit(work, 0, &tv_now);
		return;
	}

	struct pool * const pool = pools[0];
	uint32_t * const blkhdr = benchmark_blkhdr;
	// Bump the first 17 header words as a counter so each benchmark work item is unique
	for (int i = 16; i >= 0; --i)
		if (++blkhdr[i])
			break;

	memcpy(&work->data[ 0], blkhdr, 80);
	memcpy(&work->data[80], workpadding_bin, 48);

	char hex[161];
	bin2hex(hex, work->data, 80);
	applog(LOG_DEBUG, "Generated benchmark header %s", hex);

	calc_midstate(work);
	memcpy(work->target, pool->swork.target, sizeof(work->target));
	work->mandatory = true;
	work->pool = pools[0];
	cgtime(&work->tv_getwork);
	copy_time(&work->tv_getwork_reply, &work->tv_getwork);
	copy_time(&work->tv_staged, &work->tv_getwork);
	work->getwork_mode = GETWORK_MODE_BENCHMARK;
	calc_diff(work, 0);
	work_set_simple_ntime_roll_limit(work, 60, &work->tv_getwork);
}
static void wake_gws(void);

static void update_last_work(struct work *work)
{
	if (!work->tr)
		// Only save GBT jobs, since rollntime isn't coordinated well yet
		return;

	struct pool *pool = work->pool;
	mutex_lock(&pool->last_work_lock);
	if (pool->last_work_copy)
		free_work(pool->last_work_copy);
	pool->last_work_copy = copy_work(work);
	pool->last_work_copy->work_restart_id = pool->work_restart_id;
	mutex_unlock(&pool->last_work_lock);
}
static
void gbt_req_target(json_t *req)
{
	json_t *j;
	json_t *n;

	if (!request_target_str)
		return;

	j = json_object_get(req, "params");
	if (!j)
	{
		n = json_array();
		if (!n)
			return;
		if (json_object_set_new(req, "params", n))
			goto erradd;
		j = n;
	}

	n = json_array_get(j, 0);
	if (!n)
	{
		n = json_object();
		if (!n)
			return;
		if (json_array_append_new(j, n))
			goto erradd;
	}
	j = n;

	n = json_string(request_target_str);
	if (!n)
		return;
	if (json_object_set_new(j, "target", n))
		goto erradd;

	return;

erradd:
	json_decref(n);
}
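/* For illustration, after gbt_req_target() runs, the probe request body
 * looks roughly like:
 *
 *   {"method": "getblocktemplate", "params": [{"target": "<hex target>", ...}]}
 *
 * i.e. the function ensures a params array exists, ensures its first element
 * is an object, and adds a "target" key to that object, leaving any other
 * template-request keys produced by blktmpl_request_jansson() intact. */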
static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const char *lpid, bool probe, struct pool * const pool)
{
	char *rpc_req;

	clean_work(work);
	switch (proto) {
		case PLP_GETWORK:
			work->getwork_mode = GETWORK_MODE_POOL;
			return strdup(getwork_req);
		case PLP_GETBLOCKTEMPLATE:
			work->getwork_mode = GETWORK_MODE_GBT;
			blktemplate_t * const tmpl = blktmpl_create();
			if (!tmpl)
				goto gbtfail2;
			work->tr = tmpl_makeref(tmpl);
			gbt_capabilities_t caps = blktmpl_addcaps(tmpl);
			if (!caps)
				goto gbtfail;
			caps |= GBT_LONGPOLL;
#if BLKMAKER_VERSION > 1
			const struct mining_goal_info * const goal = pool->goal;
			if (goal->generation_script || goal_has_at_least_one_getcbaddr(goal))
				caps |= GBT_CBVALUE;
#endif
			json_t *req = blktmpl_request_jansson(caps, lpid);
			if (!req)
				goto gbtfail;
			if (probe)
				gbt_req_target(req);
			rpc_req = json_dumps(req, 0);
			json_decref(req);
			if (!rpc_req)
				goto gbtfail;
			return rpc_req;
		default:
			return NULL;
	}
	return NULL;

gbtfail:
	tmpl_decref(work->tr);
	work->tr = NULL;
gbtfail2:
	return NULL;
}

#define prepare_rpc_req(work, proto, lpid, pool)        prepare_rpc_req2(work, proto, lpid, false, pool)
#define prepare_rpc_req_probe(work, proto, lpid, pool)  prepare_rpc_req2(work, proto, lpid, true, pool)
static const char *pool_protocol_name(enum pool_protocol proto)
{
	switch (proto) {
		case PLP_GETBLOCKTEMPLATE:
			return "getblocktemplate";
		case PLP_GETWORK:
			return "getwork";
		default:
			return "UNKNOWN";
	}
}

static enum pool_protocol pool_protocol_fallback(enum pool_protocol proto)
{
	switch (proto) {
		case PLP_GETBLOCKTEMPLATE:
			if (want_getwork)
				return PLP_GETWORK;
			// fallthrough
		default:
			return PLP_NONE;
	}
}
static bool get_upstream_work(struct work *work, CURL *curl)
{
	struct pool *pool = work->pool;
	struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats);
	struct timeval tv_elapsed;
	json_t *val = NULL;
	bool rc = false;
	char *url;
	enum pool_protocol proto;
	char *rpc_req;

	if (pool->proto == PLP_NONE)
		pool->proto = PLP_GETBLOCKTEMPLATE;

tryagain:
	rpc_req = prepare_rpc_req(work, pool->proto, NULL, pool);
	work->pool = pool;
	if (!rpc_req)
		return false;

	applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);

	url = pool->rpc_url;

	cgtime(&work->tv_getwork);

	val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req, false,
			    false, &work->rolltime, pool, false);
	pool_stats->getwork_attempts++;
	free(rpc_req);

	if (likely(val)) {
		rc = work_decode(pool, work, val);
		if (unlikely(!rc))
			applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
	} else if (PLP_NONE != (proto = pool_protocol_fallback(pool->proto))) {
		applog(LOG_WARNING, "Pool %u failed getblocktemplate request; falling back to getwork protocol", pool->pool_no);
		pool->proto = proto;
		goto tryagain;
	} else
		applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");

	cgtime(&work->tv_getwork_reply);
	timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed);
	pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
	pool_stats->getwork_wait_rolling /= 1.63;

	timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
	if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
		pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
		pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec;
	}
	if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) {
		pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec;
		pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec;
	}
	pool_stats->getwork_calls++;

	work->pool = pool;
	work->longpoll = false;
	calc_diff(work, 0);
	total_getworks++;
	pool->getwork_requested++;

	if (rc)
		update_last_work(work);

	if (likely(val))
		json_decref(val);

	return rc;
}
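/* The getwork_wait_rolling update above is an exponentially weighted moving
 * average in disguise: new = (old + 0.63 * sample) / 1.63 is equivalent to
 * new = (1 - a) * old + a * sample with a = 0.63 / 1.63, roughly 0.386. For
 * example, a rolling average of 0.2 s and a fresh 1.0 s sample give
 * (0.2 + 0.63) / 1.63, about 0.51 s. */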
#ifdef HAVE_CURSES
static void disable_curses(void)
{
	if (curses_active_locked()) {
		use_curses = false;
		curses_active = false;
		leaveok(logwin, false);
		leaveok(statuswin, false);
		leaveok(mainwin, false);
		nocbreak();
		echo();
		delwin(logwin);
		delwin(statuswin);
		delwin(mainwin);
		endwin();
#ifdef WIN32
		// Move the cursor to after curses output.
		HANDLE hout = GetStdHandle(STD_OUTPUT_HANDLE);
		CONSOLE_SCREEN_BUFFER_INFO csbi;
		COORD coord;

		if (GetConsoleScreenBufferInfo(hout, &csbi)) {
			coord.X = 0;
			coord.Y = csbi.dwSize.Y - 1;
			SetConsoleCursorPosition(hout, coord);
		}
#endif
		unlock_curses();
	}
}
#endif
static void __kill_work(void)
{
	struct cgpu_info *cgpu;
	struct thr_info *thr;
	int i;

	if (!successful_connect)
		return;

	applog(LOG_INFO, "Received kill message");

	shutting_down = true;

	applog(LOG_DEBUG, "Prompting submit_work thread to finish");
	notifier_wake(submit_waiting_notifier);

#ifdef USE_LIBMICROHTTPD
	httpsrv_stop();
#endif

	applog(LOG_DEBUG, "Killing off watchpool thread");
	/* Kill the watchpool thread */
	thr = &control_thr[watchpool_thr_id];
	thr_info_cancel(thr);

	applog(LOG_DEBUG, "Killing off watchdog thread");
	/* Kill the watchdog thread */
	thr = &control_thr[watchdog_thr_id];
	thr_info_cancel(thr);

	applog(LOG_DEBUG, "Shutting down mining threads");
	for (i = 0; i < mining_threads; i++) {
		thr = get_thread(i);
		if (!thr)
			continue;
		cgpu = thr->cgpu;
		if (!cgpu)
			continue;
		if (!cgpu->threads)
			continue;

		cgpu->shutdown = true;
		thr->work_restart = true;
		notifier_wake(thr->notifier);
		notifier_wake(thr->work_restart_notifier);
	}

	sleep(1);

	applog(LOG_DEBUG, "Killing off mining threads");
	/* Kill the mining threads */
	for (i = 0; i < mining_threads; i++) {
		thr = get_thread(i);
		if (!thr)
			continue;
		cgpu = thr->cgpu;
		if (cgpu->threads)
		{
			applog(LOG_WARNING, "Killing %"PRIpreprv, thr->cgpu->proc_repr);
			thr_info_cancel(thr);
		}
		cgpu->status = LIFE_DEAD2;
	}

	/* Stop the others */
	applog(LOG_DEBUG, "Killing off API thread");
	thr = &control_thr[api_thr_id];
	thr_info_cancel(thr);
}

/* This should be the common exit path */
void kill_work(void)
{
	__kill_work();

	quit(0, "Shutdown signal received.");
}
static
#ifdef WIN32
#ifndef _WIN64
const
#endif
#endif
char **initial_args;

void _bfg_clean_up(bool);

void app_restart(void)
{
	applog(LOG_WARNING, "Attempting to restart %s", packagename);

	__kill_work();
	_bfg_clean_up(true);

#if defined(unix) || defined(__APPLE__)
	if (forkpid > 0) {
		kill(forkpid, SIGTERM);
		forkpid = 0;
	}
#endif

	execv(initial_args[0], initial_args);
	applog(LOG_WARNING, "Failed to restart application");
}

static void sighandler(int __maybe_unused sig)
{
	/* Restore signal handlers so we can still quit if kill_work fails */
	sigaction(SIGTERM, &termhandler, NULL);
	sigaction(SIGINT, &inthandler, NULL);

	kill_work();
}
static void start_longpoll(void);
static void stop_longpoll(void);

/* Called with pool_lock held. Recruit an extra curl if none are available for
 * this pool. */
static void recruit_curl(struct pool *pool)
{
	struct curl_ent *ce = calloc(sizeof(struct curl_ent), 1);

	if (unlikely(!ce))
		quit(1, "Failed to calloc in recruit_curl");

	ce->curl = curl_easy_init();
	if (unlikely(!ce->curl))
		quit(1, "Failed to init in recruit_curl");

	LL_PREPEND(pool->curllist, ce);
	pool->curls++;
}

/* Grab an available curl if there is one. If not, then recruit extra curls
 * unless we are in a submit_fail situation, or we have opt_delaynet enabled
 * and there are already 5 curls in circulation. Limit total number to the
 * number of mining threads per pool as well to prevent blasting a pool during
 * network delays/outages. */
static struct curl_ent *pop_curl_entry3(struct pool *pool, int blocking)
{
	int curl_limit = opt_delaynet ? 5 : (mining_threads + opt_queue) * 2;
	bool recruited = false;
	struct curl_ent *ce;

	mutex_lock(&pool->pool_lock);
retry:
	if (!pool->curls) {
		recruit_curl(pool);
		recruited = true;
	} else if (!pool->curllist) {
		if (blocking < 2 && pool->curls >= curl_limit && (blocking || pool->curls >= opt_submit_threads)) {
			if (!blocking) {
				mutex_unlock(&pool->pool_lock);
				return NULL;
			}
			pthread_cond_wait(&pool->cr_cond, &pool->pool_lock);
			goto retry;
		} else {
			recruit_curl(pool);
			recruited = true;
		}
	}
	ce = pool->curllist;
	LL_DELETE(pool->curllist, ce);
	mutex_unlock(&pool->pool_lock);

	if (recruited)
		applog(LOG_DEBUG, "Recruited curl for pool %d", pool->pool_no);
	return ce;
}
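/* Sketch of the limit logic above: with opt_delaynet the pool is capped at 5
 * curl handles, otherwise at (mining_threads + opt_queue) * 2. Once the cap
 * is hit, a blocking caller (blocking == 1) waits on cr_cond for a handle to
 * be pushed back, a non-blocking caller gets NULL (once opt_submit_threads
 * handles are also in use), and blocking == 2 bypasses the cap entirely and
 * always recruits a fresh handle. */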
static struct curl_ent *pop_curl_entry2(struct pool *pool, bool blocking)
{
	return pop_curl_entry3(pool, blocking ? 1 : 0);
}

__maybe_unused
static struct curl_ent *pop_curl_entry(struct pool *pool)
{
	return pop_curl_entry3(pool, 1);
}

static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
{
	mutex_lock(&pool->pool_lock);
	if (!ce || !ce->curl)
		quithere(1, "Attempted to add NULL");
	LL_PREPEND(pool->curllist, ce);
	cgtime(&ce->tv);
	pthread_cond_broadcast(&pool->cr_cond);
	mutex_unlock(&pool->pool_lock);
}
static inline bool should_roll(struct work *work)
{
	struct timeval now;
	time_t expiry;

	if (!pool_actively_in_use(work->pool, NULL))
		return false;

	if (stale_work(work, false))
		return false;

	if (work->rolltime > opt_scantime)
		expiry = work->rolltime;
	else
		expiry = opt_scantime;
	expiry = expiry * 2 / 3;

	/* We shouldn't roll if we're unlikely to get one share's duration
	 * of work out of doing so */
	cgtime(&now);
	if (now.tv_sec - work->tv_staged.tv_sec > expiry)
		return false;

	return true;
}
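/* Worked example: with opt_scantime at 60 s (its usual default) and a work
 * item advertising a rolltime of 120 s, expiry = 120 * 2 / 3 = 80 s, so the
 * item is only rolled within the first 80 seconds after being staged. */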
/* Limit rolls to 7000 so we don't roll ntime beyond 2 hours into the future,
 * where bitcoind will reject blocks as invalid. */
static inline bool can_roll(struct work *work)
{
	if (work->stratum)
		return false;
	if (!(work->pool && !work->clone))
		return false;
	if (work->tr)
	{
		if (stale_work(work, false))
			return false;
		return blkmk_work_left(work->tr->tmpl);
	}
	return (work->rolltime &&
		work->rolls < 7000 && !stale_work(work, false));
}
static void roll_work(struct work *work)
{
	if (work->tr)
	{
		struct timeval tv_now;
		cgtime(&tv_now);
		if (blkmk_get_data(work->tr->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76)
			applog(LOG_ERR, "Failed to get next data from template; spinning wheels!");
		swap32yes(work->data, work->data, 80 / 4);
		calc_midstate(work);
		applog(LOG_DEBUG, "Successfully rolled extranonce to dataid %u", work->dataid);
	} else {
		uint32_t *work_ntime;
		uint32_t ntime;

		work_ntime = (uint32_t *)(work->data + 68);
		ntime = be32toh(*work_ntime);
		ntime++;
		*work_ntime = htobe32(ntime);
		work_set_simple_ntime_roll_limit(work, 0, &work->ntime_roll_limits.tv_ref);
		applog(LOG_DEBUG, "Successfully rolled time header in work");
	}

	local_work++;
	work->rolls++;
	work->blk.nonce = 0;

	/* This is now a different work item so it needs a different ID for the
	 * hashtable */
	work->id = total_work++;
}
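/* The ntime branch above rolls the header timestamp in place: bytes 68..71
 * of work->data hold the block header's ntime field big-endian, so it is
 * byte-swapped to host order, incremented by one second, and swapped back.
 * Each increment yields a distinct header, and therefore a fresh nonce
 * space, without another round trip to the pool. */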
/* Duplicates any dynamically allocated arrays within the work struct to
 * prevent a copied work struct from freeing ram belonging to another struct */
static void _copy_work(struct work *work, const struct work *base_work, int noffset)
{
	int id = work->id;

	clean_work(work);
	memcpy(work, base_work, sizeof(struct work));

	/* Keep the unique new id assigned during make_work to prevent copied
	 * work from having the same id. */
	work->id = id;
	if (base_work->job_id)
		work->job_id = strdup(base_work->job_id);
	if (base_work->nonce1)
		work->nonce1 = strdup(base_work->nonce1);
	bytes_cpy(&work->nonce2, &base_work->nonce2);
	if (base_work->tr)
		tmpl_incref(base_work->tr);

	if (noffset)
	{
		uint32_t *work_ntime = (uint32_t *)(work->data + 68);
		uint32_t ntime = be32toh(*work_ntime);

		ntime += noffset;
		*work_ntime = htobe32(ntime);
	}

	if (work->device_data_dup_func)
		work->device_data = work->device_data_dup_func(work);
}

/* Generates a copy of an existing work struct, creating fresh heap allocations
 * for all dynamically allocated arrays within the struct. noffset is used
 * when a driver has internally rolled the ntime; it is a relative value.
 * The macro copy_work() calls this function with an noffset of 0. */
struct work *copy_work_noffset(const struct work *base_work, int noffset)
{
	struct work *work = make_work();

	_copy_work(work, base_work, noffset);

	return work;
}

void __copy_work(struct work *work, const struct work *base_work)
{
	_copy_work(work, base_work, 0);
}
static struct work *make_clone(struct work *work)
{
	struct work *work_clone = copy_work(work);

	work_clone->clone = true;
	cgtime((struct timeval *)&(work_clone->tv_cloned));
	work_clone->longpoll = false;
	work_clone->mandatory = false;
	/* Make cloned work appear slightly older to bias towards keeping the
	 * master work item which can be further rolled */
	work_clone->tv_staged.tv_sec -= 1;

	return work_clone;
}

static void stage_work(struct work *work);

static bool clone_available(void)
{
	struct work *work_clone = NULL, *work, *tmp;
	bool cloned = false;

	mutex_lock(stgd_lock);
	if (!staged_rollable)
		goto out_unlock;

	HASH_ITER(hh, staged_work, work, tmp) {
		if (can_roll(work) && should_roll(work)) {
			roll_work(work);
			work_clone = make_clone(work);
			applog(LOG_DEBUG, "%s: Rolling work %d to %d", __func__, work->id, work_clone->id);
			roll_work(work);
			cloned = true;
			break;
		}
	}

out_unlock:
	mutex_unlock(stgd_lock);

	if (cloned) {
		applog(LOG_DEBUG, "Pushing cloned available work to stage thread");
		stage_work(work_clone);
	}

	return cloned;
}
static void pool_died(struct pool *pool)
{
	mutex_lock(&lp_lock);
	if (!pool_tset(pool, &pool->idle)) {
		cgtime(&pool->tv_idle);
		pthread_cond_broadcast(&lp_cond);
		mutex_unlock(&lp_lock);
		if (pool == current_pool()) {
			applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
			switch_pools(NULL);
		} else
			applog(LOG_INFO, "Pool %d %s failed to return work", pool->pool_no, pool->rpc_url);
	}
	else
		mutex_unlock(&lp_lock);
}
bool stale_work2(struct work * const work, const bool share, const bool have_pool_data_lock)
{
	unsigned work_expiry;
	struct pool *pool;
	uint32_t block_id;
	unsigned getwork_delay;

	block_id = ((uint32_t*)work->data)[1];
	pool = work->pool;
	struct mining_goal_info * const goal = pool->goal;
	struct blockchain_info * const blkchain = goal->blkchain;

	/* Technically the rolltime should be correct but some pools
	 * advertise a broken expire= that is lower than a meaningful
	 * scantime */
	if (work->rolltime >= opt_scantime || work->tr)
		work_expiry = work->rolltime;
	else
		work_expiry = opt_expiry;

	unsigned max_expiry = (goal->have_longpoll ? opt_expiry_lp : opt_expiry);
	if (work_expiry > max_expiry)
		work_expiry = max_expiry;

	if (share) {
		/* If the share isn't on this pool's latest block, it's stale */
		if (pool->block_id && pool->block_id != block_id)
		{
			applog(LOG_DEBUG, "Share stale due to block mismatch (%08lx != %08lx)", (long)block_id, (long)pool->block_id);
			return true;
		}

		/* If the pool doesn't want old shares, then any found in work before
		 * the most recent longpoll is stale */
		if ((!pool->submit_old) && work->work_restart_id != pool->work_restart_id)
		{
			applog(LOG_DEBUG, "Share stale due to mandatory work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id);
			return true;
		}
	} else {
		/* If this work isn't for the latest Bitcoin block, it's stale */
		/* But only care about the current pool if failover-only */
		if (enabled_pools <= 1 || opt_fail_only) {
			if (pool->block_id && block_id != pool->block_id)
			{
				applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 1 ? %08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)blkchain->currentblk->block_id);
				return true;
			}
		} else {
			if (block_id != blkchain->currentblk->block_id)
			{
				applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 0 ? %08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)blkchain->currentblk->block_id);
				return true;
			}
		}

		/* If the pool has asked us to restart since this work, it's stale */
		if (work->work_restart_id != pool->work_restart_id)
		{
			applog(LOG_DEBUG, "Work stale due to work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id);
			return true;
		}

		if (pool->has_stratum && work->job_id) {
			bool same_job;

			if (!pool->stratum_active || !pool->stratum_notify) {
				applog(LOG_DEBUG, "Work stale due to stratum inactive");
				return true;
			}

			same_job = true;
			if (!have_pool_data_lock) {
				cg_rlock(&pool->data_lock);
			}
			if (strcmp(work->job_id, pool->swork.job_id))
				same_job = false;
			if (!have_pool_data_lock) {
				cg_runlock(&pool->data_lock);
			}
			if (!same_job) {
				applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
				return true;
			}
		}

		/* Factor in the average getwork delay of this pool, rounding it up to
		 * the nearest second */
		getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1;
		if (unlikely(work_expiry <= getwork_delay + 5))
			work_expiry = 5;
		else
			work_expiry -= getwork_delay;
	}

	int elapsed_since_staged = timer_elapsed(&work->tv_staged, NULL);
	if (elapsed_since_staged > work_expiry) {
		applog(LOG_DEBUG, "%s stale due to expiry (%d > %u)", share ? "Share" : "Work", elapsed_since_staged, work_expiry);
		return true;
	}

	/* If the user only wants strict failover, any work from a pool other than
	 * the current one is always considered stale */
	if (opt_fail_only && !share && !work->mandatory && !pool_actively_in_use(pool, NULL))
	{
		applog(LOG_DEBUG, "Work stale due to fail only pool mismatch (pool %u vs %u)", pool->pool_no, current_pool()->pool_no);
		return true;
	}

	return false;
}
double share_diff(const struct work *work)
{
	double ret;
	bool new_best = false;

	ret = target_diff(work->hash);

	cg_wlock(&control_lock);
	if (unlikely(ret > best_diff)) {
		new_best = true;
		best_diff = ret;
		suffix_string(best_diff, best_share, sizeof(best_share), 0);
	}
	if (unlikely(ret > work->pool->best_diff))
		work->pool->best_diff = ret;
	cg_wunlock(&control_lock);

	if (unlikely(new_best))
		applog(LOG_INFO, "New best share: %s", best_share);

	return ret;
}
static
void work_check_for_block(struct work * const work)
{
	struct pool * const pool = work->pool;
	struct mining_goal_info * const goal = pool->goal;

	work->share_diff = share_diff(work);
	if (unlikely(work->share_diff >= goal->current_diff)) {
		work->block = true;
		work->pool->solved++;
		found_blocks++;
		work->mandatory = true;
		applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no);
	}
}

static void submit_discard_share2(const char *reason, struct work *work)
{
	struct cgpu_info *cgpu = get_thr_cgpu(work->thr_id);

	sharelog(reason, work);

	mutex_lock(&stats_lock);
	++total_stale;
	++cgpu->stale;
	++(work->pool->stale_shares);
	total_diff_stale += work->work_difficulty;
	cgpu->diff_stale += work->work_difficulty;
	work->pool->diff_stale += work->work_difficulty;
	mutex_unlock(&stats_lock);
}

static void submit_discard_share(struct work *work)
{
	submit_discard_share2("discard", work);
}
struct submit_work_state {
	struct work *work;
	bool resubmit;
	struct curl_ent *ce;
	int failures;
	struct timeval tv_staleexpire;
	char *s;
	struct timeval tv_submit;
	struct submit_work_state *next;
};

static int my_curl_timer_set(__maybe_unused CURLM *curlm, long timeout_ms, void *userp)
{
	long *p_timeout_us = userp;
	const long max_ms = LONG_MAX / 1000;
	if (max_ms < timeout_ms)
		timeout_ms = max_ms;
	*p_timeout_us = timeout_ms * 1000;
	return 0;
}
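/* my_curl_timer_set is a CURLMOPT_TIMERFUNCTION callback: libcurl invokes it
 * whenever the multi handle's preferred wait interval changes, passing the
 * interval in milliseconds (or -1 for "no timeout pending"). It is stored
 * here in microseconds for the select() loop below; the LONG_MAX / 1000
 * clamp keeps the millisecond-to-microsecond conversion from overflowing a
 * long. Note that a timeout_ms of -1 passes through as -1000, still
 * negative, which the loop below treats as "no cURL timeout". */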
static void sws_has_ce(struct submit_work_state *sws)
{
	struct pool *pool = sws->work->pool;
	sws->s = submit_upstream_work_request(sws->work);
	cgtime(&sws->tv_submit);
	json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws);
}

static struct submit_work_state *begin_submission(struct work *work)
{
	struct pool *pool;
	struct submit_work_state *sws = NULL;

	pool = work->pool;
	sws = malloc(sizeof(*sws));
	*sws = (struct submit_work_state){
		.work = work,
	};

	work_check_for_block(work);

	if (stale_work(work, true)) {
		work->stale = true;
		if (opt_submit_stale)
			applog(LOG_NOTICE, "Pool %d stale share detected, submitting as user requested", pool->pool_no);
		else if (pool->submit_old)
			applog(LOG_NOTICE, "Pool %d stale share detected, submitting as pool requested", pool->pool_no);
		else {
			applog(LOG_NOTICE, "Pool %d stale share detected, discarding", pool->pool_no);
			submit_discard_share(work);
			goto out;
		}
		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
	}

	if (work->getwork_mode == GETWORK_MODE_STRATUM) {
		char *s;

		s = malloc(1024);
		sws->s = s;
	} else {
		/* submit solution to bitcoin via JSON-RPC */
		sws->ce = pop_curl_entry2(pool, false);
		if (sws->ce) {
			sws_has_ce(sws);
		} else {
			sws->next = pool->sws_waiting_on_curl;
			pool->sws_waiting_on_curl = sws;
			if (sws->next)
				applog(LOG_DEBUG, "submit_thread queuing submission");
			else
				applog(LOG_WARNING, "submit_thread queuing submissions (see --submit-threads)");
		}
	}

	return sws;

out:
	free(sws);
	return NULL;
}
static bool retry_submission(struct submit_work_state *sws)
{
	struct work *work = sws->work;
	struct pool *pool = work->pool;

	sws->resubmit = true;
	if ((!work->stale) && stale_work(work, true)) {
		work->stale = true;
		if (opt_submit_stale)
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as user requested", pool->pool_no);
		else if (pool->submit_old)
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as pool requested", pool->pool_no);
		else {
			applog(LOG_NOTICE, "Pool %d share became stale during submission failure, discarding", pool->pool_no);
			submit_discard_share(work);
			return false;
		}
		timer_set_delay_from_now(&sws->tv_staleexpire, 300000000);
	}
	if (unlikely((opt_retries >= 0) && (++sws->failures > opt_retries))) {
		applog(LOG_ERR, "Pool %d failed %d submission retries, discarding", pool->pool_no, opt_retries);
		submit_discard_share(work);
		return false;
	}
	else if (work->stale) {
		if (unlikely(opt_retries < 0 && timer_passed(&sws->tv_staleexpire, NULL)))
		{
			applog(LOG_NOTICE, "Pool %d stale share failed to submit for 5 minutes, discarding", pool->pool_no);
			submit_discard_share(work);
			return false;
		}
	}

	/* pause, then restart work-request loop */
	applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying");

	cgtime(&sws->tv_submit);
	json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws);

	return true;
}

static void free_sws(struct submit_work_state *sws)
{
	free(sws->s);
	free_work(sws->work);
	free(sws);
}
static void *submit_work_thread(__maybe_unused void *userdata)
{
	int wip = 0;
	CURLM *curlm;
	long curlm_timeout_us = -1;
	struct timeval curlm_timer;
	struct submit_work_state *sws, **swsp;
	struct submit_work_state *write_sws = NULL;
	unsigned tsreduce = 0;

	pthread_detach(pthread_self());
	RenameThread("submit_work");

	applog(LOG_DEBUG, "Creating extra submit work thread");

	curlm = curl_multi_init();
	curlm_timeout_us = -1;
	curl_multi_setopt(curlm, CURLMOPT_TIMERDATA, &curlm_timeout_us);
	curl_multi_setopt(curlm, CURLMOPT_TIMERFUNCTION, my_curl_timer_set);

	fd_set rfds, wfds, efds;
	int maxfd;
	struct timeval tv_timeout, tv_now;
	int n;
	CURLMsg *cm;
	FD_ZERO(&rfds);
	while (1) {
		mutex_lock(&submitting_lock);
		total_submitting -= tsreduce;
		tsreduce = 0;
		if (FD_ISSET(submit_waiting_notifier[0], &rfds)) {
			notifier_read(submit_waiting_notifier);
		}

		// Receive any new submissions
		while (submit_waiting) {
			struct work *work = submit_waiting;
			DL_DELETE(submit_waiting, work);
			if ( (sws = begin_submission(work)) ) {
				if (sws->ce)
					curl_multi_add_handle(curlm, sws->ce->curl);
				else if (sws->s) {
					sws->next = write_sws;
					write_sws = sws;
				}
				++wip;
			}
			else {
				--total_submitting;
				free_work(work);
			}
		}

		if (unlikely(shutting_down && !wip))
			break;
		mutex_unlock(&submitting_lock);

		FD_ZERO(&rfds);
		FD_ZERO(&wfds);
		FD_ZERO(&efds);
		tv_timeout.tv_sec = -1;

		// Setup cURL with select
		// Need to call perform to ensure the timeout gets updated
		curl_multi_perform(curlm, &n);
		curl_multi_fdset(curlm, &rfds, &wfds, &efds, &maxfd);
		if (curlm_timeout_us >= 0)
		{
			timer_set_delay_from_now(&curlm_timer, curlm_timeout_us);
			reduce_timeout_to(&tv_timeout, &curlm_timer);
		}

		// Setup waiting stratum submissions with select
		for (sws = write_sws; sws; sws = sws->next)
		{
			struct pool *pool = sws->work->pool;
			int fd = pool->sock;
			if (fd == INVSOCK || (!pool->stratum_init) || !pool->stratum_notify)
				continue;
			FD_SET(fd, &wfds);
			set_maxfd(&maxfd, fd);
		}

		// Setup "submit waiting" notifier with select
		FD_SET(submit_waiting_notifier[0], &rfds);
		set_maxfd(&maxfd, submit_waiting_notifier[0]);

		// Wait for something interesting to happen :)
		cgtime(&tv_now);
		if (select(maxfd+1, &rfds, &wfds, &efds, select_timeout(&tv_timeout, &tv_now)) < 0) {
			FD_ZERO(&rfds);
			continue;
		}

		// Handle any stratum ready-to-write results
		for (swsp = &write_sws; (sws = *swsp); ) {
			struct work *work = sws->work;
			struct pool *pool = work->pool;
			int fd = pool->sock;
			bool sessionid_match;

			if (fd == INVSOCK || (!pool->stratum_init) || (!pool->stratum_notify) || !FD_ISSET(fd, &wfds)) {
next_write_sws:
				// TODO: Check if stale, possibly discard etc
				swsp = &sws->next;
				continue;
			}

			cg_rlock(&pool->data_lock);
			// NOTE: cgminer only does this check on retries, but BFGMiner does it for even the first/normal submit; therefore, it needs to be such that it always is true on the same connection regardless of session management
			// NOTE: Worst case scenario for a false positive: the pool rejects it as H-not-zero
			sessionid_match = (!pool->swork.nonce1) || !strcmp(work->nonce1, pool->swork.nonce1);
			cg_runlock(&pool->data_lock);

			if (!sessionid_match)
			{
				applog(LOG_DEBUG, "No matching session id for resubmitting stratum share");
				submit_discard_share2("disconnect", work);
				++tsreduce;
next_write_sws_del:
				// Clear the fd from wfds, to avoid potentially blocking on other submissions to the same socket
				FD_CLR(fd, &wfds);
				// Delete sws for this submission, since we're done with it
				*swsp = sws->next;
				free_sws(sws);
				--wip;
				continue;
			}

			char *s = sws->s;
			struct stratum_share *sshare = calloc(sizeof(struct stratum_share), 1);
			int sshare_id;
			uint32_t nonce;
			char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1];
			char noncehex[9];
			char ntimehex[9];

			sshare->work = copy_work(work);
			bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
			nonce = *((uint32_t *)(work->data + 76));
			bin2hex(noncehex, (const unsigned char *)&nonce, 4);
			bin2hex(ntimehex, (void *)&work->data[68], 4);

			mutex_lock(&sshare_lock);
			/* Give the stratum share a unique id */
			sshare_id =
			sshare->id = swork_id++;
			HASH_ADD_INT(stratum_shares, id, sshare);
			snprintf(s, 1024, "{\"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\": %d, \"method\": \"mining.submit\"}",
				 pool->rpc_user, work->job_id, nonce2hex, ntimehex, noncehex, sshare->id);
			mutex_unlock(&sshare_lock);

			applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->stratum_url, s);

			if (likely(stratum_send(pool, s, strlen(s)))) {
				if (pool_tclear(pool, &pool->submit_fail))
					applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
				applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
				goto next_write_sws_del;
			} else if (!pool_tset(pool, &pool->submit_fail)) {
				// Undo stuff
				mutex_lock(&sshare_lock);
				// NOTE: Need to find it again in case something else has consumed it already (like the stratum-disconnect resubmitter...)
				HASH_FIND_INT(stratum_shares, &sshare_id, sshare);
				if (sshare)
					HASH_DEL(stratum_shares, sshare);
				mutex_unlock(&sshare_lock);
				if (sshare)
				{
					free_work(sshare->work);
					free(sshare);
				}

				applog(LOG_WARNING, "Pool %d stratum share submission failure", pool->pool_no);
				total_ro++;
				pool->remotefail_occasions++;

				if (!sshare)
					goto next_write_sws_del;
				goto next_write_sws;
			}
		}

		// Handle any cURL activities
		curl_multi_perform(curlm, &n);
		while ((cm = curl_multi_info_read(curlm, &n))) {
			if (cm->msg == CURLMSG_DONE)
			{
				bool finished;
				json_t *val = json_rpc_call_completed(cm->easy_handle, cm->data.result, false, NULL, &sws);
				curl_multi_remove_handle(curlm, cm->easy_handle);
				finished = submit_upstream_work_completed(sws->work, sws->resubmit, &sws->tv_submit, val);
				if (!finished) {
					if (retry_submission(sws))
						curl_multi_add_handle(curlm, sws->ce->curl);
					else
						finished = true;
				}
				if (finished) {
					--wip;
					++tsreduce;
					struct pool *pool = sws->work->pool;
					if (pool->sws_waiting_on_curl) {
						pool->sws_waiting_on_curl->ce = sws->ce;
						sws_has_ce(pool->sws_waiting_on_curl);
						pool->sws_waiting_on_curl = pool->sws_waiting_on_curl->next;
						curl_multi_add_handle(curlm, sws->ce->curl);
					} else {
						push_curl_entry(sws->ce, sws->work->pool);
					}
					free_sws(sws);
				}
			}
		}
	}
	assert(!write_sws);
	mutex_unlock(&submitting_lock);

	curl_multi_cleanup(curlm);

	applog(LOG_DEBUG, "submit_work thread exiting");

	return NULL;
}
/* Find the pool that currently has the highest priority */
static struct pool *priority_pool(int choice)
{
	struct pool *ret = NULL;
	int i;

	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (pool->prio == choice) {
			ret = pool;
			break;
		}
	}

	if (unlikely(!ret)) {
		applog(LOG_ERR, "WTF No pool %d found!", choice);
		return pools[choice];
	}
	return ret;
}
int prioritize_pools(char *param, int *pid)
{
	char *ptr, *next;
	int i, pr, prio = 0;

	if (total_pools == 0) {
		return MSG_NOPOOL;
	}

	if (param == NULL || *param == '\0') {
		return MSG_MISPID;
	}

	bool pools_changed[total_pools];
	int new_prio[total_pools];
	for (i = 0; i < total_pools; ++i)
		pools_changed[i] = false;

	next = param;
	while (next && *next) {
		ptr = next;
		next = strchr(ptr, ',');
		if (next)
			*(next++) = '\0';

		i = atoi(ptr);
		if (i < 0 || i >= total_pools) {
			*pid = i;
			return MSG_INVPID;
		}

		if (pools_changed[i]) {
			*pid = i;
			return MSG_DUPPID;
		}

		pools_changed[i] = true;
		new_prio[i] = prio++;
	}

	// Only change them if no errors
	for (i = 0; i < total_pools; i++) {
		if (pools_changed[i])
			pools[i]->prio = new_prio[i];
	}

	// In priority order, cycle through the unchanged pools and append them
	for (pr = 0; pr < total_pools; pr++)
		for (i = 0; i < total_pools; i++) {
			if (!pools_changed[i] && pools[i]->prio == pr) {
				pools[i]->prio = prio++;
				pools_changed[i] = true;
				break;
			}
		}

	if (current_pool()->prio)
		switch_pools(NULL);

	return MSG_POOLPRIO;
}
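/* Usage example: with four pools, a param string of "2,0" assigns pool 2
 * priority 0 and pool 0 priority 1; the unchanged pools (1 and 3) are then
 * appended in their previous relative order as priorities 2 and 3. If the
 * pool that ends up at priority 0 is not the current pool, switch_pools()
 * is invoked to move over to it. */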
  6293. void validate_pool_priorities(void)
  6294. {
  6295. // TODO: this should probably do some sort of logging
  6296. int i, j;
  6297. bool used[total_pools];
  6298. bool valid[total_pools];
  6299. for (i = 0; i < total_pools; i++)
  6300. used[i] = valid[i] = false;
  6301. for (i = 0; i < total_pools; i++) {
if (pools[i]->prio >= 0 && pools[i]->prio < total_pools) {
  6303. if (!used[pools[i]->prio]) {
  6304. valid[i] = true;
  6305. used[pools[i]->prio] = true;
  6306. }
  6307. }
  6308. }
  6309. for (i = 0; i < total_pools; i++) {
  6310. if (!valid[i]) {
  6311. for (j = 0; j < total_pools; j++) {
  6312. if (!used[j]) {
  6313. applog(LOG_WARNING, "Pool %d priority changed from %d to %d", i, pools[i]->prio, j);
  6314. pools[i]->prio = j;
  6315. used[j] = true;
  6316. break;
  6317. }
  6318. }
  6319. }
  6320. }
  6321. }
  6322. static void clear_pool_work(struct pool *pool);
  6323. /* Specifies whether we can switch to this pool or not. */
  6324. static bool pool_unusable(struct pool *pool)
  6325. {
  6326. if (pool->idle)
  6327. return true;
  6328. if (pool->enabled != POOL_ENABLED)
  6329. return true;
  6330. return false;
  6331. }
  6332. void switch_pools(struct pool *selected)
  6333. {
  6334. struct pool *pool, *last_pool, *failover_pool = NULL;
  6335. int i, pool_no, next_pool;
  6336. if (selected)
  6337. enable_pool(selected);
  6338. cg_wlock(&control_lock);
  6339. last_pool = currentpool;
  6340. pool_no = currentpool->pool_no;
  6341. /* Switch selected to pool number 0 and move the rest down */
  6342. if (selected) {
  6343. if (selected->prio != 0) {
  6344. for (i = 0; i < total_pools; i++) {
  6345. pool = pools[i];
  6346. if (pool->prio < selected->prio)
  6347. pool->prio++;
  6348. }
  6349. selected->prio = 0;
  6350. }
  6351. }
  6352. switch (pool_strategy) {
  6353. /* All of these set to the master pool */
  6354. case POOL_BALANCE:
  6355. case POOL_FAILOVER:
  6356. case POOL_LOADBALANCE:
  6357. for (i = 0; i < total_pools; i++) {
  6358. pool = priority_pool(i);
  6359. if (pool_unusable(pool))
  6360. continue;
  6361. pool_no = pool->pool_no;
  6362. break;
  6363. }
  6364. break;
  6365. /* Both of these simply increment and cycle */
  6366. case POOL_ROUNDROBIN:
  6367. case POOL_ROTATE:
  6368. if (selected && !selected->idle) {
  6369. pool_no = selected->pool_no;
  6370. break;
  6371. }
  6372. next_pool = pool_no;
  6373. /* Select the next alive pool */
  6374. for (i = 1; i < total_pools; i++) {
  6375. next_pool++;
  6376. if (next_pool >= total_pools)
  6377. next_pool = 0;
  6378. pool = pools[next_pool];
  6379. if (pool_unusable(pool))
  6380. continue;
  6381. if (pool->failover_only)
  6382. {
  6383. BFGINIT(failover_pool, pool);
  6384. continue;
  6385. }
  6386. pool_no = next_pool;
  6387. break;
  6388. }
  6389. break;
  6390. default:
  6391. break;
  6392. }
  6393. pool = pools[pool_no];
  6394. if (pool_unusable(pool) && failover_pool)
  6395. pool = failover_pool;
  6396. currentpool = pool;
  6397. cg_wunlock(&control_lock);
  6398. mutex_lock(&lp_lock);
  6399. pthread_cond_broadcast(&lp_cond);
  6400. mutex_unlock(&lp_lock);
/* Set the lagging flag to suppress "pool not providing work fast
 * enough" messages in failover-only mode, since we have to fetch all
 * fresh work, as in restart_threads */
  6404. if (opt_fail_only)
  6405. pool_tset(pool, &pool->lagging);
  6406. if (pool != last_pool)
  6407. {
  6408. pool->block_id = 0;
  6409. if (pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) {
  6410. applog(LOG_WARNING, "Switching to pool %d %s", pool->pool_no, pool->rpc_url);
  6411. if (pool_localgen(pool) || opt_fail_only)
  6412. clear_pool_work(last_pool);
  6413. }
  6414. }
  6415. mutex_lock(&lp_lock);
  6416. pthread_cond_broadcast(&lp_cond);
  6417. mutex_unlock(&lp_lock);
  6418. #ifdef HAVE_CURSES
  6419. update_block_display(false);
  6420. #endif
  6421. }
  6422. static void discard_work(struct work *work)
  6423. {
  6424. if (!work->clone && !work->rolls && !work->mined) {
  6425. if (work->pool) {
  6426. work->pool->discarded_work++;
  6427. work->pool->quota_used--;
  6428. work->pool->works--;
  6429. }
  6430. total_discarded++;
  6431. applog(LOG_DEBUG, "Discarded work");
  6432. } else
  6433. applog(LOG_DEBUG, "Discarded cloned or rolled work");
  6434. free_work(work);
  6435. }
  6436. static bool work_rollable(struct work *);
  6437. static
  6438. void unstage_work(struct work * const work)
  6439. {
  6440. HASH_DEL(staged_work, work);
  6441. --work_mining_algorithm(work)->staged;
  6442. if (work_rollable(work))
  6443. --staged_rollable;
  6444. if (work->spare)
  6445. --staged_spare;
  6446. staged_full = false;
  6447. }
  6448. static void wake_gws(void)
  6449. {
  6450. mutex_lock(stgd_lock);
  6451. pthread_cond_signal(&gws_cond);
  6452. mutex_unlock(stgd_lock);
  6453. }
  6454. static void discard_stale(void)
  6455. {
  6456. struct work *work, *tmp;
  6457. int stale = 0;
  6458. mutex_lock(stgd_lock);
  6459. HASH_ITER(hh, staged_work, work, tmp) {
  6460. if (stale_work(work, false)) {
  6461. unstage_work(work);
  6462. discard_work(work);
  6463. stale++;
  6464. }
  6465. }
  6466. pthread_cond_signal(&gws_cond);
  6467. mutex_unlock(stgd_lock);
  6468. if (stale)
  6469. applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
  6470. }
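/* Illustrative note: stale_work_future() asks "would this work be stale
 * `ustime` microseconds from now?" by temporarily aging tv_staged before
 * calling stale_work(). E.g. stale_work_future(work, false, 5000000)
 * checks whether the work would still be usable five seconds from now. */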
  6471. bool stale_work_future(struct work *work, bool share, unsigned long ustime)
  6472. {
  6473. bool rv;
  6474. struct timeval tv, orig;
  6475. ldiv_t d;
  6476. d = ldiv(ustime, 1000000);
  6477. tv = (struct timeval){
  6478. .tv_sec = d.quot,
  6479. .tv_usec = d.rem,
  6480. };
  6481. orig = work->tv_staged;
  6482. timersub(&orig, &tv, &work->tv_staged);
  6483. rv = stale_work(work, share);
  6484. work->tv_staged = orig;
  6485. return rv;
  6486. }
  6487. static
  6488. void pool_update_work_restart_time(struct pool * const pool)
  6489. {
  6490. pool->work_restart_time = time(NULL);
  6491. get_timestamp(pool->work_restart_timestamp, sizeof(pool->work_restart_timestamp), pool->work_restart_time);
  6492. }
  6493. static void restart_threads(void)
  6494. {
  6495. struct pool *cp = current_pool();
  6496. int i;
  6497. struct thr_info *thr;
  6498. /* Artificially set the lagging flag to avoid pool not providing work
  6499. * fast enough messages after every long poll */
  6500. pool_tset(cp, &cp->lagging);
  6501. /* Discard staged work that is now stale */
  6502. discard_stale();
  6503. rd_lock(&mining_thr_lock);
  6504. for (i = 0; i < mining_threads; i++)
  6505. {
  6506. thr = mining_thr[i];
  6507. thr->work_restart = true;
  6508. }
  6509. for (i = 0; i < mining_threads; i++)
  6510. {
  6511. thr = mining_thr[i];
  6512. notifier_wake(thr->work_restart_notifier);
  6513. }
  6514. rd_unlock(&mining_thr_lock);
  6515. }
  6516. void blkhashstr(char *rv, const unsigned char *hash)
  6517. {
  6518. unsigned char hash_swap[32];
  6519. swap256(hash_swap, hash);
  6520. swap32tole(hash_swap, hash_swap, 32 / 4);
  6521. bin2hex(rv, hash_swap, 32);
  6522. }
  6523. static
  6524. void set_curblock(struct mining_goal_info * const goal, struct block_info * const blkinfo)
  6525. {
  6526. struct blockchain_info * const blkchain = goal->blkchain;
  6527. blkchain->currentblk = blkinfo;
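/* The block subsidy halves every 210000 blocks (Bitcoin's schedule):
 * e.g. at height 420000 this is 5000000000 >> 2 = 1250000000 satoshis,
 * i.e. 12.5 BTC. */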
  6528. blkchain->currentblk_subsidy = 5000000000LL >> (blkinfo->height / 210000);
  6529. cg_wlock(&ch_lock);
  6530. __update_block_title(goal);
  6531. get_timestamp(blkchain->currentblk_first_seen_time_str, sizeof(blkchain->currentblk_first_seen_time_str), blkinfo->first_seen_time);
  6532. cg_wunlock(&ch_lock);
  6533. applog(LOG_INFO, "New block: %s diff %s (%s)", goal->current_goal_detail, goal->current_diff_str, goal->net_hashrate);
  6534. }
  6535. /* Search to see if this prevblkhash has been seen before */
  6536. static
  6537. struct block_info *block_exists(const struct blockchain_info * const blkchain, const void * const prevblkhash)
  6538. {
  6539. struct block_info *s;
  6540. rd_lock(&blk_lock);
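/* 0x20 = 32 bytes: the length of the SHA-256d previous-block hash used
 * as the hash table key */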
  6541. HASH_FIND(hh, blkchain->blocks, prevblkhash, 0x20, s);
  6542. rd_unlock(&blk_lock);
  6543. return s;
  6544. }
  6545. static int block_sort(struct block_info * const blocka, struct block_info * const blockb)
  6546. {
  6547. return blocka->block_seen_order - blockb->block_seen_order;
  6548. }
  6549. static
  6550. void set_blockdiff(struct mining_goal_info * const goal, const struct work * const work)
  6551. {
  6552. unsigned char target[32];
  6553. double diff;
  6554. uint64_t diff64;
  6555. real_block_target(target, work->data);
  6556. diff = target_diff(target);
  6557. diff64 = diff;
  6558. suffix_string(diff64, goal->current_diff_str, sizeof(goal->current_diff_str), 0);
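/* Estimated network hashrate from difficulty: a difficulty-1 share takes
 * about 2^32 hashes and a block is found every ~600 s on average, so
 * hashrate ~= diff * 2^32 / 600 ~= diff * 7158278.8 h/s. */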
  6559. format_unit2(goal->net_hashrate, sizeof(goal->net_hashrate),
  6560. true, "h/s", H2B_SHORT, diff * 7158278, -1);
  6561. if (unlikely(goal->current_diff != diff))
  6562. applog(LOG_NOTICE, "Network difficulty changed to %s (%s)", goal->current_diff_str, goal->net_hashrate);
  6563. goal->current_diff = diff;
  6564. }
  6565. static bool test_work_current(struct work *work)
  6566. {
  6567. bool ret = true;
  6568. if (work->mandatory)
  6569. return ret;
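/* work->data[4..35] holds the previous-block hash; block_id is just its
 * first 32 bits, used as a cheap fingerprint for detecting block changes. */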
  6570. uint32_t block_id = ((uint32_t*)(work->data))[1];
  6571. const uint8_t * const prevblkhash = &work->data[4];
  6572. {
  6573. /* Hack to work around dud work sneaking into test */
  6574. bool dudwork = true;
  6575. for (int i = 8; i < 26; ++i)
  6576. if (work->data[i])
  6577. {
  6578. dudwork = false;
  6579. break;
  6580. }
  6581. if (dudwork)
  6582. goto out_free;
  6583. }
  6584. struct pool * const pool = work->pool;
  6585. struct mining_goal_info * const goal = pool->goal;
  6586. struct blockchain_info * const blkchain = goal->blkchain;
  6587. /* Search to see if this block exists yet and if not, consider it a
  6588. * new block and set the current block details to this one */
  6589. if (!block_exists(blkchain, prevblkhash))
  6590. {
struct block_info * const s = calloc(1, sizeof(struct block_info));
  6592. int deleted_block = 0;
  6593. ret = false;
  6594. if (unlikely(!s))
  6595. quit (1, "test_work_current OOM");
  6596. memcpy(s->prevblkhash, prevblkhash, sizeof(s->prevblkhash));
  6597. s->block_id = block_id;
  6598. s->block_seen_order = new_blocks++;
  6599. s->first_seen_time = time(NULL);
  6600. wr_lock(&blk_lock);
/* Only keep the last hour's worth of blocks in memory: valid work
 * from anything older is virtually impossible, and we want to keep
 * memory usage from rising continually */
  6604. if (HASH_COUNT(blkchain->blocks) > 6)
  6605. {
  6606. struct block_info *oldblock;
  6607. HASH_SORT(blkchain->blocks, block_sort);
  6608. oldblock = blkchain->blocks;
  6609. deleted_block = oldblock->block_seen_order;
  6610. HASH_DEL(blkchain->blocks, oldblock);
  6611. free(oldblock);
  6612. }
  6613. HASH_ADD(hh, blkchain->blocks, prevblkhash, sizeof(s->prevblkhash), s);
  6614. set_blockdiff(goal, work);
  6615. wr_unlock(&blk_lock);
  6616. pool->block_id = block_id;
  6617. pool_update_work_restart_time(pool);
  6618. if (deleted_block)
  6619. applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
  6620. #if BLKMAKER_VERSION > 1
  6621. template_nonce = 0;
  6622. #endif
  6623. set_curblock(goal, s);
  6624. if (unlikely(new_blocks == 1))
  6625. goto out_free;
  6626. if (!work->stratum)
  6627. {
  6628. if (work->longpoll)
  6629. {
  6630. applog(LOG_NOTICE, "Longpoll from pool %d detected new block",
  6631. pool->pool_no);
  6632. }
  6633. else
  6634. if (goal->have_longpoll)
  6635. applog(LOG_NOTICE, "New block detected on network before longpoll");
  6636. else
  6637. applog(LOG_NOTICE, "New block detected on network");
  6638. }
  6639. restart_threads();
  6640. }
  6641. else
  6642. {
  6643. bool restart = false;
  6644. if (unlikely(pool->block_id != block_id))
  6645. {
  6646. bool was_active = pool->block_id != 0;
  6647. pool->block_id = block_id;
  6648. pool_update_work_restart_time(pool);
  6649. if (!work->longpoll)
  6650. update_last_work(work);
  6651. if (was_active)
  6652. {
  6653. // Pool actively changed block
  6654. if (pool == current_pool())
  6655. restart = true;
  6656. if (block_id == blkchain->currentblk->block_id)
  6657. {
  6658. // Caught up, only announce if this pool is the one in use
  6659. if (restart)
  6660. applog(LOG_NOTICE, "%s %d caught up to new block",
  6661. work->longpoll ? "Longpoll from pool" : "Pool",
  6662. pool->pool_no);
  6663. }
  6664. else
  6665. {
  6666. // Switched to a block we know, but not the latest... why?
  6667. // This might detect pools trying to double-spend or 51%,
  6668. // but let's not make any accusations until it's had time
  6669. // in the real world.
  6670. char hexstr[65];
  6671. blkhashstr(hexstr, prevblkhash);
  6672. applog(LOG_WARNING, "%s %d is issuing work for an old block: %s",
  6673. work->longpoll ? "Longpoll from pool" : "Pool",
  6674. pool->pool_no,
  6675. hexstr);
  6676. }
  6677. }
  6678. }
  6679. if (work->longpoll)
  6680. {
  6681. struct pool * const cp = current_pool();
  6682. ++pool->work_restart_id;
  6683. if (work->tr && work->tr == pool->swork.tr)
  6684. pool->swork.work_restart_id = pool->work_restart_id;
  6685. update_last_work(work);
  6686. pool_update_work_restart_time(pool);
  6687. applog(
  6688. ((!opt_quiet_work_updates) && pool_actively_in_use(pool, cp) ? LOG_NOTICE : LOG_DEBUG),
  6689. "Longpoll from pool %d requested work update",
  6690. pool->pool_no);
  6691. if ((!restart) && pool == cp)
  6692. restart = true;
  6693. }
  6694. if (restart)
  6695. restart_threads();
  6696. }
  6697. work->longpoll = false;
  6698. out_free:
  6699. return ret;
  6700. }
  6701. static int tv_sort(struct work *worka, struct work *workb)
  6702. {
  6703. return worka->tv_staged.tv_sec - workb->tv_staged.tv_sec;
  6704. }
  6705. static bool work_rollable(struct work *work)
  6706. {
  6707. return (!work->clone && work->rolltime);
  6708. }
  6709. static bool hash_push(struct work *work)
  6710. {
  6711. bool rc = true;
  6712. mutex_lock(stgd_lock);
  6713. if (work_rollable(work))
  6714. staged_rollable++;
  6715. ++work_mining_algorithm(work)->staged;
  6716. if (work->spare)
  6717. ++staged_spare;
  6718. if (likely(!getq->frozen)) {
  6719. HASH_ADD_INT(staged_work, id, work);
  6720. HASH_SORT(staged_work, tv_sort);
  6721. } else
  6722. rc = false;
  6723. pthread_cond_broadcast(&getq->cond);
  6724. mutex_unlock(stgd_lock);
  6725. return rc;
  6726. }
  6727. static void stage_work(struct work *work)
  6728. {
  6729. applog(LOG_DEBUG, "Pushing work %d from pool %d to hash queue",
  6730. work->id, work->pool->pool_no);
  6731. work->work_restart_id = work->pool->work_restart_id;
  6732. work->pool->last_work_time = time(NULL);
  6733. cgtime(&work->pool->tv_last_work_time);
  6734. test_work_current(work);
  6735. work->pool->works++;
  6736. hash_push(work);
  6737. }
  6738. #ifdef HAVE_CURSES
  6739. int curses_int(const char *query)
  6740. {
  6741. int ret;
  6742. char *cvar;
  6743. cvar = curses_input(query);
  6744. if (unlikely(!cvar))
  6745. return -1;
  6746. ret = atoi(cvar);
  6747. free(cvar);
  6748. return ret;
  6749. }
  6750. #endif
  6751. #ifdef HAVE_CURSES
  6752. static bool input_pool(bool live);
  6753. #endif
  6754. #ifdef HAVE_CURSES
  6755. static void display_pool_summary(struct pool *pool)
  6756. {
  6757. double efficiency = 0.0;
  6758. char xfer[ALLOC_H2B_NOUNIT+ALLOC_H2B_SPACED+4+1], bw[ALLOC_H2B_NOUNIT+ALLOC_H2B_SPACED+6+1];
  6759. int pool_secs;
  6760. if (curses_active_locked()) {
  6761. wlog("Pool: %s Goal: %s\n", pool->rpc_url, pool->goal->name);
  6762. if (pool->solved)
  6763. wlog("SOLVED %d BLOCK%s!\n", pool->solved, pool->solved > 1 ? "S" : "");
  6764. if (!pool->has_stratum)
  6765. wlog("%s own long-poll support\n", pool->lp_url ? "Has" : "Does not have");
  6766. wlog(" Queued work requests: %d\n", pool->getwork_requested);
  6767. wlog(" Share submissions: %d\n", pool->accepted + pool->rejected);
  6768. wlog(" Accepted shares: %d\n", pool->accepted);
  6769. wlog(" Rejected shares: %d + %d stale (%.2f%%)\n",
  6770. pool->rejected, pool->stale_shares,
  6771. (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted)
  6772. );
  6773. wlog(" Accepted difficulty shares: %1.f\n", pool->diff_accepted);
  6774. wlog(" Rejected difficulty shares: %1.f\n", pool->diff_rejected);
  6775. pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL);
  6776. wlog(" Network transfer: %s (%s)\n",
  6777. multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
  6778. (float)pool->cgminer_pool_stats.net_bytes_received,
  6779. (float)pool->cgminer_pool_stats.net_bytes_sent),
  6780. multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
  6781. (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs),
  6782. (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs)));
  6783. uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent;
  6784. efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. / pool_bytes_xfer : 0.0;
  6785. wlog(" Efficiency (accepted * difficulty / 2 KB): %.2f\n", efficiency);
  6786. wlog(" Items worked on: %d\n", pool->works);
  6787. wlog(" Stale submissions discarded due to new blocks: %d\n", pool->stale_shares);
  6788. wlog(" Unable to get work from server occasions: %d\n", pool->getfail_occasions);
  6789. wlog(" Submitting work remotely delay occasions: %d\n\n", pool->remotefail_occasions);
  6790. unlock_curses();
  6791. }
  6792. }
  6793. #endif
  6794. /* We can't remove the memory used for this struct pool because there may
  6795. * still be work referencing it. We just remove it from the pools list */
  6796. void remove_pool(struct pool *pool)
  6797. {
  6798. int i, last_pool = total_pools - 1;
  6799. struct pool *other;
  6800. disable_pool(pool, POOL_DISABLED);
  6801. /* Boost priority of any lower prio than this one */
  6802. for (i = 0; i < total_pools; i++) {
  6803. other = pools[i];
  6804. if (other->prio > pool->prio)
  6805. other->prio--;
  6806. }
  6807. if (pool->pool_no < last_pool) {
  6808. /* Swap the last pool for this one */
  6809. (pools[last_pool])->pool_no = pool->pool_no;
  6810. pools[pool->pool_no] = pools[last_pool];
  6811. }
  6812. /* Give it an invalid number */
  6813. pool->pool_no = total_pools;
  6814. pool->removed = true;
  6815. pool->has_stratum = false;
  6816. total_pools--;
  6817. }
  6818. /* add a mutex if this needs to be thread safe in the future */
  6819. static struct JE {
  6820. char *buf;
  6821. struct JE *next;
  6822. } *jedata = NULL;
  6823. static void json_escape_free()
  6824. {
  6825. struct JE *jeptr = jedata;
  6826. struct JE *jenext;
  6827. jedata = NULL;
  6828. while (jeptr) {
  6829. jenext = jeptr->next;
  6830. free(jeptr->buf);
  6831. free(jeptr);
  6832. jeptr = jenext;
  6833. }
  6834. }
  6835. static
  6836. char *json_escape(const char *str)
  6837. {
  6838. struct JE *jeptr;
  6839. char *buf, *ptr;
  6840. /* 2x is the max, may as well just allocate that */
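/* e.g. the 3-char input a"b escapes to the 4-char a\"b plus a NUL, well
 * within the strlen*2+1 bound, which is only hit when every character
 * needs escaping */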
  6841. ptr = buf = malloc(strlen(str) * 2 + 1);
  6842. jeptr = malloc(sizeof(*jeptr));
  6843. jeptr->buf = buf;
  6844. jeptr->next = jedata;
  6845. jedata = jeptr;
  6846. while (*str) {
  6847. if (*str == '\\' || *str == '"')
  6848. *(ptr++) = '\\';
  6849. *(ptr++) = *(str++);
  6850. }
  6851. *ptr = '\0';
  6852. return buf;
  6853. }
  6854. static
  6855. void _write_config_string_elist(FILE *fcfg, const char *configname, struct string_elist * const elist)
  6856. {
  6857. if (!elist)
  6858. return;
struct string_elist *entry;
  6860. fprintf(fcfg, ",\n\"%s\" : [", configname);
  6861. bool first = true;
  6862. DL_FOREACH(elist, entry)
  6863. {
  6864. const char * const s = entry->string;
  6865. fprintf(fcfg, "%s\n\t\"%s\"", first ? "" : ",", json_escape(s));
  6866. first = false;
  6867. }
  6868. fprintf(fcfg, "\n]");
  6869. }
  6870. void write_config(FILE *fcfg)
  6871. {
  6872. int i;
  6873. /* Write pool values */
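/* A sketch (values illustrative) of the JSON shape this emits:
 * {
 * "pools" : [
 *     {
 *         "url" : "stratum+tcp://pool.example.com:3333",
 *         "user" : "worker",
 *         "pass" : "x",
 *         "pool-priority" : "0"
 *     }
 * ]
 * ...followed by top-level options, each written as ,\n"name" : value
 * } */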
  6874. fputs("{\n\"pools\" : [", fcfg);
  6875. for(i = 0; i < total_pools; i++) {
  6876. struct pool *pool = pools[i];
  6877. if (pool->failover_only)
  6878. // Don't write failover-only (automatically added) pools to the config file for now
  6879. continue;
  6880. if (pool->quota != 1) {
  6881. fprintf(fcfg, "%s\n\t{\n\t\t\"quota\" : \"%d;%s\",", i > 0 ? "," : "",
  6882. pool->quota,
  6883. json_escape(pool->rpc_url));
  6884. } else {
  6885. fprintf(fcfg, "%s\n\t{\n\t\t\"url\" : \"%s\",", i > 0 ? "," : "",
  6886. json_escape(pool->rpc_url));
  6887. }
  6888. if (pool->rpc_proxy)
  6889. fprintf(fcfg, "\n\t\t\"pool-proxy\" : \"%s\",", json_escape(pool->rpc_proxy));
  6890. fprintf(fcfg, "\n\t\t\"user\" : \"%s\",", json_escape(pool->rpc_user));
  6891. fprintf(fcfg, "\n\t\t\"pass\" : \"%s\",", json_escape(pool->rpc_pass));
  6892. if (strcmp(pool->goal->name, "default"))
  6893. fprintf(fcfg, "\n\t\t\"pool-goal\" : \"%s\",", pool->goal->name);
  6894. fprintf(fcfg, "\n\t\t\"pool-priority\" : \"%d\"", pool->prio);
  6895. if (pool->force_rollntime)
  6896. fprintf(fcfg, ",\n\t\t\"force-rollntime\" : %d", pool->force_rollntime);
  6897. fprintf(fcfg, "\n\t}");
  6898. }
  6899. fputs("\n]\n", fcfg);
  6900. #ifdef USE_OPENCL
  6901. write_config_opencl(fcfg);
  6902. #endif
  6903. #if defined(USE_CPUMINING) && defined(USE_SHA256D)
  6904. fprintf(fcfg, ",\n\"algo\" : \"%s\"", algo_names[opt_algo]);
  6905. #endif
  6906. /* Simple bool and int options */
  6907. struct opt_table *opt;
  6908. for (opt = opt_config_table; opt->type != OPT_END; opt++) {
  6909. char *p, *name = strdup(opt->names);
  6910. for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
  6911. if (p[1] != '-')
  6912. continue;
  6913. if (opt->type & OPT_NOARG &&
  6914. ((void *)opt->cb == (void *)opt_set_bool || (void *)opt->cb == (void *)opt_set_invbool) &&
  6915. (*(bool *)opt->u.arg == ((void *)opt->cb == (void *)opt_set_bool)))
  6916. fprintf(fcfg, ",\n\"%s\" : true", p+2);
  6917. if (opt->type & OPT_HASARG &&
  6918. ((void *)opt->cb_arg == (void *)set_int_0_to_9999 ||
  6919. (void *)opt->cb_arg == (void *)set_int_1_to_65535 ||
  6920. (void *)opt->cb_arg == (void *)set_int_0_to_10 ||
  6921. (void *)opt->cb_arg == (void *)set_int_1_to_10) &&
  6922. opt->desc != opt_hidden &&
  6923. 0 <= *(int *)opt->u.arg)
  6924. fprintf(fcfg, ",\n\"%s\" : \"%d\"", p+2, *(int *)opt->u.arg);
  6925. }
  6926. free(name);
  6927. }
  6928. /* Special case options */
  6929. if (request_target_str)
  6930. {
  6931. if (request_pdiff == (long)request_pdiff)
  6932. fprintf(fcfg, ",\n\"request-diff\" : %ld", (long)request_pdiff);
  6933. else
  6934. fprintf(fcfg, ",\n\"request-diff\" : %f", request_pdiff);
  6935. }
  6936. fprintf(fcfg, ",\n\"shares\" : %g", opt_shares);
  6937. if (pool_strategy == POOL_BALANCE)
  6938. fputs(",\n\"balance\" : true", fcfg);
  6939. if (pool_strategy == POOL_LOADBALANCE)
  6940. fputs(",\n\"load-balance\" : true", fcfg);
  6941. if (pool_strategy == POOL_ROUNDROBIN)
  6942. fputs(",\n\"round-robin\" : true", fcfg);
  6943. if (pool_strategy == POOL_ROTATE)
  6944. fprintf(fcfg, ",\n\"rotate\" : \"%d\"", opt_rotate_period);
  6945. #if defined(unix) || defined(__APPLE__)
  6946. if (opt_stderr_cmd && *opt_stderr_cmd)
  6947. fprintf(fcfg, ",\n\"monitor\" : \"%s\"", json_escape(opt_stderr_cmd));
  6948. #endif // defined(unix)
  6949. if (opt_kernel_path && *opt_kernel_path) {
  6950. char *kpath = strdup(opt_kernel_path);
  6951. if (kpath[strlen(kpath)-1] == '/')
  6952. kpath[strlen(kpath)-1] = 0;
  6953. fprintf(fcfg, ",\n\"kernel-path\" : \"%s\"", json_escape(kpath));
  6954. free(kpath);
  6955. }
  6956. if (schedstart.enable)
  6957. fprintf(fcfg, ",\n\"sched-time\" : \"%d:%d\"", schedstart.tm.tm_hour, schedstart.tm.tm_min);
  6958. if (schedstop.enable)
  6959. fprintf(fcfg, ",\n\"stop-time\" : \"%d:%d\"", schedstop.tm.tm_hour, schedstop.tm.tm_min);
  6960. if (opt_socks_proxy && *opt_socks_proxy)
  6961. fprintf(fcfg, ",\n\"socks-proxy\" : \"%s\"", json_escape(opt_socks_proxy));
  6962. _write_config_string_elist(fcfg, "scan", scan_devices);
  6963. #ifdef USE_LIBMICROHTTPD
  6964. if (httpsrv_port != -1)
  6965. fprintf(fcfg, ",\n\"http-port\" : %d", httpsrv_port);
  6966. #endif
  6967. #ifdef USE_LIBEVENT
  6968. if (stratumsrv_port != -1)
  6969. fprintf(fcfg, ",\n\"stratum-port\" : %ld", stratumsrv_port);
  6970. #endif
  6971. _write_config_string_elist(fcfg, "device", opt_devices_enabled_list);
  6972. _write_config_string_elist(fcfg, "set-device", opt_set_device_list);
  6973. if (opt_api_allow)
  6974. fprintf(fcfg, ",\n\"api-allow\" : \"%s\"", json_escape(opt_api_allow));
  6975. if (strcmp(opt_api_mcast_addr, API_MCAST_ADDR) != 0)
  6976. fprintf(fcfg, ",\n\"api-mcast-addr\" : \"%s\"", json_escape(opt_api_mcast_addr));
  6977. if (strcmp(opt_api_mcast_code, API_MCAST_CODE) != 0)
  6978. fprintf(fcfg, ",\n\"api-mcast-code\" : \"%s\"", json_escape(opt_api_mcast_code));
  6979. if (*opt_api_mcast_des)
  6980. fprintf(fcfg, ",\n\"api-mcast-des\" : \"%s\"", json_escape(opt_api_mcast_des));
  6981. if (strcmp(opt_api_description, PACKAGE_STRING) != 0)
  6982. fprintf(fcfg, ",\n\"api-description\" : \"%s\"", json_escape(opt_api_description));
  6983. if (opt_api_groups)
  6984. fprintf(fcfg, ",\n\"api-groups\" : \"%s\"", json_escape(opt_api_groups));
  6985. fputs("\n}\n", fcfg);
  6986. json_escape_free();
  6987. }
  6988. void zero_bestshare(void)
  6989. {
  6990. int i;
  6991. best_diff = 0;
  6992. suffix_string(best_diff, best_share, sizeof(best_share), 0);
  6993. for (i = 0; i < total_pools; i++) {
  6994. struct pool *pool = pools[i];
  6995. pool->best_diff = 0;
  6996. }
  6997. }
  6998. void zero_stats(void)
  6999. {
  7000. int i;
  7001. applog(LOG_DEBUG, "Zeroing stats");
  7002. cgtime(&total_tv_start);
  7003. miner_started = total_tv_start;
  7004. total_rolling = 0;
  7005. total_mhashes_done = 0;
  7006. total_getworks = 0;
  7007. total_accepted = 0;
  7008. total_rejected = 0;
  7009. hw_errors = 0;
  7010. total_stale = 0;
  7011. total_discarded = 0;
  7012. total_bytes_rcvd = total_bytes_sent = 0;
  7013. new_blocks = 0;
  7014. local_work = 0;
  7015. total_go = 0;
  7016. total_ro = 0;
  7017. total_secs = 1.0;
  7018. total_diff1 = 0;
  7019. total_bad_diff1 = 0;
  7020. found_blocks = 0;
  7021. total_diff_accepted = 0;
  7022. total_diff_rejected = 0;
  7023. total_diff_stale = 0;
  7024. #ifdef HAVE_CURSES
  7025. awidth = rwidth = swidth = hwwidth = 1;
  7026. #endif
  7027. struct mining_goal_info *goal, *tmpgoal;
  7028. HASH_ITER(hh, mining_goals, goal, tmpgoal)
  7029. {
  7030. goal->diff_accepted = 0;
  7031. }
  7032. for (i = 0; i < total_pools; i++) {
  7033. struct pool *pool = pools[i];
  7034. pool->getwork_requested = 0;
  7035. pool->accepted = 0;
  7036. pool->rejected = 0;
  7037. pool->solved = 0;
  7039. pool->stale_shares = 0;
  7040. pool->discarded_work = 0;
  7041. pool->getfail_occasions = 0;
  7042. pool->remotefail_occasions = 0;
  7043. pool->last_share_time = 0;
  7044. pool->works = 0;
  7045. pool->diff1 = 0;
  7046. pool->diff_accepted = 0;
  7047. pool->diff_rejected = 0;
  7048. pool->diff_stale = 0;
  7049. pool->last_share_diff = 0;
  7050. pool->cgminer_stats.start_tv = total_tv_start;
  7051. pool->cgminer_stats.getwork_calls = 0;
  7052. pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
  7053. pool->cgminer_stats.getwork_wait_max.tv_sec = 0;
  7054. pool->cgminer_stats.getwork_wait_max.tv_usec = 0;
  7055. pool->cgminer_pool_stats.getwork_calls = 0;
  7056. pool->cgminer_pool_stats.getwork_attempts = 0;
  7057. pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
  7058. pool->cgminer_pool_stats.getwork_wait_max.tv_sec = 0;
  7059. pool->cgminer_pool_stats.getwork_wait_max.tv_usec = 0;
  7060. pool->cgminer_pool_stats.min_diff = 0;
  7061. pool->cgminer_pool_stats.max_diff = 0;
  7062. pool->cgminer_pool_stats.min_diff_count = 0;
  7063. pool->cgminer_pool_stats.max_diff_count = 0;
  7064. pool->cgminer_pool_stats.times_sent = 0;
  7065. pool->cgminer_pool_stats.bytes_sent = 0;
  7066. pool->cgminer_pool_stats.net_bytes_sent = 0;
  7067. pool->cgminer_pool_stats.times_received = 0;
  7068. pool->cgminer_pool_stats.bytes_received = 0;
  7069. pool->cgminer_pool_stats.net_bytes_received = 0;
  7070. }
  7071. zero_bestshare();
  7072. for (i = 0; i < total_devices; ++i) {
  7073. struct cgpu_info *cgpu = get_devices(i);
  7074. mutex_lock(&hash_lock);
  7075. cgpu->total_mhashes = 0;
  7076. cgpu->accepted = 0;
  7077. cgpu->rejected = 0;
  7078. cgpu->stale = 0;
  7079. cgpu->hw_errors = 0;
  7080. cgpu->utility = 0.0;
  7081. cgpu->utility_diff1 = 0;
  7082. cgpu->last_share_pool_time = 0;
  7083. cgpu->bad_diff1 = 0;
  7084. cgpu->diff1 = 0;
  7085. cgpu->diff_accepted = 0;
  7086. cgpu->diff_rejected = 0;
  7087. cgpu->diff_stale = 0;
  7088. cgpu->last_share_diff = 0;
  7089. cgpu->thread_fail_init_count = 0;
  7090. cgpu->thread_zero_hash_count = 0;
  7091. cgpu->thread_fail_queue_count = 0;
  7092. cgpu->dev_sick_idle_60_count = 0;
  7093. cgpu->dev_dead_idle_600_count = 0;
  7094. cgpu->dev_nostart_count = 0;
  7095. cgpu->dev_over_heat_count = 0;
  7096. cgpu->dev_thermal_cutoff_count = 0;
  7097. cgpu->dev_comms_error_count = 0;
  7098. cgpu->dev_throttle_count = 0;
  7099. cgpu->cgminer_stats.start_tv = total_tv_start;
  7100. cgpu->cgminer_stats.getwork_calls = 0;
  7101. cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
  7102. cgpu->cgminer_stats.getwork_wait_max.tv_sec = 0;
  7103. cgpu->cgminer_stats.getwork_wait_max.tv_usec = 0;
  7104. mutex_unlock(&hash_lock);
  7105. if (cgpu->drv->zero_stats)
  7106. cgpu->drv->zero_stats(cgpu);
  7107. }
  7108. }
  7109. int bfg_strategy_parse(const char * const s)
  7110. {
  7111. char *endptr;
  7112. if (!(s && s[0]))
  7113. return -1;
  7114. long int selected = strtol(s, &endptr, 0);
  7115. if (endptr == s || *endptr) {
  7116. // Look-up by name
  7117. selected = -1;
  7118. for (unsigned i = 0; i <= TOP_STRATEGY; ++i) {
  7119. if (!strcasecmp(strategies[i].s, s)) {
  7120. selected = i;
  7121. }
  7122. }
  7123. }
  7124. if (selected < 0 || selected > TOP_STRATEGY) {
  7125. return -1;
  7126. }
  7127. return selected;
  7128. }
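/* Illustrative usage: bfg_strategy_parse("2") selects strategy 2 by
 * number, while bfg_strategy_parse("rotate") looks the name up in
 * strategies[] case-insensitively (assuming "Rotate" is one of the
 * strategy names); either form returns -1 when nothing matches. */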
  7129. bool bfg_strategy_change(const int selected, const char * const param)
  7130. {
  7131. if (param && param[0]) {
  7132. switch (selected) {
  7133. case POOL_ROTATE:
  7134. {
  7135. char *endptr;
  7136. long int n = strtol(param, &endptr, 0);
  7137. if (n < 0 || n > 9999 || *endptr) {
  7138. return false;
  7139. }
  7140. opt_rotate_period = n;
  7141. break;
  7142. }
  7143. default:
  7144. return false;
  7145. }
  7146. }
  7147. mutex_lock(&lp_lock);
  7148. pool_strategy = selected;
  7149. pthread_cond_broadcast(&lp_cond);
  7150. mutex_unlock(&lp_lock);
  7151. switch_pools(NULL);
  7152. return true;
  7153. }
  7154. #ifdef HAVE_CURSES
  7155. static
  7156. void loginput_mode(const int size)
  7157. {
  7158. clear_logwin();
  7159. loginput_size = size;
  7160. check_winsizes();
  7161. }
  7162. static void display_pools(void)
  7163. {
  7164. struct pool *pool;
  7165. int selected, i, j;
  7166. char input;
  7167. loginput_mode(7 + total_pools);
  7168. immedok(logwin, true);
  7169. updated:
  7170. for (j = 0; j < total_pools; j++) {
  7171. for (i = 0; i < total_pools; i++) {
  7172. pool = pools[i];
  7173. if (pool->prio != j)
  7174. continue;
  7175. if (pool_actively_in_use(pool, NULL))
  7176. wattron(logwin, A_BOLD);
  7177. if (pool->enabled != POOL_ENABLED || pool->failover_only)
  7178. wattron(logwin, A_DIM);
  7179. wlogprint("%d: ", pool->prio);
  7180. switch (pool->enabled) {
  7181. case POOL_ENABLED:
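/* Show "Failover" for pools that only serve as backups: under
 * load-balance that is any pool with zero quota; under the other
 * strategies (except failover itself, where every pool is a backup)
 * it is pools flagged failover_only. */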
  7182. if ((pool_strategy == POOL_LOADBALANCE) ? (!pool->quota)
  7183. : ((pool_strategy != POOL_FAILOVER) ? pool->failover_only : 0))
  7184. wlogprint("Failover ");
  7185. else
  7186. wlogprint("Enabled ");
  7187. break;
  7188. case POOL_DISABLED:
  7189. wlogprint("Disabled ");
  7190. break;
  7191. case POOL_REJECTING:
  7192. wlogprint("Rejectin ");
  7193. break;
  7194. case POOL_MISBEHAVING:
  7195. wlogprint("Misbehav ");
  7196. break;
  7197. }
  7198. _wlogprint(pool_proto_str(pool));
  7199. wlogprint(" Quota %d Pool %d: %s User:%s\n",
  7200. pool->quota,
  7201. pool->pool_no,
  7202. pool->rpc_url, pool->rpc_user);
  7203. wattroff(logwin, A_BOLD | A_DIM);
  7204. break; //for (i = 0; i < total_pools; i++)
  7205. }
  7206. }
  7207. retry:
  7208. wlogprint("\nCurrent pool management strategy: %s\n",
  7209. strategies[pool_strategy].s);
  7210. if (pool_strategy == POOL_ROTATE)
  7211. wlogprint("Set to rotate every %d minutes\n", opt_rotate_period);
  7212. wlogprint("[F]ailover only %s\n", opt_fail_only ? "enabled" : "disabled");
  7213. wlogprint("Pool [A]dd [R]emove [D]isable [E]nable [P]rioritize [Q]uota change\n");
  7214. wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n");
  7215. wlogprint("Or press any other key to continue\n");
  7216. logwin_update();
  7217. input = getch();
  7218. if (!strncasecmp(&input, "a", 1)) {
  7219. if (opt_benchmark)
  7220. {
  7221. wlogprint("Cannot add pools in benchmark mode");
  7222. goto retry;
  7223. }
  7224. input_pool(true);
  7225. goto updated;
  7226. } else if (!strncasecmp(&input, "r", 1)) {
  7227. if (total_pools <= 1) {
  7228. wlogprint("Cannot remove last pool");
  7229. goto retry;
  7230. }
  7231. selected = curses_int("Select pool number");
  7232. if (selected < 0 || selected >= total_pools) {
  7233. wlogprint("Invalid selection\n");
  7234. goto retry;
  7235. }
  7236. pool = pools[selected];
  7237. if (pool == current_pool())
  7238. switch_pools(NULL);
  7239. if (pool == current_pool()) {
  7240. wlogprint("Unable to remove pool due to activity\n");
  7241. goto retry;
  7242. }
  7243. remove_pool(pool);
  7244. goto updated;
  7245. } else if (!strncasecmp(&input, "s", 1)) {
  7246. selected = curses_int("Select pool number");
  7247. if (selected < 0 || selected >= total_pools) {
  7248. wlogprint("Invalid selection\n");
  7249. goto retry;
  7250. }
  7251. pool = pools[selected];
  7252. manual_enable_pool(pool);
  7253. switch_pools(pool);
  7254. goto updated;
  7255. } else if (!strncasecmp(&input, "d", 1)) {
  7256. if (enabled_pools <= 1) {
  7257. wlogprint("Cannot disable last pool");
  7258. goto retry;
  7259. }
  7260. selected = curses_int("Select pool number");
  7261. if (selected < 0 || selected >= total_pools) {
  7262. wlogprint("Invalid selection\n");
  7263. goto retry;
  7264. }
  7265. pool = pools[selected];
  7266. disable_pool(pool, POOL_DISABLED);
  7267. goto updated;
  7268. } else if (!strncasecmp(&input, "e", 1)) {
  7269. selected = curses_int("Select pool number");
  7270. if (selected < 0 || selected >= total_pools) {
  7271. wlogprint("Invalid selection\n");
  7272. goto retry;
  7273. }
  7274. pool = pools[selected];
  7275. manual_enable_pool(pool);
  7276. goto updated;
  7277. } else if (!strncasecmp(&input, "c", 1)) {
  7278. for (i = 0; i <= TOP_STRATEGY; i++)
  7279. wlogprint("%d: %s\n", i, strategies[i].s);
  7280. {
  7281. char * const selected_str = curses_input("Select strategy type");
  7282. selected = bfg_strategy_parse(selected_str);
  7283. free(selected_str);
  7284. }
  7285. if (selected < 0 || selected > TOP_STRATEGY) {
  7286. wlogprint("Invalid selection\n");
  7287. goto retry;
  7288. }
  7289. char *param = NULL;
  7290. if (selected == POOL_ROTATE) {
  7291. param = curses_input("Select interval in minutes");
  7292. }
  7293. bool result = bfg_strategy_change(selected, param);
  7294. free(param);
  7295. if (!result) {
  7296. wlogprint("Invalid selection\n");
  7297. goto retry;
  7298. }
  7299. goto updated;
  7300. } else if (!strncasecmp(&input, "i", 1)) {
  7301. selected = curses_int("Select pool number");
  7302. if (selected < 0 || selected >= total_pools) {
  7303. wlogprint("Invalid selection\n");
  7304. goto retry;
  7305. }
  7306. pool = pools[selected];
  7307. display_pool_summary(pool);
  7308. goto retry;
  7309. } else if (!strncasecmp(&input, "q", 1)) {
  7310. selected = curses_int("Select pool number");
  7311. if (selected < 0 || selected >= total_pools) {
  7312. wlogprint("Invalid selection\n");
  7313. goto retry;
  7314. }
  7315. pool = pools[selected];
  7316. selected = curses_int("Set quota");
  7317. if (selected < 0) {
  7318. wlogprint("Invalid negative quota\n");
  7319. goto retry;
  7320. }
  7321. if (selected > 0)
  7322. pool->failover_only = false;
  7323. pool->quota = selected;
  7324. adjust_quota_gcd();
  7325. goto updated;
  7326. } else if (!strncasecmp(&input, "f", 1)) {
  7327. opt_fail_only ^= true;
  7328. goto updated;
  7329. } else if (!strncasecmp(&input, "p", 1)) {
  7330. char *prilist = curses_input("Enter new pool priority (comma separated list)");
  7331. if (!prilist)
  7332. {
  7333. wlogprint("Not changing priorities\n");
  7334. goto retry;
  7335. }
  7336. int res = prioritize_pools(prilist, &i);
  7337. free(prilist);
  7338. switch (res) {
  7339. case MSG_NOPOOL:
  7340. wlogprint("No pools\n");
  7341. goto retry;
  7342. case MSG_MISPID:
  7343. wlogprint("Missing pool id parameter\n");
  7344. goto retry;
  7345. case MSG_INVPID:
  7346. wlogprint("Invalid pool id %d - range is 0 - %d\n", i, total_pools - 1);
  7347. goto retry;
  7348. case MSG_DUPPID:
  7349. wlogprint("Duplicate pool specified %d\n", i);
  7350. goto retry;
  7351. case MSG_POOLPRIO:
  7352. default:
  7353. goto updated;
  7354. }
  7355. }
  7356. immedok(logwin, false);
  7357. loginput_mode(0);
  7358. }
  7359. static const char *summary_detail_level_str(void)
  7360. {
  7361. if (opt_compact)
  7362. return "compact";
  7363. if (opt_show_procs)
  7364. return "processors";
  7365. return "devices";
  7366. }
  7367. static void display_options(void)
  7368. {
  7369. int selected;
  7370. char input;
  7371. immedok(logwin, true);
  7372. loginput_mode(12);
  7373. retry:
  7374. clear_logwin();
  7375. wlogprint("[N]ormal [C]lear [S]ilent mode (disable all output)\n");
  7376. wlogprint("[D]ebug:%s\n[P]er-device:%s\n[Q]uiet:%s\n[V]erbose:%s\n"
  7377. "[R]PC debug:%s\n[W]orkTime details:%s\nsu[M]mary detail level:%s\n"
  7378. "[L]og interval:%d\nS[T]atistical counts: %s\n[Z]ero statistics\n",
  7379. opt_debug_console ? "on" : "off",
  7380. want_per_device_stats? "on" : "off",
  7381. opt_quiet ? "on" : "off",
  7382. opt_log_output ? "on" : "off",
  7383. opt_protocol ? "on" : "off",
  7384. opt_worktime ? "on" : "off",
  7385. summary_detail_level_str(),
  7386. opt_log_interval,
  7387. opt_weighed_stats ? "weighed" : "absolute");
  7388. wlogprint("Select an option or any other key to return\n");
  7389. logwin_update();
  7390. input = getch();
  7391. if (!strncasecmp(&input, "q", 1)) {
  7392. opt_quiet ^= true;
  7393. wlogprint("Quiet mode %s\n", opt_quiet ? "enabled" : "disabled");
  7394. goto retry;
  7395. } else if (!strncasecmp(&input, "v", 1)) {
  7396. opt_log_output ^= true;
  7397. if (opt_log_output)
  7398. opt_quiet = false;
  7399. wlogprint("Verbose mode %s\n", opt_log_output ? "enabled" : "disabled");
  7400. goto retry;
  7401. } else if (!strncasecmp(&input, "n", 1)) {
  7402. opt_log_output = false;
  7403. opt_debug_console = false;
  7404. opt_quiet = false;
  7405. opt_protocol = false;
  7406. opt_compact = false;
  7407. opt_show_procs = false;
  7408. devsummaryYOffset = 0;
  7409. want_per_device_stats = false;
  7410. wlogprint("Output mode reset to normal\n");
  7411. switch_logsize();
  7412. goto retry;
  7413. } else if (!strncasecmp(&input, "d", 1)) {
  7414. opt_debug = true;
  7415. opt_debug_console ^= true;
  7416. opt_log_output = opt_debug_console;
  7417. if (opt_debug_console)
  7418. opt_quiet = false;
  7419. wlogprint("Debug mode %s\n", opt_debug_console ? "enabled" : "disabled");
  7420. goto retry;
  7421. } else if (!strncasecmp(&input, "m", 1)) {
  7422. if (opt_compact)
  7423. opt_compact = false;
  7424. else
  7425. if (!opt_show_procs)
  7426. opt_show_procs = true;
  7427. else
  7428. {
  7429. opt_compact = true;
  7430. opt_show_procs = false;
  7431. devsummaryYOffset = 0;
  7432. }
  7433. wlogprint("su[M]mary detail level changed to: %s\n", summary_detail_level_str());
  7434. switch_logsize();
  7435. goto retry;
  7436. } else if (!strncasecmp(&input, "p", 1)) {
  7437. want_per_device_stats ^= true;
  7438. opt_log_output = want_per_device_stats;
  7439. wlogprint("Per-device stats %s\n", want_per_device_stats ? "enabled" : "disabled");
  7440. goto retry;
  7441. } else if (!strncasecmp(&input, "r", 1)) {
  7442. opt_protocol ^= true;
  7443. if (opt_protocol)
  7444. opt_quiet = false;
  7445. wlogprint("RPC protocol debugging %s\n", opt_protocol ? "enabled" : "disabled");
  7446. goto retry;
  7447. } else if (!strncasecmp(&input, "c", 1))
  7448. clear_logwin();
  7449. else if (!strncasecmp(&input, "l", 1)) {
  7450. selected = curses_int("Interval in seconds");
  7451. if (selected < 0 || selected > 9999) {
  7452. wlogprint("Invalid selection\n");
  7453. goto retry;
  7454. }
  7455. opt_log_interval = selected;
  7456. wlogprint("Log interval set to %d seconds\n", opt_log_interval);
  7457. goto retry;
  7458. } else if (!strncasecmp(&input, "s", 1)) {
  7459. opt_realquiet = true;
  7460. } else if (!strncasecmp(&input, "w", 1)) {
  7461. opt_worktime ^= true;
  7462. wlogprint("WorkTime details %s\n", opt_worktime ? "enabled" : "disabled");
  7463. goto retry;
  7464. } else if (!strncasecmp(&input, "t", 1)) {
  7465. opt_weighed_stats ^= true;
  7466. wlogprint("Now displaying %s statistics\n", opt_weighed_stats ? "weighed" : "absolute");
  7467. goto retry;
  7468. } else if (!strncasecmp(&input, "z", 1)) {
  7469. zero_stats();
  7470. goto retry;
  7471. }
  7472. immedok(logwin, false);
  7473. loginput_mode(0);
  7474. }
  7475. #endif
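/* Fills `filename` with the default config path ($HOME/.bfgminer/ plus
 * def_conf on unix-likes). Callers must supply a buffer of at least
 * PATH_MAX bytes, as set_options() below does; the strcpy/strcat here
 * assume $HOME fits within it. */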
  7476. void default_save_file(char *filename)
  7477. {
  7478. #if defined(unix) || defined(__APPLE__)
  7479. if (getenv("HOME") && *getenv("HOME")) {
  7480. strcpy(filename, getenv("HOME"));
  7481. strcat(filename, "/");
  7482. }
  7483. else
  7484. strcpy(filename, "");
  7485. strcat(filename, ".bfgminer/");
  7486. mkdir(filename, 0777);
  7487. #else
  7488. strcpy(filename, "");
  7489. #endif
  7490. strcat(filename, def_conf);
  7491. }
  7492. #ifdef HAVE_CURSES
  7493. static void set_options(void)
  7494. {
  7495. int selected;
  7496. char input;
  7497. immedok(logwin, true);
  7498. loginput_mode(8);
  7499. retry:
  7500. wlogprint("\n[L]ongpoll: %s\n", want_longpoll ? "On" : "Off");
  7501. wlogprint("[Q]ueue: %d\n[S]cantime: %d\n[E]xpiry: %d\n[R]etries: %d\n"
  7502. "[W]rite config file\n[B]FGMiner restart\n",
  7503. opt_queue, opt_scantime, opt_expiry, opt_retries);
  7504. wlogprint("Select an option or any other key to return\n");
  7505. logwin_update();
  7506. input = getch();
  7507. if (!strncasecmp(&input, "q", 1)) {
  7508. selected = curses_int("Extra work items to queue");
  7509. if (selected < 0 || selected > 9999) {
  7510. wlogprint("Invalid selection\n");
  7511. goto retry;
  7512. }
  7513. opt_queue = selected;
  7514. goto retry;
  7515. } else if (!strncasecmp(&input, "l", 1)) {
  7516. if (want_longpoll)
  7517. stop_longpoll();
  7518. else
  7519. start_longpoll();
  7520. applog(LOG_WARNING, "Longpoll %s", want_longpoll ? "enabled" : "disabled");
  7521. goto retry;
  7522. } else if (!strncasecmp(&input, "s", 1)) {
  7523. selected = curses_int("Set scantime in seconds");
  7524. if (selected < 0 || selected > 9999) {
  7525. wlogprint("Invalid selection\n");
  7526. goto retry;
  7527. }
  7528. opt_scantime = selected;
  7529. goto retry;
  7530. } else if (!strncasecmp(&input, "e", 1)) {
  7531. selected = curses_int("Set expiry time in seconds");
  7532. if (selected < 0 || selected > 9999) {
  7533. wlogprint("Invalid selection\n");
  7534. goto retry;
  7535. }
  7536. opt_expiry = selected;
  7537. goto retry;
  7538. } else if (!strncasecmp(&input, "r", 1)) {
  7539. selected = curses_int("Retries before failing (-1 infinite)");
  7540. if (selected < -1 || selected > 9999) {
  7541. wlogprint("Invalid selection\n");
  7542. goto retry;
  7543. }
  7544. opt_retries = selected;
  7545. goto retry;
  7546. } else if (!strncasecmp(&input, "w", 1)) {
  7547. FILE *fcfg;
  7548. char *str, filename[PATH_MAX], prompt[PATH_MAX + 50];
  7549. default_save_file(filename);
  7550. snprintf(prompt, sizeof(prompt), "Config filename to write (Enter for default) [%s]", filename);
  7551. str = curses_input(prompt);
  7552. if (str) {
  7553. struct stat statbuf;
  7554. strcpy(filename, str);
  7555. free(str);
  7556. if (!stat(filename, &statbuf)) {
  7557. wlogprint("File exists, overwrite?\n");
  7558. input = getch();
  7559. if (strncasecmp(&input, "y", 1))
  7560. goto retry;
  7561. }
  7562. }
  7563. fcfg = fopen(filename, "w");
  7564. if (!fcfg) {
  7565. wlogprint("Cannot open or create file\n");
  7566. goto retry;
  7567. }
  7568. write_config(fcfg);
  7569. fclose(fcfg);
  7570. goto retry;
  7571. } else if (!strncasecmp(&input, "b", 1)) {
  7572. wlogprint("Are you sure?\n");
  7573. input = getch();
  7574. if (!strncasecmp(&input, "y", 1))
  7575. app_restart();
  7576. else
  7577. clear_logwin();
  7578. } else
  7579. clear_logwin();
  7580. loginput_mode(0);
  7581. immedok(logwin, false);
  7582. }
  7583. int scan_serial(const char *);
  7584. static
  7585. void _managetui_msg(const char *repr, const char **msg)
  7586. {
  7587. if (*msg)
  7588. {
  7589. applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": %s", repr, *msg);
  7590. wattron(logwin, A_BOLD);
  7591. wlogprint("%s", *msg);
  7592. wattroff(logwin, A_BOLD);
  7593. *msg = NULL;
  7594. }
  7595. logwin_update();
  7596. }
  7597. void manage_device(void)
  7598. {
  7599. char logline[256];
  7600. const char *msg = NULL;
  7601. struct cgpu_info *cgpu;
  7602. const struct device_drv *drv;
  7603. selecting_device = true;
  7604. immedok(logwin, true);
  7605. loginput_mode(12);
  7606. devchange:
  7607. if (unlikely(!total_devices))
  7608. {
  7609. clear_logwin();
  7610. wlogprint("(no devices)\n");
  7611. wlogprint("[Plus] Add device(s) [Enter] Close device manager\n");
  7612. _managetui_msg("(none)", &msg);
  7613. int input = getch();
  7614. switch (input)
  7615. {
  7616. case '+': case '=': // add new device
  7617. goto addnew;
  7618. default:
  7619. goto out;
  7620. }
  7621. }
  7622. cgpu = devices[selected_device];
  7623. drv = cgpu->drv;
  7624. refresh_devstatus();
  7625. refresh:
  7626. clear_logwin();
  7627. wlogprint("Select processor to manage using up/down arrow keys\n");
  7628. get_statline3(logline, sizeof(logline), cgpu, true, true);
  7629. wattron(logwin, A_BOLD);
  7630. wlogprint("%s", logline);
  7631. wattroff(logwin, A_BOLD);
  7632. wlogprint("\n");
  7633. if (cgpu->dev_manufacturer)
  7634. wlogprint(" %s from %s\n", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer);
  7635. else
  7636. if (cgpu->dev_product)
  7637. wlogprint(" %s\n", cgpu->dev_product);
  7638. if (cgpu->dev_serial)
  7639. wlogprint("Serial: %s\n", cgpu->dev_serial);
  7640. if (cgpu->kname)
  7641. wlogprint("Kernel: %s\n", cgpu->kname);
  7642. if (drv->proc_wlogprint_status && likely(cgpu->status != LIFE_INIT))
  7643. drv->proc_wlogprint_status(cgpu);
  7644. wlogprint("\n");
  7645. // TODO: Last share at TIMESTAMP on pool N
  7646. // TODO: Custom device info/commands
  7647. if (cgpu->deven != DEV_ENABLED)
  7648. wlogprint("[E]nable ");
  7649. if (cgpu->deven != DEV_DISABLED)
  7650. wlogprint("[D]isable ");
  7651. if (drv->identify_device)
  7652. wlogprint("[I]dentify ");
  7653. if (drv->proc_tui_wlogprint_choices && likely(cgpu->status != LIFE_INIT))
  7654. drv->proc_tui_wlogprint_choices(cgpu);
  7655. wlogprint("\n");
  7656. wlogprint("[Slash] Find processor [Plus] Add device(s) [Enter] Close device manager\n");
  7657. _managetui_msg(cgpu->proc_repr, &msg);
  7658. while (true)
  7659. {
  7660. int input = getch();
  7661. applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": (choice %d)", cgpu->proc_repr, input);
  7662. switch (input) {
  7663. case 'd': case 'D':
  7664. if (cgpu->deven == DEV_DISABLED)
  7665. msg = "Processor already disabled\n";
  7666. else
  7667. {
  7668. cgpu->deven = DEV_DISABLED;
  7669. msg = "Processor being disabled\n";
  7670. }
  7671. goto refresh;
  7672. case 'e': case 'E':
  7673. if (cgpu->deven == DEV_ENABLED)
  7674. msg = "Processor already enabled\n";
  7675. else
  7676. {
  7677. proc_enable(cgpu);
  7678. msg = "Processor being enabled\n";
  7679. }
  7680. goto refresh;
  7681. case 'i': case 'I':
  7682. if (drv->identify_device && drv->identify_device(cgpu))
  7683. msg = "Identify command sent\n";
  7684. else
  7685. goto key_default;
  7686. goto refresh;
  7687. case KEY_DOWN:
  7688. if (selected_device >= total_devices - 1)
  7689. break;
  7690. ++selected_device;
  7691. goto devchange;
  7692. case KEY_UP:
  7693. if (selected_device <= 0)
  7694. break;
  7695. --selected_device;
  7696. goto devchange;
  7697. case KEY_NPAGE:
  7698. {
  7699. if (selected_device >= total_devices - 1)
  7700. break;
  7701. struct cgpu_info *mdev = devices[selected_device]->device;
  7702. do {
  7703. ++selected_device;
  7704. } while (devices[selected_device]->device == mdev && selected_device < total_devices - 1);
  7705. goto devchange;
  7706. }
  7707. case KEY_PPAGE:
  7708. {
  7709. if (selected_device <= 0)
  7710. break;
  7711. struct cgpu_info *mdev = devices[selected_device]->device;
  7712. do {
  7713. --selected_device;
  7714. } while (devices[selected_device]->device == mdev && selected_device > 0);
  7715. goto devchange;
  7716. }
  7717. case '/': case '?': // find device
  7718. {
  7719. static char *pattern = NULL;
  7720. char *newpattern = curses_input("Enter pattern");
  7721. if (newpattern)
  7722. {
  7723. free(pattern);
  7724. pattern = newpattern;
  7725. }
  7726. else
  7727. if (!pattern)
  7728. pattern = calloc(1, 1);
  7729. int match = cgpu_search(pattern, selected_device + 1);
  7730. if (match == -1)
  7731. {
  7732. msg = "Couldn't find device\n";
  7733. goto refresh;
  7734. }
  7735. selected_device = match;
  7736. goto devchange;
  7737. }
  7738. case '+': case '=': // add new device
  7739. {
  7740. addnew:
  7741. clear_logwin();
  7742. _wlogprint(
  7743. "Enter \"auto\", \"all\", or a serial port to probe for mining devices.\n"
  7744. "Prefix by a driver name and colon to only probe a specific driver.\n"
  7745. "For example: erupter:"
  7746. #ifdef WIN32
  7747. "\\\\.\\COM40"
  7748. #elif defined(__APPLE__)
  7749. "/dev/cu.SLAB_USBtoUART"
  7750. #else
  7751. "/dev/ttyUSB39"
  7752. #endif
  7753. "\n"
  7754. );
  7755. char *scanser = curses_input("Enter target");
  7756. if (scan_serial(scanser))
  7757. {
  7758. selected_device = total_devices - 1;
  7759. msg = "Device scan succeeded\n";
  7760. }
  7761. else
  7762. msg = "No new devices found\n";
  7763. goto devchange;
  7764. }
  7765. case 'Q': case 'q':
  7766. case KEY_BREAK: case KEY_BACKSPACE: case KEY_CANCEL: case KEY_CLOSE: case KEY_EXIT:
  7767. case '\x1b': // ESC
  7768. case KEY_ENTER:
  7769. case '\r': // Ctrl-M on Windows, with nonl
  7770. #ifdef PADENTER
  7771. case PADENTER: // pdcurses, used by Enter key on Windows with nonl
  7772. #endif
  7773. case '\n':
  7774. goto out;
  7775. default:
  7776. ;
  7777. key_default:
  7778. if (drv->proc_tui_handle_choice && likely(drv_ready(cgpu)))
  7779. {
  7780. msg = drv->proc_tui_handle_choice(cgpu, input);
  7781. if (msg)
  7782. goto refresh;
  7783. }
  7784. }
  7785. }
  7786. out:
  7787. selecting_device = false;
  7788. loginput_mode(0);
  7789. immedok(logwin, false);
  7790. }
  7791. void show_help(void)
  7792. {
  7793. loginput_mode(11);
  7794. // NOTE: wlogprint is a macro with a buffer limit
  7795. _wlogprint(
  7796. "LU: oldest explicit work update currently being used for new work\n"
  7797. "ST: work in queue | F: network fails | NB: new blocks detected\n"
  7798. "AS: shares being submitted | BW: bandwidth (up/down)\n"
  7799. "E: # shares * diff per 2kB bw | I: expected income | BS: best share ever found\n"
  7800. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7801. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7802. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7803. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE
  7804. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7805. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7806. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE U8_HLINE U8_HLINE U8_HLINE
  7807. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7808. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7809. U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE
  7810. "\n"
  7811. "devices/processors hashing (only for totals line), hottest temperature\n"
  7812. );
  7813. wlogprint(
  7814. "hashrates: %ds decaying / all-time average / all-time average (effective)\n"
  7815. , opt_log_interval);
  7816. _wlogprint(
  7817. "A: accepted shares | R: rejected+discarded(% of total)\n"
  7818. "HW: hardware errors / % nonces invalid\n"
  7819. "\n"
  7820. "Press any key to clear"
  7821. );
  7822. logwin_update();
  7823. getch();
  7824. loginput_mode(0);
  7825. }
  7826. static void *input_thread(void __maybe_unused *userdata)
  7827. {
  7828. RenameThread("input");
  7829. if (!curses_active)
  7830. return NULL;
  7831. while (1) {
  7832. int input;
  7833. input = getch();
  7834. switch (input) {
  7835. case 'h': case 'H': case '?':
  7836. case KEY_F(1):
  7837. show_help();
  7838. break;
  7839. case 'q': case 'Q':
  7840. kill_work();
  7841. return NULL;
  7842. case 'd': case 'D':
  7843. display_options();
  7844. break;
  7845. case 'm': case 'M':
  7846. manage_device();
  7847. break;
  7848. case 'p': case 'P':
  7849. display_pools();
  7850. break;
  7851. case 's': case 'S':
  7852. set_options();
  7853. break;
  7854. #ifdef HAVE_CURSES
  7855. case KEY_DOWN:
  7856. {
  7857. const int visible_lines = logcursor - devcursor;
  7858. const int invisible_lines = total_lines - visible_lines;
  7859. if (devsummaryYOffset <= -invisible_lines)
  7860. break;
  7861. devsummaryYOffset -= 2;
  7862. }
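/* apparent deliberate fall through to KEY_UP: the -2 above plus the ++
 * below nets a one-line scroll down, and reuses KEY_UP's
 * refresh_devstatus() call */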
  7863. case KEY_UP:
  7864. if (devsummaryYOffset == 0)
  7865. break;
  7866. ++devsummaryYOffset;
  7867. refresh_devstatus();
  7868. break;
  7869. case KEY_NPAGE:
  7870. {
  7871. const int visible_lines = logcursor - devcursor;
  7872. const int invisible_lines = total_lines - visible_lines;
  7873. if (devsummaryYOffset - visible_lines <= -invisible_lines)
  7874. devsummaryYOffset = -invisible_lines;
  7875. else
  7876. devsummaryYOffset -= visible_lines;
  7877. refresh_devstatus();
  7878. break;
  7879. }
  7880. case KEY_PPAGE:
  7881. {
  7882. const int visible_lines = logcursor - devcursor;
  7883. if (devsummaryYOffset + visible_lines >= 0)
  7884. devsummaryYOffset = 0;
  7885. else
  7886. devsummaryYOffset += visible_lines;
  7887. refresh_devstatus();
  7888. break;
  7889. }
  7890. #endif
  7891. }
  7892. if (opt_realquiet) {
  7893. disable_curses();
  7894. break;
  7895. }
  7896. }
  7897. return NULL;
  7898. }
  7899. #endif
  7900. static void *api_thread(void *userdata)
  7901. {
  7902. struct thr_info *mythr = userdata;
  7903. pthread_detach(pthread_self());
  7904. pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
  7905. RenameThread("rpc");
  7906. api(api_thr_id);
  7907. mythr->has_pth = false;
  7908. return NULL;
  7909. }
  7910. void thread_reportin(struct thr_info *thr)
  7911. {
  7912. cgtime(&thr->last);
  7913. thr->cgpu->status = LIFE_WELL;
  7914. thr->getwork = 0;
  7915. thr->cgpu->device_last_well = time(NULL);
  7916. }
  7917. void thread_reportout(struct thr_info *thr)
  7918. {
  7919. thr->getwork = time(NULL);
  7920. }
static void hashmeter(int thr_id, struct timeval *diff,
		      uint64_t hashes_done)
{
	char logstatusline[256];
	struct timeval temp_tv_end, total_diff;
	double secs;
	double local_secs;
	static double local_mhashes_done = 0;
	double local_mhashes = (double)hashes_done / 1000000.0;
	bool showlog = false;
	char cHr[ALLOC_H2B_NOUNIT+1], aHr[ALLOC_H2B_NOUNIT+1], uHr[ALLOC_H2B_SPACED+3+1];
	char rejpcbuf[6];
	char bnbuf[6];
	struct thr_info *thr;

	/* Update the last time this thread reported in */
	if (thr_id >= 0) {
		thr = get_thread(thr_id);
		cgtime(&(thr->last));
		thr->cgpu->device_last_well = time(NULL);
	}

	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);

	/* So we can call hashmeter from a non worker thread */
	if (thr_id >= 0) {
		struct cgpu_info *cgpu = thr->cgpu;
		int threadobj = cgpu->threads ?: 1;
		double thread_rolling = 0.0;
		int i;

		applog(LOG_DEBUG, "[thread %d: %"PRIu64" hashes, %.1f khash/sec]",
		       thr_id, hashes_done, hashes_done / 1000 / secs);

		/* Rolling average for each thread and each device */
		decay_time(&thr->rolling, local_mhashes / secs, secs);
		for (i = 0; i < threadobj; i++)
			thread_rolling += cgpu->thr[i]->rolling;

		mutex_lock(&hash_lock);
		decay_time(&cgpu->rolling, thread_rolling, secs);
		cgpu->total_mhashes += local_mhashes;
		mutex_unlock(&hash_lock);

		// If needed, output detailed, per-device stats
		if (want_per_device_stats) {
			struct timeval now;
			struct timeval elapsed;
			struct timeval *last_msg_tv = opt_show_procs ? &thr->cgpu->last_message_tv : &thr->cgpu->device->last_message_tv;

			cgtime(&now);
			timersub(&now, last_msg_tv, &elapsed);
			if (opt_log_interval <= elapsed.tv_sec) {
				struct cgpu_info *cgpu = thr->cgpu;
				char logline[255];

				*last_msg_tv = now;

				get_statline(logline, sizeof(logline), cgpu);
				if (!curses_active) {
					printf("\n%s\r", logline);
					fflush(stdout);
				} else
					applog(LOG_INFO, "%s", logline);
			}
		}
	}

	/* Totals are updated by all threads, so they can race without locking */
	mutex_lock(&hash_lock);
	cgtime(&temp_tv_end);
	timersub(&temp_tv_end, &total_tv_start, &total_diff);
	total_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	timersub(&temp_tv_end, &total_tv_end, &total_diff);

	total_mhashes_done += local_mhashes;
	local_mhashes_done += local_mhashes;
	/* Only update with opt_log_interval */
	if (total_diff.tv_sec < opt_log_interval)
		goto out_unlock;
	showlog = true;
	cgtime(&total_tv_end);

	local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0);
	decay_time(&total_rolling, local_mhashes_done / local_secs, local_secs);
	global_hashrate = ((unsigned long long)lround(total_rolling)) * 1000000;

	double wtotal = (total_diff_accepted + total_diff_rejected + total_diff_stale);
	multi_format_unit_array2(
		((char*[]){cHr, aHr, uHr}),
		((size_t[]){sizeof(cHr), sizeof(aHr), sizeof(uHr)}),
		true, "h/s", H2B_SHORT,
		3,
		1e6*total_rolling,
		1e6*total_mhashes_done / total_secs,
		utility_to_hashrate(total_diff1 * (wtotal ? (total_diff_accepted / wtotal) : 1) * 60 / total_secs));

	int ui_accepted, ui_rejected, ui_stale;
	if (opt_weighed_stats)
	{
		ui_accepted = total_diff_accepted;
		ui_rejected = total_diff_rejected;
		ui_stale = total_diff_stale;
	}
	else
	{
		ui_accepted = total_accepted;
		ui_rejected = total_rejected;
		ui_stale = total_stale;
	}

#ifdef HAVE_CURSES
	if (curses_active_locked()) {
		float temp = 0;
		struct cgpu_info *proc, *last_working_dev = NULL;
		int i, working_devs = 0, working_procs = 0;
		int divx;
		bool bad = false;

		// Find the highest temperature of all processors
		for (i = 0; i < total_devices; ++i)
		{
			proc = get_devices(i);
			if (proc->temp > temp)
				temp = proc->temp;
			if (unlikely(proc->deven == DEV_DISABLED))
				;  // Just need to block it off from both conditions
			else
			if (likely(proc->status == LIFE_WELL && proc->deven == DEV_ENABLED))
			{
				if (proc->rolling > .1)
				{
					++working_procs;
					if (proc->device != last_working_dev)
					{
						++working_devs;
						last_working_dev = proc->device;
					}
				}
			}
			else
				bad = true;
		}
		if (working_devs == working_procs)
			snprintf(statusline, sizeof(statusline), "%s%d ", bad ? U8_BAD_START : "", working_devs);
		else
			snprintf(statusline, sizeof(statusline), "%s%d/%d ", bad ? U8_BAD_START : "", working_devs, working_procs);
		divx = 7;
		if (opt_show_procs && !opt_compact)
			divx += max_lpdigits;
		if (bad)
		{
			divx += sizeof(U8_BAD_START)-1;
			strcpy(&statusline[divx], U8_BAD_END);
			divx += sizeof(U8_BAD_END)-1;
		}
		temperature_column(&statusline[divx], sizeof(statusline)-divx, true, &temp);
		format_statline(statusline, sizeof(statusline),
		                cHr, aHr,
		                uHr,
		                ui_accepted,
		                ui_rejected,
		                ui_stale,
		                total_diff_rejected + total_diff_stale, total_diff_accepted,
		                hw_errors,
		                total_bad_diff1, total_bad_diff1 + total_diff1);
		unlock_curses();
	}
#endif

	// Add a space
	memmove(&uHr[6], &uHr[5], strlen(&uHr[5]) + 1);
	uHr[5] = ' ';

	percentf4(rejpcbuf, sizeof(rejpcbuf), total_diff_rejected + total_diff_stale, total_diff_accepted);
	percentf4(bnbuf, sizeof(bnbuf), total_bad_diff1, total_diff1);
	snprintf(logstatusline, sizeof(logstatusline),
		"%s%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s",
		want_per_device_stats ? "ALL " : "",
		opt_log_interval,
		cHr, aHr,
		uHr,
		ui_accepted,
		ui_rejected,
		ui_stale,
		rejpcbuf,
		hw_errors,
		bnbuf
	);

	local_mhashes_done = 0;
out_unlock:
	mutex_unlock(&hash_lock);

	if (showlog) {
		if (!curses_active) {
			if (want_per_device_stats)
				printf("\n%s\r", logstatusline);
			else
			{
				const int logstatusline_len = strlen(logstatusline);
				int padding;
				if (last_logstatusline_len > logstatusline_len)
					padding = (last_logstatusline_len - logstatusline_len);
				else
				{
					padding = 0;
					if (last_logstatusline_len == -1)
						puts("");
				}
				printf("%s%*s\r", logstatusline, padding, "");
				last_logstatusline_len = logstatusline_len;
			}
			fflush(stdout);
		} else
			applog(LOG_INFO, "%s", logstatusline);
	}
}
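/* Illustrative sketch of the smoothing used above: decay_time() keeps an
 * exponentially weighted moving average, so the "%ds" rate reacts to recent
 * samples while damping jitter. Conceptually, for a sample measured over dt
 * seconds against a time constant tau:
 *
 *     alpha = 1 - exp(-dt / tau);            // weight of the new sample
 *     avg  += alpha * (sample - avg);
 *
 * The exact weighting is whatever decay_time() implements elsewhere in this
 * codebase; this sketch only conveys the shape of the calculation.
 */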
void hashmeter2(struct thr_info *thr)
{
	struct timeval tv_now, tv_elapsed;

	timerclear(&thr->tv_hashes_done);

	cgtime(&tv_now);
	timersub(&tv_now, &thr->tv_lastupdate, &tv_elapsed);

	/* Update the hashmeter at most 5 times per second */
	if ((thr->hashes_done && (tv_elapsed.tv_sec > 0 || tv_elapsed.tv_usec > 200000)) ||
	    tv_elapsed.tv_sec >= opt_log_interval) {
		hashmeter(thr->id, &tv_elapsed, thr->hashes_done);
		thr->hashes_done = 0;
		thr->tv_lastupdate = tv_now;
	}
}
static void stratum_share_result(json_t *val, json_t *res_val, json_t *err_val,
				 struct stratum_share *sshare)
{
	struct work *work = sshare->work;

	share_result(val, res_val, err_val, work, false, "");
}

/* Parses stratum json responses and tries to find the id that the request
 * matched to and treat it accordingly. */
bool parse_stratum_response(struct pool *pool, char *s)
{
	json_t *val = NULL, *err_val, *res_val, *id_val;
	struct stratum_share *sshare;
	json_error_t err;
	bool ret = false;
	int id;

	val = JSON_LOADS(s, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");
	id_val = json_object_get(val, "id");
	if (json_is_null(id_val) || !id_val) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC non method decode failed: %s", ss);

		free(ss);

		goto out;
	}

	if (!json_is_integer(id_val)) {
		if (json_is_string(id_val)
		 && !strncmp(json_string_value(id_val), "txlist", 6))
		{
			const bool is_array = json_is_array(res_val);
			applog(LOG_DEBUG, "Received %s for pool %u job %s",
			       is_array ? "transaction list" : "no-transaction-list response",
			       pool->pool_no, &json_string_value(id_val)[6]);

			if (!is_array)
			{
				// No need to wait for a timeout
				timer_unset(&pool->swork.tv_transparency);
				pool_set_opaque(pool, true);
				goto fishy;
			}

			if (strcmp(json_string_value(id_val) + 6, pool->swork.job_id))
				// We only care about a transaction list for the current job id
				goto fishy;

			// Check that the transactions actually hash to the merkle links
			{
				unsigned maxtx = 1 << pool->swork.merkles;
				unsigned mintx = maxtx >> 1;
				--maxtx;
				unsigned acttx = (unsigned)json_array_size(res_val);
				if (acttx < mintx || acttx > maxtx) {
					applog(LOG_WARNING, "Pool %u is sending mismatched block contents to us (%u is not %u-%u)",
					       pool->pool_no, acttx, mintx, maxtx);
					goto fishy;
				}
				// TODO: Check hashes match actual merkle links
			}

			pool_set_opaque(pool, false);
			timer_unset(&pool->swork.tv_transparency);

fishy:
			ret = true;
		}

		goto out;
	}

	id = json_integer_value(id_val);

	mutex_lock(&sshare_lock);
	HASH_FIND_INT(stratum_shares, &id, sshare);
	if (sshare)
		HASH_DEL(stratum_shares, sshare);
	mutex_unlock(&sshare_lock);

	if (!sshare) {
		double pool_diff;

		/* Since the share is untracked, we can only guess at what the
		 * work difficulty is based on the current pool diff. */
		cg_rlock(&pool->data_lock);
		pool_diff = target_diff(pool->swork.target);
		cg_runlock(&pool->data_lock);

		if (json_is_true(res_val)) {
			struct mining_goal_info * const goal = pool->goal;
			applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);

			/* We don't know what device this came from so we can't
			 * attribute the work to the relevant cgpu */
			mutex_lock(&stats_lock);
			total_accepted++;
			pool->accepted++;
			total_diff_accepted += pool_diff;
			pool->diff_accepted += pool_diff;
			goal->diff_accepted += pool_diff;
			mutex_unlock(&stats_lock);
		} else {
			applog(LOG_NOTICE, "Rejected untracked stratum share from pool %d", pool->pool_no);

			mutex_lock(&stats_lock);
			total_rejected++;
			pool->rejected++;
			total_diff_rejected += pool_diff;
			pool->diff_rejected += pool_diff;
			mutex_unlock(&stats_lock);
		}
		goto out;
	}
	else {
		mutex_lock(&submitting_lock);
		--total_submitting;
		mutex_unlock(&submitting_lock);
	}

	stratum_share_result(val, res_val, err_val, sshare);

	free_work(sshare->work);
	free(sshare);

	ret = true;
out:
	if (val)
		json_decref(val);

	return ret;
}
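/* Illustrative message shapes handled by parse_stratum_response(), with
 * hypothetical values:
 *
 *   accepted share:  {"id": 4, "result": true,  "error": null}
 *   rejected share:  {"id": 4, "result": false, "error": [23, "Low difficulty share", null]}
 *   txlist reply:    {"id": "txlist<job id>", "result": ["01000000...", ...], "error": null}
 *
 * Integer ids are matched against tracked shares in stratum_shares; the
 * string id "txlist..." carries the transaction-transparency probe handled
 * above.
 */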
static void shutdown_stratum(struct pool *pool)
{
	// Shut down Stratum as if we never had it
	pool->stratum_active = false;
	pool->stratum_init = false;
	pool->has_stratum = false;
	shutdown(pool->sock, SHUT_RDWR);
	free(pool->stratum_url);
	if (pool->sockaddr_url == pool->stratum_url)
		pool->sockaddr_url = NULL;
	pool->stratum_url = NULL;
}

void clear_stratum_shares(struct pool *pool)
{
	int my_mining_threads = mining_threads;  // Cached outside of locking
	struct stratum_share *sshare, *tmpshare;
	struct work *work;
	struct cgpu_info *cgpu;
	double diff_cleared = 0;
	double thr_diff_cleared[my_mining_threads];
	int cleared = 0;
	int thr_cleared[my_mining_threads];

	// NOTE: This is per-thread rather than per-device to avoid getting devices lock in stratum_shares loop
	for (int i = 0; i < my_mining_threads; ++i)
	{
		thr_diff_cleared[i] = 0;
		thr_cleared[i] = 0;
	}

	mutex_lock(&sshare_lock);
	HASH_ITER(hh, stratum_shares, sshare, tmpshare) {
		work = sshare->work;
		if (sshare->work->pool == pool && work->thr_id < my_mining_threads) {
			HASH_DEL(stratum_shares, sshare);
			sharelog("disconnect", work);
			diff_cleared += sshare->work->work_difficulty;
			thr_diff_cleared[work->thr_id] += work->work_difficulty;
			++thr_cleared[work->thr_id];
			free_work(sshare->work);
			free(sshare);
			cleared++;
		}
	}
	mutex_unlock(&sshare_lock);

	if (cleared) {
		applog(LOG_WARNING, "Lost %d shares due to stratum disconnect on pool %d", cleared, pool->pool_no);

		mutex_lock(&stats_lock);
		pool->stale_shares += cleared;
		total_stale += cleared;
		pool->diff_stale += diff_cleared;
		total_diff_stale += diff_cleared;
		for (int i = 0; i < my_mining_threads; ++i)
			if (thr_cleared[i])
			{
				cgpu = get_thr_cgpu(i);
				cgpu->diff_stale += thr_diff_cleared[i];
				cgpu->stale += thr_cleared[i];
			}
		mutex_unlock(&stats_lock);

		mutex_lock(&submitting_lock);
		total_submitting -= cleared;
		mutex_unlock(&submitting_lock);
	}
}
static void resubmit_stratum_shares(struct pool *pool)
{
	struct stratum_share *sshare, *tmpshare;
	struct work *work;
	unsigned resubmitted = 0;

	mutex_lock(&sshare_lock);
	mutex_lock(&submitting_lock);
	HASH_ITER(hh, stratum_shares, sshare, tmpshare) {
		if (sshare->work->pool != pool)
			continue;

		HASH_DEL(stratum_shares, sshare);
		work = sshare->work;
		DL_APPEND(submit_waiting, work);

		free(sshare);
		++resubmitted;
	}
	mutex_unlock(&submitting_lock);
	mutex_unlock(&sshare_lock);

	if (resubmitted) {
		notifier_wake(submit_waiting_notifier);
		applog(LOG_DEBUG, "Resubmitting %u shares due to stratum disconnect on pool %u", resubmitted, pool->pool_no);
	}
}
static void clear_pool_work(struct pool *pool)
{
	struct work *work, *tmp;
	int cleared = 0;

	mutex_lock(stgd_lock);
	HASH_ITER(hh, staged_work, work, tmp) {
		if (work->pool == pool) {
			unstage_work(work);
			free_work(work);
			cleared++;
		}
	}
	mutex_unlock(stgd_lock);
}
static int cp_prio(void)
{
	int prio;

	cg_rlock(&control_lock);
	prio = currentpool->prio;
	cg_runlock(&control_lock);

	return prio;
}

/* We only need to maintain a secondary pool connection when we need the
 * capacity to get work from the backup pools while still on the primary */
static bool cnx_needed(struct pool *pool)
{
	struct pool *cp;

	// We want to keep a connection open for rejecting or misbehaving pools, to detect when/if they change their tune
	if (pool->enabled == POOL_DISABLED)
		return false;

	/* Idle stratum pool needs something to kick it alive again */
	if (pool->has_stratum && pool->idle)
		return true;

	/* Getwork pools without opt_fail_only need backup pools up to be able
	 * to leak shares */
	cp = current_pool();
	if (pool_actively_desired(pool, cp))
		return true;
	if (!pool_localgen(cp) && (!opt_fail_only || !cp->hdr_path))
		return true;

	/* Keep the connection open to allow any stray shares to be submitted
	 * on switching pools for 2 minutes. */
	if (timer_elapsed(&pool->tv_last_work_time, NULL) < 120)
		return true;

	/* If the pool has only just come to life and is higher priority than
	 * the current pool keep the connection open so we can fail back to
	 * it. */
	if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio())
		return true;

	if (pool_unworkable(cp))
		return true;

	/* We've run out of work, bring anything back to life. */
	if (no_work)
		return true;

	// If the current pool lacks its own block change detection, see if we are needed for that
	if (pool_active_lp_pool(cp) == pool)
		return true;

	return false;
}
static void wait_lpcurrent(struct pool *pool);
static void pool_resus(struct pool *pool);

static void stratum_resumed(struct pool *pool)
{
	if (!pool->stratum_notify)
		return;
	if (pool_tclear(pool, &pool->idle)) {
		applog(LOG_INFO, "Stratum connection to pool %d resumed", pool->pool_no);
		pool_resus(pool);
	}
}

static bool supports_resume(struct pool *pool)
{
	bool ret;

	cg_rlock(&pool->data_lock);
	ret = (pool->sessionid != NULL);
	cg_runlock(&pool->data_lock);

	return ret;
}
static bool pools_active;

/* One stratum thread per pool that has stratum waits on the socket checking
 * for new messages and for the integrity of the socket connection. We reset
 * the connection based on the integrity of the receive side only as the send
 * side will eventually expire data it fails to send. */
static void *stratum_thread(void *userdata)
{
	struct pool *pool = (struct pool *)userdata;

	pthread_detach(pthread_self());

	char threadname[20];
	snprintf(threadname, 20, "stratum%u", pool->pool_no);
	RenameThread(threadname);

	srand(time(NULL) + (intptr_t)userdata);

	while (42) {
		struct timeval timeout;
		int sel_ret;
		fd_set rd;
		char *s;
		int sock;

		if (unlikely(!pool->has_stratum))
			break;

		/* Check to see whether we need to maintain this connection
		 * indefinitely or just bring it up when we switch to this
		 * pool */
		while (true)
		{
			sock = pool->sock;
			if (sock == INVSOCK)
				applog(LOG_DEBUG, "Pool %u: Invalid socket, suspending",
				       pool->pool_no);
			else
			if (!sock_full(pool) && !cnx_needed(pool) && pools_active)
				applog(LOG_DEBUG, "Pool %u: Connection not needed, suspending",
				       pool->pool_no);
			else
				break;

			suspend_stratum(pool);
			clear_stratum_shares(pool);
			clear_pool_work(pool);

			wait_lpcurrent(pool);
			if (!restart_stratum(pool)) {
				pool_died(pool);
				while (!restart_stratum(pool)) {
					if (pool->removed)
						goto out;
					cgsleep_ms(30000);
				}
			}
		}

		FD_ZERO(&rd);
		FD_SET(sock, &rd);
		timeout.tv_sec = 120;
		timeout.tv_usec = 0;

		/* If we fail to receive any notify messages for 2 minutes we
		 * assume the connection has been dropped and treat this pool
		 * as dead */
		if (!sock_full(pool) && (sel_ret = select(sock + 1, &rd, NULL, NULL, &timeout)) < 1) {
			applog(LOG_DEBUG, "Stratum select failed on pool %d with value %d", pool->pool_no, sel_ret);
			s = NULL;
		} else
			s = recv_line(pool);
		if (!s) {
			if (!pool->has_stratum)
				break;

			applog(LOG_NOTICE, "Stratum connection to pool %d interrupted", pool->pool_no);
			pool->getfail_occasions++;
			total_go++;

			mutex_lock(&pool->stratum_lock);
			pool->stratum_active = pool->stratum_notify = false;
			pool->sock = INVSOCK;
			mutex_unlock(&pool->stratum_lock);

			/* If the socket to our stratum pool disconnects, all
			 * submissions need to be discarded or resent. */
			if (!supports_resume(pool))
				clear_stratum_shares(pool);
			else
				resubmit_stratum_shares(pool);
			clear_pool_work(pool);
			if (pool == current_pool())
				restart_threads();

			if (restart_stratum(pool))
				continue;

			shutdown_stratum(pool);
			pool_died(pool);
			break;
		}

		/* Check this pool hasn't died while being a backup pool and
		 * has not had its idle flag cleared */
		stratum_resumed(pool);

		if (!parse_method(pool, s) && !parse_stratum_response(pool, s))
			applog(LOG_INFO, "Unknown stratum msg: %s", s);
		free(s);
		if (pool->swork.clean) {
			struct work *work = make_work();

			/* Generate a single work item to update the current
			 * block database */
			pool->swork.clean = false;
			gen_stratum_work(pool, work);

			/* Try to extract the block height from the coinbase scriptSig */
			uint8_t *bin_height = &bytes_buf(&pool->swork.coinbase)[4 /*version*/ + 1 /*txin count*/ + 36 /*prevout*/ + 1 /*scriptSig len*/ + 1 /*push opcode*/];
			unsigned char cb_height_sz;
			cb_height_sz = bin_height[-1];
			if (cb_height_sz == 3) {
				// FIXME: The block number will overflow this by AD 2173
				struct mining_goal_info * const goal = pool->goal;
				const void * const prevblkhash = &work->data[4];
				uint32_t height = 0;
				memcpy(&height, bin_height, 3);
				height = le32toh(height);
				have_block_height(goal, prevblkhash, height);
			}

			pool->swork.work_restart_id = ++pool->work_restart_id;
			pool_update_work_restart_time(pool);
			if (test_work_current(work)) {
				/* Only accept a work update if this stratum
				 * connection is from the current pool */
				struct pool * const cp = current_pool();
				if (pool == cp)
					restart_threads();
				applog(
					((!opt_quiet_work_updates) && pool_actively_in_use(pool, cp) ? LOG_NOTICE : LOG_DEBUG),
					"Stratum from pool %d requested work update", pool->pool_no);
			} else
				applog(LOG_NOTICE, "Stratum from pool %d detected new block", pool->pool_no);
			free_work(work);
		}

		if (timer_passed(&pool->swork.tv_transparency, NULL)) {
			// More than 4 timmills have passed since the transactions were requested
			timer_unset(&pool->swork.tv_transparency);
			pool_set_opaque(pool, true);
		}
	}

out:
	return NULL;
}
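/* Illustrative layout behind the coinbase height extraction above: per
 * BIP 34, the coinbase scriptSig begins with the serialized block height.
 * For a hypothetical height of 500000 (0x07a120), the coinbase transaction
 * starts:
 *
 *   4 bytes   version
 *   1 byte    txin count (0x01)
 *   36 bytes  prevout (null outpoint)
 *   1 byte    scriptSig length
 *   1 byte    push opcode 0x03         <- read as cb_height_sz
 *   3 bytes   20 a1 07                 <- height, little endian
 *
 * hence bin_height sits at offset 4+1+36+1+1 = 43, with the push size at
 * bin_height[-1]. Current heights occupy exactly 3 bytes, and will until
 * block 8388608 (the "AD 2173" FIXME above).
 */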
static void init_stratum_thread(struct pool *pool)
{
	struct mining_goal_info * const goal = pool->goal;
	goal->have_longpoll = true;

	if (unlikely(pthread_create(&pool->stratum_thread, NULL, stratum_thread, (void *)pool)))
		quit(1, "Failed to create stratum thread");
}

static void *longpoll_thread(void *userdata);

static bool stratum_works(struct pool *pool)
{
	applog(LOG_INFO, "Testing pool %d stratum %s", pool->pool_no, pool->stratum_url);
	if (!extract_sockaddr(pool->stratum_url, &pool->sockaddr_url, &pool->stratum_port))
		return false;

	if (pool->stratum_active)
		return true;

	if (!initiate_stratum(pool))
		return false;

	return true;
}

static
bool pool_recently_got_work(struct pool * const pool, const struct timeval * const tvp_now)
{
	return (timer_isset(&pool->tv_last_work_time) && timer_elapsed(&pool->tv_last_work_time, tvp_now) < 60);
}
static bool pool_active(struct pool *pool, bool pinging)
{
	struct timeval tv_now, tv_getwork, tv_getwork_reply;
	bool ret = false;
	json_t *val;
	CURL *curl = NULL;
	int rolltime;
	char *rpc_req;
	struct work *work;
	enum pool_protocol proto;

	if (pool->stratum_init)
	{
		if (pool->stratum_active)
			return true;
	}
	else
	if (!pool->idle)
	{
		timer_set_now(&tv_now);
		if (pool_recently_got_work(pool, &tv_now))
			return true;
	}

	mutex_lock(&pool->pool_test_lock);

	if (pool->stratum_init)
	{
		ret = pool->stratum_active;
		goto out;
	}
	timer_set_now(&tv_now);
	if (pool->idle)
	{
		if (timer_elapsed(&pool->tv_idle, &tv_now) < 30)
			goto out;
	}
	else
	if (pool_recently_got_work(pool, &tv_now))
	{
		ret = true;
		goto out;
	}

	applog(LOG_INFO, "Testing pool %s", pool->rpc_url);

	/* This is the central point we activate stratum when we can */
	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		goto out;
	}

	if (!(want_gbt || want_getwork))
		goto nohttp;

	work = make_work();

	/* Probe for GBT support on first pass */
	proto = want_gbt ? PLP_GETBLOCKTEMPLATE : PLP_GETWORK;

tryagain:
	rpc_req = prepare_rpc_req_probe(work, proto, NULL, pool);
	work->pool = pool;
	if (!rpc_req)
		goto out;

	pool->probed = false;
	cgtime(&tv_getwork);
	val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req,
			true, false, &rolltime, pool, false);
	cgtime(&tv_getwork_reply);
	free(rpc_req);

	/* Detect if a http getwork pool has an X-Stratum header at startup,
	 * and if so, switch to that in preference to getwork if it works */
	if (pool->stratum_url && want_stratum && pool_may_redirect_to(pool, pool->stratum_url) && (pool->has_stratum || stratum_works(pool))) {
		if (!pool->has_stratum) {
			applog(LOG_NOTICE, "Switching pool %d %s to %s", pool->pool_no, pool->rpc_url, pool->stratum_url);
			if (!pool->rpc_url)
				pool_set_uri(pool, strdup(pool->stratum_url));
			pool->has_stratum = true;
		}

		free_work(work);
		if (val)
			json_decref(val);

retry_stratum: ;
		/* We create the stratum thread for each pool just after
		 * successful authorisation. Once the init flag has been set
		 * we never unset it and the stratum thread is responsible for
		 * setting/unsetting the active flag */
		bool init = pool_tset(pool, &pool->stratum_init);

		if (!init) {
			ret = initiate_stratum(pool) && auth_stratum(pool);
			if (ret)
			{
				detect_algo = 2;
				init_stratum_thread(pool);
			}
			else
			{
				pool_tclear(pool, &pool->stratum_init);
				pool->tv_idle = tv_getwork_reply;
			}
			goto out;
		}
		ret = pool->stratum_active;
		goto out;
	}
	else if (pool->has_stratum)
		shutdown_stratum(pool);

	if (val) {
		bool rc;
		json_t *res;

		res = json_object_get(val, "result");
		if ((!json_is_object(res)) || (proto == PLP_GETBLOCKTEMPLATE && !json_object_get(res, "bits")))
			goto badwork;

		work->rolltime = rolltime;
		rc = work_decode(pool, work, val);
		if (rc) {
			applog(LOG_DEBUG, "Successfully retrieved and deciphered work from pool %u %s",
			       pool->pool_no, pool->rpc_url);
			work->pool = pool;
			copy_time(&work->tv_getwork, &tv_getwork);
			copy_time(&work->tv_getwork_reply, &tv_getwork_reply);
			work->getwork_mode = GETWORK_MODE_TESTPOOL;
			calc_diff(work, 0);

			update_last_work(work);

			applog(LOG_DEBUG, "Pushing pooltest work to base pool");

			stage_work(work);
			total_getworks++;
			pool->getwork_requested++;
			ret = true;
			pool->tv_idle = tv_getwork_reply;
		} else {
badwork:
			json_decref(val);
			applog(LOG_DEBUG, "Successfully retrieved but FAILED to decipher work from pool %u %s",
			       pool->pool_no, pool->rpc_url);
			pool->proto = proto = pool_protocol_fallback(proto);
			if (PLP_NONE != proto)
				goto tryagain;
			pool->tv_idle = tv_getwork_reply;
			free_work(work);
			goto out;
		}
		json_decref(val);

		if (proto != pool->proto) {
			pool->proto = proto;
			applog(LOG_INFO, "Selected %s protocol for pool %u", pool_protocol_name(proto), pool->pool_no);
		}

		if (pool->lp_url)
			goto out;

		/* Decipher the longpoll URL, if any, and store it in ->lp_url */
		const struct blktmpl_longpoll_req *lp;
		if (work->tr && (lp = blktmpl_get_longpoll(work->tr->tmpl))) {
			// NOTE: work_decode takes care of lp id
			pool->lp_url = lp->uri ? absolute_uri(lp->uri, pool->rpc_url) : pool->rpc_url;
			if (!pool->lp_url)
			{
				ret = false;
				goto out;
			}
			pool->lp_proto = PLP_GETBLOCKTEMPLATE;
		}
		else
		if (pool->hdr_path && want_getwork) {
			pool->lp_url = absolute_uri(pool->hdr_path, pool->rpc_url);
			if (!pool->lp_url)
			{
				ret = false;
				goto out;
			}
			pool->lp_proto = PLP_GETWORK;
		} else
			pool->lp_url = NULL;

		if (want_longpoll && !pool->lp_started) {
			pool->lp_started = true;
			if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool)))
				quit(1, "Failed to create pool longpoll thread");
		}
	} else if (PLP_NONE != (proto = pool_protocol_fallback(proto))) {
		pool->proto = proto;
		goto tryagain;
	} else {
		pool->tv_idle = tv_getwork_reply;
		free_work(work);
nohttp:
		/* If we failed to parse a getwork, this could be a stratum
		 * url without the prefix stratum+tcp:// so let's check it */
		if (extract_sockaddr(pool->rpc_url, &pool->sockaddr_url, &pool->stratum_port) && initiate_stratum(pool)) {
			pool->has_stratum = true;
			goto retry_stratum;
		}
		applog(LOG_DEBUG, "FAILED to retrieve work from pool %u %s",
		       pool->pool_no, pool->rpc_url);
		if (!pinging)
			applog(LOG_WARNING, "Pool %u slow/down or URL or credentials invalid", pool->pool_no);
	}
out:
	if (curl)
		curl_easy_cleanup(curl);
	mutex_unlock(&pool->pool_test_lock);
	return ret;
}
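/* Illustrative summary of the probe order pool_active() walks through with
 * default options:
 *
 *   1. getblocktemplate (PLP_GETBLOCKTEMPLATE), when want_gbt
 *   2. getwork (PLP_GETWORK), reached via pool_protocol_fallback()
 *   3. stratum, either through an X-Stratum redirect or, when both HTTP
 *      protocols fail, by treating the URL itself as a stratum endpoint
 *      (the nohttp path above)
 */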
static void pool_resus(struct pool *pool)
{
	if (pool->enabled == POOL_ENABLED && pool_strategy == POOL_FAILOVER && pool->prio < cp_prio())
		applog(LOG_WARNING, "Pool %d %s alive, testing stability", pool->pool_no, pool->rpc_url);
	else
		applog(LOG_INFO, "Pool %d %s alive", pool->pool_no, pool->rpc_url);
}

static
void *cmd_idle_thread(void * const __maybe_unused userp)
{
	pthread_detach(pthread_self());
	RenameThread("cmd-idle");
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	sleep(opt_log_interval);
	pthread_testcancel();
	run_cmd(cmd_idle);

	return NULL;
}
static struct work *hash_pop(struct cgpu_info * const proc)
{
	int hc;
	struct work *work, *work_found, *tmp;
	enum {
		HPWS_NONE,
		HPWS_LOWDIFF,
		HPWS_SPARE,
		HPWS_ROLLABLE,
		HPWS_PERFECT,
	} work_score = HPWS_NONE;
	bool did_cmd_idle = false;
	pthread_t cmd_idle_thr;

retry:
	mutex_lock(stgd_lock);
	while (true)
	{
		work_found = NULL;
		work_score = HPWS_NONE;
		hc = HASH_COUNT(staged_work);
		HASH_ITER(hh, staged_work, work, tmp)
		{
			const struct mining_algorithm * const work_malgo = work_mining_algorithm(work);
			const float min_nonce_diff = drv_min_nonce_diff(proc->drv, proc, work_malgo);
#define FOUND_WORK(score)  do{  \
	if (work_score < score)  \
	{  \
		work_found = work;  \
		work_score = score;  \
	}  \
	continue;  \
}while(0)
			if (min_nonce_diff < work->work_difficulty)
			{
				if (min_nonce_diff < 0)
					continue;
				FOUND_WORK(HPWS_LOWDIFF);
			}
			if (work->spare)
				FOUND_WORK(HPWS_SPARE);
			if (work->rolltime && hc > staged_rollable)
				FOUND_WORK(HPWS_ROLLABLE);
#undef FOUND_WORK
			// Good match
			work_found = work;
			work_score = HPWS_PERFECT;
			break;
		}
		if (work_found)
		{
			work = work_found;
			break;
		}

		// Failed to get a usable work
		if (unlikely(staged_full))
		{
			if (likely(opt_queue < 10 + mining_threads))
			{
				++opt_queue;
				applog(LOG_WARNING, "Staged work underrun; increasing queue minimum to %d", opt_queue);
			}
			else
				applog(LOG_WARNING, "Staged work underrun; not automatically increasing above %d", opt_queue);
			staged_full = false;  // Let it fill up before triggering an underrun again
			no_work = true;
		}

		pthread_cond_signal(&gws_cond);

		if (cmd_idle && !did_cmd_idle)
		{
			if (likely(!pthread_create(&cmd_idle_thr, NULL, cmd_idle_thread, NULL)))
				did_cmd_idle = true;
		}

		pthread_cond_wait(&getq->cond, stgd_lock);
	}
	if (did_cmd_idle)
		pthread_cancel(cmd_idle_thr);

	no_work = false;

	if (can_roll(work) && should_roll(work))
	{
		// Instead of consuming it, force it to be cloned and grab the clone
		mutex_unlock(stgd_lock);
		clone_available();
		goto retry;
	}

	unstage_work(work);

	/* Signal the getwork scheduler to look for more work */
	pthread_cond_signal(&gws_cond);

	/* Signal hash_pop again in case there are multiple hash_pop waiters */
	pthread_cond_signal(&getq->cond);
	mutex_unlock(stgd_lock);

	work->pool->last_work_time = time(NULL);
	cgtime(&work->pool->tv_last_work_time);

	return work;
}
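/* Illustrative ranking used by hash_pop() above, in ascending preference:
 *
 *   HPWS_LOWDIFF  - usable, but the device's minimum nonce diff is below
 *                   the work difficulty
 *   HPWS_SPARE    - work marked spare
 *   HPWS_ROLLABLE - rollable work, while rollable work is plentiful
 *   HPWS_PERFECT  - anything else, taken immediately
 *
 * Work the device cannot check at all (min_nonce_diff < 0) is skipped
 * outright.
 */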
/* Clones work by rolling it if possible, and returning a clone instead of the
 * original work item which gets staged again to possibly be rolled again in
 * the future */
static struct work *clone_work(struct work *work)
{
	int mrs = mining_threads + opt_queue - total_staged(false);
	struct work *work_clone;
	bool cloned;

	if (mrs < 1)
		return work;

	cloned = false;
	work_clone = make_clone(work);
	while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
		stage_work(work_clone);
		roll_work(work);
		work_clone = make_clone(work);
		/* Roll it again to prevent duplicates should this be used
		 * directly later on */
		roll_work(work);
		cloned = true;
	}

	if (cloned) {
		stage_work(work);
		return work_clone;
	}

	free_work(work_clone);

	return work;
}
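/* Illustrative example: with mining_threads = 2, opt_queue = 1 and nothing
 * staged, mrs = 3, so a fully rollable work item yields up to three staged
 * clones plus the clone returned to the caller, each rolled to a distinct
 * state, while the original is re-staged for future rolling. (That
 * roll_work() advances the work's ntime is an assumption drawn from its use
 * here, not something this excerpt shows.)
 */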
void gen_hash(unsigned char *data, unsigned char *hash, int len)
{
	unsigned char hash1[32];

	sha256(data, len, hash1);
	sha256(hash1, 32, hash);
}
/* PDiff 1 is a 256 bit unsigned integer of
 * 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
 * so we use a big endian 32 bit unsigned integer positioned at the Nth byte to
 * cover a huge range of difficulty targets, though not all 256 bits' worth */
static void pdiff_target_leadzero(void * const target_p, double diff)
{
	uint8_t *target = target_p;
	diff *= 0x100000000;
	int skip = log2(diff) / 8;
	if (skip)
	{
		if (skip > 0x1c)
			skip = 0x1c;
		diff /= pow(0x100, skip);
		memset(target, 0, skip);
	}
	uint32_t n = 0xffffffff;
	n = (double)n / diff;
	n = htobe32(n);
	memcpy(&target[skip], &n, sizeof(n));
	memset(&target[skip + sizeof(n)], 0xff, 32 - (skip + sizeof(n)));
}
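/* Worked example for the code above with diff = 2: diff * 0x100000000 = 2^33,
 * so skip = (int)(33 / 8) = 4 zero bytes; diff is scaled back down to 2;
 * n = 0xffffffff / 2 = 0x7fffffff is stored big endian at bytes 4..7; bytes
 * 8..31 become 0xff. The resulting target,
 *
 *   000000007fffffffffffffff...ff
 *
 * matches the pdiff-2 expectation checked in test_target() below.
 */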
void set_target_to_pdiff(void * const dest_target, const double pdiff)
{
	unsigned char rtarget[32];

	pdiff_target_leadzero(rtarget, pdiff);
	swab256(dest_target, rtarget);

	if (opt_debug) {
		char htarget[65];

		bin2hex(htarget, rtarget, 32);
		applog(LOG_DEBUG, "Generated target %s", htarget);
	}
}

void set_target_to_bdiff(void * const dest_target, const double bdiff)
{
	set_target_to_pdiff(dest_target, bdiff_to_pdiff(bdiff));
}
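/* Illustrative relation between the two scales: pdiff counts difficulty
 * against the full target 00000000ffffffff...ff, while bdiff (what bitcoind
 * reports) counts against the truncated 00000000ffff0000...00. A bdiff of 1
 * therefore corresponds to a pdiff of about 1.0000152590218966, the constant
 * bdiff_to_pdiff() accounts for (see also the test comment below).
 */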
void _test_target(void * const funcp, const char * const funcname, const bool little_endian, const void * const expectp, const double diff)
{
	uint8_t bufr[32], buf[32], expectr[32], expect[32];
	int off;
	void (*func)(void *, double) = funcp;

	func(little_endian ? bufr : buf, diff);
	if (little_endian)
		swab256(buf, bufr);
	swap32tobe(expect, expectp, 256/32);

	// Fuzzy comparison: the 32 bits starting at the first nonzero byte must match, and the actual target must be >= the expected
	for (off = 0; off < 28 && !buf[off]; ++off)
	{}
	if (memcmp(&buf[off], &expect[off], 4))
	{
testfail: ;
		++unittest_failures;
		char hexbuf[65], expectbuf[65];
		bin2hex(hexbuf, buf, 32);
		bin2hex(expectbuf, expect, 32);
		applogr(, LOG_WARNING, "%s test failed: diff %g got %s (expected %s)",
		        funcname, diff, hexbuf, expectbuf);
	}
	if (!little_endian)
		swab256(bufr, buf);
	swab256(expectr, expect);
	if (!hash_target_check(expectr, bufr))
		goto testfail;
}

#define TEST_TARGET(func, le, expect, diff)  \
	_test_target(func, #func, le, expect, diff)

void test_target()
{
	uint32_t expect[8] = {0};

	// bdiff 1 should be exactly 00000000ffff0000000006f29cfd29510a6caee84634e86a57257cf03152537f due to floating-point imprecision (pdiff1 / 1.0000152590218966)
	expect[0] = 0x0000ffff;
	TEST_TARGET(set_target_to_bdiff, true, expect, 1./0x10000);
	expect[0] = 0;
	expect[1] = 0xffff0000;
	TEST_TARGET(set_target_to_bdiff, true, expect, 1);
	expect[1] >>= 1;
	TEST_TARGET(set_target_to_bdiff, true, expect, 2);
	expect[1] >>= 3;
	TEST_TARGET(set_target_to_bdiff, true, expect, 0x10);
	expect[1] >>= 4;
	TEST_TARGET(set_target_to_bdiff, true, expect, 0x100);

	memset(&expect[1], '\xff', 28);
	expect[0] = 0x0000ffff;
	TEST_TARGET(set_target_to_pdiff, true, expect, 1./0x10000);
	expect[0] = 0;
	TEST_TARGET(set_target_to_pdiff, true, expect, 1);
	expect[1] >>= 1;
	TEST_TARGET(set_target_to_pdiff, true, expect, 2);
	expect[1] >>= 3;
	TEST_TARGET(set_target_to_pdiff, true, expect, 0x10);
	expect[1] >>= 4;
	TEST_TARGET(set_target_to_pdiff, true, expect, 0x100);
}
void stratum_work_cpy(struct stratum_work * const dst, const struct stratum_work * const src)
{
	*dst = *src;
	if (dst->tr)
		tmpl_incref(dst->tr);
	dst->nonce1 = maybe_strdup(src->nonce1);
	dst->job_id = maybe_strdup(src->job_id);
	bytes_cpy(&dst->coinbase, &src->coinbase);
	bytes_cpy(&dst->merkle_bin, &src->merkle_bin);
	dst->data_lock_p = NULL;
}

void stratum_work_clean(struct stratum_work * const swork)
{
	if (swork->tr)
		tmpl_decref(swork->tr);
	free(swork->nonce1);
	free(swork->job_id);
	bytes_free(&swork->coinbase);
	bytes_free(&swork->merkle_bin);
}

bool pool_has_usable_swork(const struct pool * const pool)
{
	if (opt_benchmark)
		return true;
	if (pool->swork.tr)
	{
		// GBT
		struct timeval tv_now;
		timer_set_now(&tv_now);
		return blkmk_time_left(pool->swork.tr->tmpl, tv_now.tv_sec);
	}
	return pool->stratum_notify;
}
/* Generates stratum based work based on the most recent notify information
 * from the pool. This will keep generating work while a pool is down so we use
 * other means to detect when the pool has died in stratum_thread */
static void gen_stratum_work(struct pool *pool, struct work *work)
{
	clean_work(work);

	cg_wlock(&pool->data_lock);

	const int n2size = pool->swork.n2size;
	bytes_resize(&work->nonce2, n2size);
	if (pool->nonce2sz < n2size)
		memset(&bytes_buf(&work->nonce2)[pool->nonce2sz], 0, n2size - pool->nonce2sz);
	memcpy(bytes_buf(&work->nonce2),
#ifdef WORDS_BIGENDIAN
	       // NOTE: On big endian, the most significant bits are stored at the end, so skip the LSBs
	       &((char*)&pool->nonce2)[pool->nonce2off],
#else
	       &pool->nonce2,
#endif
	       pool->nonce2sz);
	pool->nonce2++;

	work->pool = pool;
	work->work_restart_id = pool->swork.work_restart_id;
	gen_stratum_work2(work, &pool->swork);

	cgtime(&work->tv_staged);
}

void gen_stratum_work2(struct work *work, struct stratum_work *swork)
{
	unsigned char *coinbase;

	/* Generate coinbase */
	coinbase = bytes_buf(&swork->coinbase);
	memcpy(&coinbase[swork->nonce2_offset], bytes_buf(&work->nonce2), bytes_len(&work->nonce2));

	/* Downgrade to a read lock to read off the variables */
	if (swork->data_lock_p)
		cg_dwlock(swork->data_lock_p);

	gen_stratum_work3(work, swork, swork->data_lock_p);

	if (opt_debug)
	{
		char header[161];
		char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1];
		bin2hex(header, work->data, 80);
		bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2));
		applog(LOG_DEBUG, "Generated stratum header %s", header);
		applog(LOG_DEBUG, "Work job_id %s nonce2 %s", work->job_id, nonce2hex);
	}
}

void gen_stratum_work3(struct work * const work, struct stratum_work * const swork, cglock_t * const data_lock_p)
{
	unsigned char *coinbase, merkle_root[32], merkle_sha[64];
	uint8_t *merkle_bin;
	uint32_t *data32, *swap32;
	int i;

	coinbase = bytes_buf(&swork->coinbase);

	/* Generate merkle root */
	gen_hash(coinbase, merkle_root, bytes_len(&swork->coinbase));
	memcpy(merkle_sha, merkle_root, 32);
	merkle_bin = bytes_buf(&swork->merkle_bin);
	for (i = 0; i < swork->merkles; ++i, merkle_bin += 32) {
		memcpy(merkle_sha + 32, merkle_bin, 32);
		gen_hash(merkle_sha, merkle_root, 64);
		memcpy(merkle_sha, merkle_root, 32);
	}
	data32 = (uint32_t *)merkle_sha;
	swap32 = (uint32_t *)merkle_root;
	flip32(swap32, data32);

	memcpy(&work->data[0], swork->header1, 36);
	memcpy(&work->data[36], merkle_root, 32);
	*((uint32_t*)&work->data[68]) = htobe32(swork->ntime + timer_elapsed(&swork->tv_received, NULL));
	memcpy(&work->data[72], swork->diffbits, 4);
	memset(&work->data[76], 0, 4);  // nonce
	memcpy(&work->data[80], workpadding_bin, 48);

	work->ntime_roll_limits = swork->ntime_roll_limits;

	/* Copy parameters required for share submission */
	memcpy(work->target, swork->target, sizeof(work->target));
	work->job_id = maybe_strdup(swork->job_id);
	work->nonce1 = maybe_strdup(swork->nonce1);
	if (data_lock_p)
		cg_runlock(data_lock_p);

	calc_midstate(work);

	local_work++;
	work->stratum = true;
	work->blk.nonce = 0;
	work->id = total_work++;
	work->longpoll = false;
	work->getwork_mode = GETWORK_MODE_STRATUM;

	if (swork->tr) {
		work->getwork_mode = GETWORK_MODE_GBT;
		work->tr = swork->tr;
		tmpl_incref(work->tr);
	}

	calc_diff(work, 0);
}
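/* Illustrative layout of the 80-byte header assembled into work->data above,
 * plus the padding completing the second SHA-256 block:
 *
 *   bytes  0..35   header1 (version + previous block hash)
 *   bytes 36..67   merkle root (from coinbase and merkle links)
 *   bytes 68..71   ntime, big endian, advanced by the time since the job arrived
 *   bytes 72..75   nbits (compact difficulty, swork->diffbits)
 *   bytes 76..79   nonce, zeroed here and filled in by the miner
 *   bytes 80..127  standard SHA-256 padding for an 80-byte message
 */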
void request_work(struct thr_info *thr)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats);

	/* Tell the watchdog thread this thread is waiting on getwork and
	 * should not be restarted */
	thread_reportout(thr);

	// HACK: Since get_work still blocks, reportout all processors dependent on this thread
	for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc)
	{
		if (proc->threads)
			break;
		thread_reportout(proc->thr[0]);
	}

	cgtime(&dev_stats->_get_start);
}

// FIXME: Make this non-blocking (and remove HACK above)
struct work *get_work(struct thr_info *thr)
{
	const int thr_id = thr->id;
	struct cgpu_info *cgpu = thr->cgpu;
	struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats);
	struct cgminer_stats *pool_stats;
	struct timeval tv_get;
	struct work *work = NULL;

	applog(LOG_DEBUG, "%"PRIpreprv": Popping work from get queue to get work", cgpu->proc_repr);
	while (!work) {
		work = hash_pop(cgpu);
		if (stale_work(work, false)) {
			staged_full = false;  // It wasn't really full, since it was stale :(
			discard_work(work);
			work = NULL;
			wake_gws();
		}
	}
	last_getwork = time(NULL);
	applog(LOG_DEBUG, "%"PRIpreprv": Got work %d from get queue to get work for thread %d",
	       cgpu->proc_repr, work->id, thr_id);

	work->thr_id = thr_id;
	thread_reportin(thr);
	// HACK: Since get_work still blocks, reportin all processors dependent on this thread
	for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc)
	{
		if (proc->threads)
			break;
		thread_reportin(proc->thr[0]);
	}
	work->mined = true;
	work->blk.nonce = 0;

	cgtime(&tv_get);
	timersub(&tv_get, &dev_stats->_get_start, &tv_get);

	timeradd(&tv_get, &dev_stats->getwork_wait, &dev_stats->getwork_wait);
	if (timercmp(&tv_get, &dev_stats->getwork_wait_max, >))
		dev_stats->getwork_wait_max = tv_get;
	if (timercmp(&tv_get, &dev_stats->getwork_wait_min, <))
		dev_stats->getwork_wait_min = tv_get;
	++dev_stats->getwork_calls;

	pool_stats = &(work->pool->cgminer_stats);

	timeradd(&tv_get, &pool_stats->getwork_wait, &pool_stats->getwork_wait);
	if (timercmp(&tv_get, &pool_stats->getwork_wait_max, >))
		pool_stats->getwork_wait_max = tv_get;
	if (timercmp(&tv_get, &pool_stats->getwork_wait_min, <))
		pool_stats->getwork_wait_min = tv_get;
	++pool_stats->getwork_calls;

	if (work->work_difficulty < 1)
	{
		const float min_nonce_diff = drv_min_nonce_diff(cgpu->drv, cgpu, work_mining_algorithm(work));
		if (unlikely(work->work_difficulty < min_nonce_diff))
		{
			if (min_nonce_diff - work->work_difficulty > 1./0x10000000)
				applog(LOG_WARNING, "%"PRIpreprv": Using work with lower difficulty than device supports",
				       cgpu->proc_repr);
			work->nonce_diff = min_nonce_diff;
		}
		else
			work->nonce_diff = work->work_difficulty;
	}
	else
		work->nonce_diff = 1;

	return work;
}
struct dupe_hash_elem {
	uint8_t hash[0x20];
	struct timeval tv_prune;
	UT_hash_handle hh;
};

static
void _submit_work_async(struct work *work)
{
	applog(LOG_DEBUG, "Pushing submit work to work thread");

	if (opt_benchmark)
	{
		json_t * const jn = json_null(), *result = NULL;
		work_check_for_block(work);
		{
			static struct dupe_hash_elem *dupe_hashes;
			struct dupe_hash_elem *dhe, *dhetmp;
			HASH_FIND(hh, dupe_hashes, &work->hash, sizeof(dhe->hash), dhe);
			if (dhe)
				result = json_string("duplicate");
			else
			{
				struct timeval tv_now;
				timer_set_now(&tv_now);
				// Prune old entries
				HASH_ITER(hh, dupe_hashes, dhe, dhetmp)
				{
					if (!timer_passed(&dhe->tv_prune, &tv_now))
						break;
					HASH_DEL(dupe_hashes, dhe);
					free(dhe);
				}
				dhe = malloc(sizeof(*dhe));
				memcpy(dhe->hash, work->hash, sizeof(dhe->hash));
				timer_set_delay(&dhe->tv_prune, &tv_now, 337500000);
				HASH_ADD(hh, dupe_hashes, hash, sizeof(dhe->hash), dhe);
			}
		}
		if (result)
		{}
		else
		if (stale_work(work, true))
		{
			char stalemsg[0x10];
			snprintf(stalemsg, sizeof(stalemsg), "stale %us", benchmark_update_interval * (work->pool->work_restart_id - work->work_restart_id));
			result = json_string(stalemsg);
		}
		else
			result = json_incref(jn);
		share_result(jn, result, jn, work, false, "");
		free_work(work);
		json_decref(result);
		json_decref(jn);
		return;
	}

	mutex_lock(&submitting_lock);
	++total_submitting;
	DL_APPEND(submit_waiting, work);
	mutex_unlock(&submitting_lock);

	notifier_wake(submit_waiting_notifier);
}

/* Submit a copy of the tested, statistic recorded work item asynchronously */
static void submit_work_async2(struct work *work, struct timeval *tv_work_found)
{
	if (tv_work_found)
		copy_time(&work->tv_work_found, tv_work_found);

	_submit_work_async(work);
}
void inc_hw_errors3(struct thr_info *thr, const struct work *work, const uint32_t *bad_nonce_p, float nonce_diff)
{
	struct cgpu_info * const cgpu = thr->cgpu;

	if (bad_nonce_p)
	{
		if (bad_nonce_p == UNKNOWN_NONCE)
			applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce - HW error",
			       cgpu->proc_repr);
		else
			applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce (%08lx) - HW error",
			       cgpu->proc_repr, (unsigned long)be32toh(*bad_nonce_p));
	}

	mutex_lock(&stats_lock);
	hw_errors++;
	++cgpu->hw_errors;
	if (bad_nonce_p)
	{
		total_bad_diff1 += nonce_diff;
		cgpu->bad_diff1 += nonce_diff;
	}
	mutex_unlock(&stats_lock);

	if (thr->cgpu->drv->hw_error)
		thr->cgpu->drv->hw_error(thr);
}

void work_hash(struct work * const work)
{
	const struct mining_algorithm * const malgo = work_mining_algorithm(work);
	malgo->hash_data_f(work->hash, work->data);
}

static
bool test_hash(const void * const phash, const float diff)
{
	const uint32_t * const hash = phash;
	if (diff >= 1.)
		// FIXME: > 1 should check more
		return !hash[7];
	const uint32_t Htarg = (uint32_t)ceil((1. / diff) - 1);
	const uint32_t tmp_hash7 = le32toh(hash[7]);
	applog(LOG_DEBUG, "htarget %08lx hash %08lx",
	       (long unsigned int)Htarg,
	       (long unsigned int)tmp_hash7);
	return (tmp_hash7 <= Htarg);
}
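/* Worked example for test_hash() above: at a nonce diff of 0.25,
 * Htarg = ceil(1/0.25 - 1) = 3, so the top 32 bits of the hash (hash[7] as a
 * little endian word) must be <= 3. For diff >= 1 the check collapses to
 * hash[7] == 0, i.e. at least 32 leading zero bits.
 */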
enum test_nonce2_result _test_nonce2(struct work *work, uint32_t nonce, bool checktarget)
{
	uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12);
	*work_nonce = htole32(nonce);

	work_hash(work);

	if (!test_hash(work->hash, work->nonce_diff))
		return TNR_BAD;

	if (checktarget && !hash_target_check_v(work->hash, work->target))
	{
		bool high_hash = true;
		struct pool * const pool = work->pool;
		if (pool_diff_effective_retroactively(pool))
		{
			// Some stratum pools are buggy and expect difficulty changes to apply retroactively, so if the target has changed, check and submit just in case
			if (memcmp(pool->next_target, work->target, sizeof(work->target)))
			{
				applog(LOG_DEBUG, "Stratum pool %u target has changed since work job issued, checking that too",
				       pool->pool_no);
				if (hash_target_check_v(work->hash, pool->next_target))
				{
					high_hash = false;
					work->work_difficulty = target_diff(pool->next_target);
				}
			}
		}
		if (high_hash)
			return TNR_HIGH;
	}
	return TNR_GOOD;
}

/* Returns true if nonce for work was a valid share */
bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
{
	return submit_noffset_nonce(thr, work, nonce, 0);
}

/* Allows drivers to submit work items where the driver has changed the ntime
 * value by noffset. Must be only used with a work protocol that does not ntime
 * roll itself intrinsically to generate work (eg stratum). We do not touch
 * the original work struct, but the copy of it only. */
bool submit_noffset_nonce(struct thr_info *thr, struct work *work_in, uint32_t nonce,
			  int noffset)
{
	struct work *work = make_work();
	_copy_work(work, work_in, noffset);
	uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12);
	struct timeval tv_work_found;
	enum test_nonce2_result res;
	bool ret = true;

	thread_reportout(thr);

	cgtime(&tv_work_found);
	*work_nonce = htole32(nonce);
	work->thr_id = thr->id;

	/* Do one last check before attempting to submit the work */
	/* Side effect: sets work->data and work->hash for us */
	res = test_nonce2(work, nonce);

	if (unlikely(res == TNR_BAD))
	{
		inc_hw_errors(thr, work, nonce);
		ret = false;
		goto out;
	}

	mutex_lock(&stats_lock);
	total_diff1 += work->nonce_diff;
	thr->cgpu->diff1 += work->nonce_diff;
	work->pool->diff1 += work->nonce_diff;
	thr->cgpu->last_device_valid_work = time(NULL);
	mutex_unlock(&stats_lock);

	if (noncelog_file)
		noncelog(work);

	if (res == TNR_HIGH)
	{
		// Share above target, normal
		/* Check the diff of the share, even if it didn't reach the
		 * target, just to set the best share value if it's higher. */
		share_diff(work);
		goto out;
	}

	submit_work_async2(work, &tv_work_found);
	work = NULL;  // Taken by submit_work_async2

out:
	if (work)
		free_work(work);
	thread_reportin(thr);
	return ret;
}
// Returns true if we should stop working on this piece of work;
// returning false means we will keep scanning for a nonce.
// Assumptions: work->blk.nonce is the number of nonces completed in the work.
// See minerloop_scanhash comments for more details & usage.
bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t max_hashes)
{
	if (work->blk.nonce == 0xffffffff ||  // known we are scanning a full nonce range
	    wdiff->tv_sec > opt_scantime ||  // scan-time has elapsed (user specified, default 60s)
	    work->blk.nonce >= 0xfffffffe - max_hashes ||  // are there enough nonces left in the work?
	    max_hashes >= 0xfffffffe ||  // assume we are scanning a full nonce range
	    stale_work(work, false))  // work is stale
		return true;
	return false;
}
void __thr_being_msg(int prio, struct thr_info *thr, const char *being)
{
	struct cgpu_info *proc = thr->cgpu;

	if (proc->threads > 1)
		applog(prio, "%"PRIpreprv" (thread %d) %s", proc->proc_repr, thr->id, being);
	else
		applog(prio, "%"PRIpreprv" %s", proc->proc_repr, being);
}

// Called by asynchronous minerloops, when they find their processor should be disabled
void mt_disable_start(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;

	if (drv->thread_disable)
		drv->thread_disable(mythr);

	hashmeter2(mythr);
	__thr_being_msg(LOG_WARNING, mythr, "being disabled");
	mythr->rolling = mythr->cgpu->rolling = 0;
	thread_reportout(mythr);
	mythr->_mt_disable_called = true;
}
/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till
 * the driver tells us it's full, so that it may extract the work item using
 * the get_queued() function which adds it to the hashtable on
 * cgpu->queued_work. */
static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
{
	thread_reportout(mythr);
	do {
		bool need_work;

		/* Do this lockless just to know if we need more unqueued work. */
		need_work = (!cgpu->unqueued_work);

		/* get_work is a blocking function so do it outside of lock
		 * to prevent deadlocks with other locks. */
		if (need_work) {
			struct work *work = get_work(mythr);

			wr_lock(&cgpu->qlock);
			/* Check we haven't grabbed work somehow between
			 * checking and picking up the lock. */
			if (likely(!cgpu->unqueued_work))
				cgpu->unqueued_work = work;
			else
				need_work = false;
			wr_unlock(&cgpu->qlock);

			if (unlikely(!need_work))
				discard_work(work);
		}
		/* The queue_full function should be used by the driver to
		 * actually place work items on the physical device if it
		 * does have a queue. */
	} while (drv->queue_full && !drv->queue_full(cgpu));
}
/* Add a work item to a cgpu's queued hashlist */
void __add_queued(struct cgpu_info *cgpu, struct work *work)
{
	cgpu->queued_count++;
	HASH_ADD_INT(cgpu->queued_work, id, work);
}

/* This function is for retrieving one work item from the unqueued pointer and
 * adding it to the hashtable of queued work. Code using this function must be
 * able to handle NULL as a return which implies there is no work available. */
struct work *get_queued(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	wr_lock(&cgpu->qlock);
	if (cgpu->unqueued_work) {
		work = cgpu->unqueued_work;
		if (unlikely(stale_work(work, false))) {
			discard_work(work);
			work = NULL;
			wake_gws();
		} else
			__add_queued(cgpu, work);
		cgpu->unqueued_work = NULL;
	}
	wr_unlock(&cgpu->qlock);

	return work;
}
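
/* Sketch of the queued-work protocol from a driver's point of view (the
 * names below are hypothetical; only the fill_queue/get_queued/work_completed
 * contract documented in this file is guaranteed):
 *
 *     static bool mydrv_queue_full(struct cgpu_info *cgpu)
 *     {
 *         struct work * const work = get_queued(cgpu);
 *         if (!work)
 *             return false;  // nothing staged, ask fill_queue for more
 *         // ...send the work item to the device here...
 *         // Return true once the device-side buffer cannot take more.
 *         return mydrv_buffer_full(cgpu);
 *     }
 */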
void add_queued(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	__add_queued(cgpu, work);
	wr_unlock(&cgpu->qlock);
}

/* Get fresh work and add it to cgpu's queued hashlist */
struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id)
{
	struct work *work = get_work(thr);

	add_queued(cgpu, work);
	return work;
}

/* This function is for finding an already queued work item in the
 * given que hashtable. Code using this function must be able
 * to handle NULL as a return which implies there is no matching work.
 * The calling function must lock access to the que if it is required.
 * The common values for midstatelen, offset, datalen are 32, 64, 12 */
struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work, *tmp, *ret = NULL;

	HASH_ITER(hh, que, work, tmp) {
		if (memcmp(work->midstate, midstate, midstatelen) == 0 &&
		    memcmp(work->data + offset, data, datalen) == 0) {
			ret = work;
			break;
		}
	}

	return ret;
}

/* This function is for finding an already queued work item in the
 * device's queued_work hashtable. Code using this function must be able
 * to handle NULL as a return which implies there is no matching work.
 * The common values for midstatelen, offset, datalen are 32, 64, 12 */
struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *ret;

	rd_lock(&cgpu->qlock);
	ret = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	rd_unlock(&cgpu->qlock);

	return ret;
}
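
/* Example (hypothetical driver code), using the common 32/64/12 layout noted
 * above: a device that reports the midstate plus the 12 bytes of block data
 * following offset 64 with each result could locate the matching work with
 *     work = find_queued_work_bymidstate(cgpu, midstate, 32, data64, 64, 12);
 * and must cope with a NULL return if the item was already aged or completed.
 */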
struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work, *ret = NULL;

	rd_lock(&cgpu->qlock);
	work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	if (work)
		ret = copy_work(work);
	rd_unlock(&cgpu->qlock);

	return ret;
}

void __work_completed(struct cgpu_info *cgpu, struct work *work)
{
	cgpu->queued_count--;
	HASH_DEL(cgpu->queued_work, work);
}

/* This iterates over a queued hashlist finding work started more than secs
 * seconds ago and discards the work as completed. The driver must set the
 * work->tv_work_start value appropriately. Returns the number of items aged. */
int age_queued_work(struct cgpu_info *cgpu, double secs)
{
	struct work *work, *tmp;
	struct timeval tv_now;
	int aged = 0;

	cgtime(&tv_now);

	wr_lock(&cgpu->qlock);
	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
		if (tdiff(&tv_now, &work->tv_work_start) > secs) {
			__work_completed(cgpu, work);
			aged++;
		}
	}
	wr_unlock(&cgpu->qlock);

	return aged;
}
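
/* A queued driver with no completion feedback from its hardware might call
 * this from its scanwork loop, e.g. age_queued_work(cgpu, 60.0) to expire
 * items older than a minute (the value is illustrative, not a requirement).
 * Note that aged items are only unlinked from the queued hashlist here, not
 * freed. */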
/* This function should be used by queued device drivers when they're sure
 * the work struct is no longer in use. */
void work_completed(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	__work_completed(cgpu, work);
	wr_unlock(&cgpu->qlock);
	free_work(work);
}

/* Combines find_queued_work_bymidstate and work_completed in one function
 * withOUT destroying the work so the driver must free it. */
struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work;

	wr_lock(&cgpu->qlock);
	work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	if (work)
		__work_completed(cgpu, work);
	wr_unlock(&cgpu->qlock);

	return work;
}

void flush_queue(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	wr_lock(&cgpu->qlock);
	work = cgpu->unqueued_work;
	cgpu->unqueued_work = NULL;
	wr_unlock(&cgpu->qlock);

	if (work) {
		free_work(work);
		applog(LOG_DEBUG, "Discarded queued work item");
	}
}
/* This version of hash work is for devices that are fast enough to always
 * perform a full nonce range and need a queue to keep the device busy.
 * Work creation and destruction is not done from within this function
 * directly. */
void hash_queued_work(struct thr_info *mythr)
{
	const long cycle = opt_log_interval / 5 ? : 1;
	struct timeval tv_start = {0, 0}, tv_end;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	const int thr_id = mythr->id;
	int64_t hashes_done = 0;

	if (unlikely(cgpu->deven != DEV_ENABLED))
		mt_disable(mythr);

	while (likely(!cgpu->shutdown)) {
		struct timeval diff;
		int64_t hashes;

		fill_queue(mythr, cgpu, drv, thr_id);

		thread_reportin(mythr);
		hashes = drv->scanwork(mythr);

		/* Reset the bool here in case the driver looks for it
		 * synchronously in the scanwork loop. */
		mythr->work_restart = false;

		if (unlikely(hashes == -1)) {
			applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id);
			cgpu->deven = DEV_DISABLED;
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
			mt_disable(mythr);
		}

		hashes_done += hashes;
		cgtime(&tv_end);
		timersub(&tv_end, &tv_start, &diff);
		if (diff.tv_sec >= cycle) {
			hashmeter(thr_id, &diff, hashes_done);
			hashes_done = 0;
			copy_time(&tv_start, &tv_end);
		}

		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(mythr);

		if (unlikely(mythr->work_restart)) {
			flush_queue(cgpu);
			if (drv->flush_work)
				drv->flush_work(cgpu);
		}
	}
	// cgpu->deven = DEV_DISABLED; set in miner_thread
}
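
/* Minimal contract for a driver using hash_queued_work, as exercised by the
 * loop above (a sketch; exact driver behaviour varies): implement
 * drv->scanwork to run or poll the device and return the hashes completed
 * (or -1 on fatal failure), optionally drv->queue_full to accept staged work
 * via get_queued(), and drv->flush_work to drop device-side work when the
 * block changes. */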
// Called by minerloop, when it is re-enabling a processor
void mt_disable_finish(struct thr_info *mythr)
{
	struct device_drv *drv = mythr->cgpu->drv;

	thread_reportin(mythr);
	__thr_being_msg(LOG_WARNING, mythr, "being re-enabled");
	if (drv->thread_enable)
		drv->thread_enable(mythr);
	mythr->_mt_disable_called = false;
}

// Called by synchronous minerloops, when they find their processor should be disabled
// Calls mt_disable_start, waits until it's re-enabled, then calls mt_disable_finish
void mt_disable(struct thr_info *mythr)
{
	const struct cgpu_info * const cgpu = mythr->cgpu;

	mt_disable_start(mythr);
	applog(LOG_DEBUG, "Waiting for wakeup notification in miner thread");
	do {
		notifier_read(mythr->notifier);
	} while (mythr->pause || cgpu->deven != DEV_ENABLED);
	mt_disable_finish(mythr);
}

enum {
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct work *work, struct timeval *tv_lp, struct timeval *tv_lp_reply)
{
	bool rc;

	work->rolltime = rolltime;
	rc = work_decode(pool, work, val);
	if (unlikely(!rc)) {
		applog(LOG_ERR, "Could not convert longpoll data to work");
		free_work(work);
		return;
	}
	total_getworks++;
	pool->getwork_requested++;
	work->pool = pool;
	copy_time(&work->tv_getwork, tv_lp);
	copy_time(&work->tv_getwork_reply, tv_lp_reply);
	calc_diff(work, 0);

	if (pool->enabled == POOL_REJECTING)
		work->mandatory = true;

	work->longpoll = true;
	work->getwork_mode = GETWORK_MODE_LP;

	update_last_work(work);

	/* We'll be checking this work item twice, but we already know it's
	 * from a new block so explicitly force the new block detection now
	 * rather than waiting for it to hit the stage thread. This also
	 * allows testwork to know whether LP discovered the block or not. */
	test_work_current(work);

	/* Don't use backup LPs as work if we have failover-only enabled. Use
	 * the longpoll work from a pool that has been rejecting shares as a
	 * way to detect when the pool has recovered.
	 */
	if (pool != current_pool() && opt_fail_only && pool->enabled != POOL_REJECTING) {
		free_work(work);
		return;
	}

	work = clone_work(work);

	applog(LOG_DEBUG, "Pushing converted work to stage thread");

	stage_work(work);
	applog(LOG_DEBUG, "Converted longpoll data to work");
}
/* If we want longpoll, enable it for the chosen default pool, or, if
 * the pool does not support longpoll, find the first one that does
 * and use its longpoll support */
static
struct pool *_select_longpoll_pool(struct pool *cp, bool(*func)(struct pool *))
{
	int i;

	if (func(cp))
		return cp;
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (cp->goal != pool->goal)
			continue;
		if (func(pool))
			return pool;
	}
	return NULL;
}

/* This will make the longpoll thread wait till it's the current pool, or it
 * has been flagged as rejecting, before attempting to open any connections.
 */
static void wait_lpcurrent(struct pool *pool)
{
	mutex_lock(&lp_lock);
	while (!cnx_needed(pool))
	{
		pool->lp_active = false;
		pthread_cond_wait(&lp_cond, &lp_lock);
	}
	mutex_unlock(&lp_lock);
}

static curl_socket_t save_curl_socket(void *vpool, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr) {
	struct pool *pool = vpool;
	curl_socket_t sock = bfg_socket(addr->family, addr->socktype, addr->protocol);
	pool->lp_socket = sock;
	return sock;
}
static void *longpoll_thread(void *userdata)
{
	struct pool *cp = (struct pool *)userdata;
	/* This *pool is the source of the actual longpoll, not the pool we've
	 * tied it to */
	struct timeval start, reply, end;
	struct pool *pool = NULL;
	char threadname[20];
	CURL *curl = NULL;
	int failures = 0;
	char *lp_url;
	int rolltime;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	snprintf(threadname, 20, "longpoll%u", cp->pool_no);
	RenameThread(threadname);

	curl = curl_easy_init();
	if (unlikely(!curl)) {
		applog(LOG_ERR, "CURL initialisation failed");
		return NULL;
	}

retry_pool:
	pool = select_longpoll_pool(cp);
	if (!pool) {
		applog(LOG_WARNING, "No suitable long-poll found for %s", cp->rpc_url);
		while (!pool) {
			cgsleep_ms(60000);
			pool = select_longpoll_pool(cp);
		}
	}

	if (pool->has_stratum) {
		applog(LOG_WARNING, "Block change for %s detection via %s stratum",
		       cp->rpc_url, pool->rpc_url);
		goto out;
	}

	/* Any longpoll from any pool is enough for this to be true */
	pool->goal->have_longpoll = true;

	wait_lpcurrent(cp);

	{
		lp_url = pool->lp_url;
		if (cp == pool)
			applog(LOG_WARNING, "Long-polling activated for %s (%s)", lp_url, pool_protocol_name(pool->lp_proto));
		else
			applog(LOG_WARNING, "Long-polling activated for %s via %s (%s)", cp->rpc_url, lp_url, pool_protocol_name(pool->lp_proto));
	}
	while (42) {
		json_t *val, *soval;
		struct work *work = make_work();
		char *lpreq;

		lpreq = prepare_rpc_req(work, pool->lp_proto, pool->lp_id, pool);
		work->pool = pool;
		if (!lpreq)
		{
			free_work(work);
			goto lpfail;
		}

		wait_lpcurrent(cp);

		cgtime(&start);

		/* Longpoll connections can be persistent for a very long time
		 * and any number of issues could have come up in the meantime
		 * so always establish a fresh connection instead of relying on
		 * a persistent one. */
		curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
		curl_easy_setopt(curl, CURLOPT_FORBID_REUSE, 1);
		curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, save_curl_socket);
		curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool);
		val = json_rpc_call(curl, lp_url, pool->rpc_userpass,
		                    lpreq, false, true, &rolltime, pool, false);
		pool->lp_socket = CURL_SOCKET_BAD;

		cgtime(&reply);

		free(lpreq);

		if (likely(val)) {
			soval = json_object_get(json_object_get(val, "result"), "submitold");
			if (soval)
				pool->submit_old = json_is_true(soval);
			else
				pool->submit_old = false;
			convert_to_work(val, rolltime, pool, work, &start, &reply);
			failures = 0;
			json_decref(val);
		} else {
			/* Some pools regularly drop the longpoll request so
			 * only see this as longpoll failure if it happens
			 * immediately and just restart it the rest of the
			 * time. */
			cgtime(&end);
			free_work(work);
			if (end.tv_sec - start.tv_sec <= 30)
			{
				if (failures == 1)
					applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url);
lpfail:
				cgsleep_ms(30000);
			}
		}
		if (pool != cp) {
			pool = select_longpoll_pool(cp);
			if (unlikely(!pool))
				goto retry_pool;
			if (pool->has_stratum) {
				applog(LOG_WARNING, "Block change for %s detection via %s stratum",
				       cp->rpc_url, pool->rpc_url);
				break;
			}
		}
		if (unlikely(pool->removed))
			break;
	}

out:
	pool->lp_active = false;
	curl_easy_cleanup(curl);
	return NULL;
}
static void stop_longpoll(void)
{
	int i;

	want_longpoll = false;
	for (i = 0; i < total_pools; ++i)
	{
		struct pool *pool = pools[i];

		if (unlikely(!pool->lp_started))
			continue;

		pool->lp_started = false;
		pthread_cancel(pool->longpoll_thread);
	}
	struct mining_goal_info *goal, *tmpgoal;
	HASH_ITER(hh, mining_goals, goal, tmpgoal)
	{
		goal->have_longpoll = false;
	}
}

static void start_longpoll(void)
{
	int i;

	want_longpoll = true;
	for (i = 0; i < total_pools; ++i)
	{
		struct pool *pool = pools[i];

		if (unlikely(pool->removed || pool->lp_started || !pool->lp_url))
			continue;

		pool->lp_started = true;
		if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool)))
			quit(1, "Failed to create pool longpoll thread");
	}
}
void reinit_device(struct cgpu_info *cgpu)
{
	if (cgpu->drv->reinit_device)
		cgpu->drv->reinit_device(cgpu);
}

static struct timeval rotate_tv;

/* We reap curls if they are unused for over five minutes */
static void reap_curl(struct pool *pool)
{
	struct curl_ent *ent, *iter;
	struct timeval now;
	int reaped = 0;

	cgtime(&now);

	mutex_lock(&pool->pool_lock);
	LL_FOREACH_SAFE(pool->curllist, ent, iter) {
		if (pool->curls < 2)
			break;
		if (now.tv_sec - ent->tv.tv_sec > 300) {
			reaped++;
			pool->curls--;
			LL_DELETE(pool->curllist, ent);
			curl_easy_cleanup(ent->curl);
			free(ent);
		}
	}
	mutex_unlock(&pool->pool_lock);

	if (reaped)
		applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
}
static void *watchpool_thread(void __maybe_unused *userdata)
{
	int intervals = 0;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	RenameThread("watchpool");

	while (42) {
		struct timeval now;
		int i;

		if (++intervals > 20)
			intervals = 0;
		cgtime(&now);

		for (i = 0; i < total_pools; i++) {
			struct pool *pool = pools[i];

			if (!opt_benchmark)
				reap_curl(pool);

			/* Get a rolling utility per pool over 10 mins */
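			/* (Exponentially decaying average: each new 10-minute
			 * sample is weighted 0.63/1.63 ~ 39%, the accumulated
			 * history 1/1.63 ~ 61%.) */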
			if (intervals > 19) {
				int shares = pool->diff1 - pool->last_shares;

				pool->last_shares = pool->diff1;
				pool->utility = (pool->utility + (double)shares * 0.63) / 1.63;
				pool->shares = pool->utility;
			}

			if (pool->enabled == POOL_DISABLED)
				continue;

			/* Don't start testing any pools if the test threads
			 * from startup are still doing their first attempt. */
			if (unlikely(pool->testing)) {
				pthread_join(pool->test_thread, NULL);
			}

			/* Retest a pool that is flagged idle, at most every 30 seconds */
			if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 30) {
				if (pool_active(pool, true) && pool_tclear(pool, &pool->idle))
					pool_resus(pool);
			}
			/* Only switch pools if the failback pool has been
			 * alive for more than 5 minutes (default) to prevent
			 * intermittently failing pools from being used. */
			if (!pool->idle && pool->enabled == POOL_ENABLED && pool_strategy == POOL_FAILOVER && pool->prio < cp_prio() && now.tv_sec - pool->tv_idle.tv_sec > opt_fail_switch_delay)
			{
				if (opt_fail_switch_delay % 60)
					applog(LOG_WARNING, "Pool %d %s stable for %d second%s",
					       pool->pool_no, pool->rpc_url,
					       opt_fail_switch_delay,
					       (opt_fail_switch_delay == 1 ? "" : "s"));
				else
					applog(LOG_WARNING, "Pool %d %s stable for %d minute%s",
					       pool->pool_no, pool->rpc_url,
					       opt_fail_switch_delay / 60,
					       (opt_fail_switch_delay == 60 ? "" : "s"));
				switch_pools(NULL);
			}
		}

		if (current_pool()->idle)
			switch_pools(NULL);

		if (pool_strategy == POOL_ROTATE && now.tv_sec - rotate_tv.tv_sec > 60 * opt_rotate_period) {
			cgtime(&rotate_tv);
			switch_pools(NULL);
		}

		cgsleep_ms(30000);
	}
	return NULL;
}
void mt_enable(struct thr_info *thr)
{
	applog(LOG_DEBUG, "Waking up thread %d", thr->id);
	notifier_wake(thr->notifier);
}

void proc_enable(struct cgpu_info *cgpu)
{
	int j;

	cgpu->deven = DEV_ENABLED;
	for (j = cgpu->threads ?: 1; j--; )
		mt_enable(cgpu->thr[j]);
}

#define device_recovered(cgpu)  proc_enable(cgpu)
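
/* Apply any matching "set device" rules (opt_set_device_list) to this
 * processor. As parsed below, each rule takes the form
 * [PATTERN:]OPTION[=VALUE]: an optional device pattern before the first ':',
 * an option name, and an optional value after '='. */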
void cgpu_set_defaults(struct cgpu_info * const cgpu)
{
	struct string_elist *setstr_elist;
	const char *p, *p2;
	char replybuf[0x2000];
	size_t L;

	DL_FOREACH(opt_set_device_list, setstr_elist)
	{
		const char * const setstr = setstr_elist->string;
		p = strchr(setstr, ':');
		if (!p)
			p = setstr;
		{
			L = p - setstr;
			char pattern[L + 1];
			if (L)
				memcpy(pattern, setstr, L);
			pattern[L] = '\0';
			if (!cgpu_match(pattern, cgpu))
				continue;
		}
		applog(LOG_DEBUG, "%"PRIpreprv": %s: Matched with set default: %s",
		       cgpu->proc_repr, __func__, setstr);
		if (p[0] == ':')
			++p;
		p2 = strchr(p, '=');
		if (!p2)
		{
			L = strlen(p);
			p2 = "";
		}
		else
		{
			L = p2 - p;
			++p2;
		}
		char opt[L + 1];
		if (L)
			memcpy(opt, p, L);
		opt[L] = '\0';

		L = strlen(p2);
		char setval[L + 1];
		if (L)
			memcpy(setval, p2, L);
		setval[L] = '\0';

		enum bfg_set_device_replytype success;
		p = proc_set_device(cgpu, opt, setval, replybuf, &success);
		switch (success)
		{
			case SDR_OK:
				applog(LOG_DEBUG, "%"PRIpreprv": Applied rule %s%s%s",
				       cgpu->proc_repr, setstr,
				       p ? ": " : "", p ?: "");
				break;
			case SDR_ERR:
			case SDR_HELP:
			case SDR_UNKNOWN:
				applog(LOG_DEBUG, "%"PRIpreprv": Applying rule %s: %s",
				       cgpu->proc_repr, setstr, p);
				break;
			case SDR_AUTO:
			case SDR_NOSUPP:
				applog(LOG_DEBUG, "%"PRIpreprv": set_device is not implemented (trying to apply rule: %s)",
				       cgpu->proc_repr, setstr);
		}
	}
	cgpu->already_set_defaults = true;
}
void drv_set_defaults(const struct device_drv * const drv, const void *datap, void *userp, const char * const devpath, const char * const serial, const int mode)
{
	struct device_drv dummy_drv = *drv;
	struct cgpu_info dummy_cgpu = {
		.drv = &dummy_drv,
		.device = &dummy_cgpu,
		.device_id = -1,
		.proc_id = -1,
		.device_data = userp,
		.device_path = devpath,
		.dev_serial = serial,
	};
	strcpy(dummy_cgpu.proc_repr, drv->name);
	switch (mode)
	{
		case 0:
			dummy_drv.set_device = datap;
			break;
		case 1:
			dummy_drv.set_device = NULL;
			dummy_cgpu.set_device_funcs = datap;
			break;
	}
	cgpu_set_defaults(&dummy_cgpu);
}
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
 * the screen at regular intervals, and restarts threads if they appear to have
 * died. */
#define WATCHDOG_SICK_TIME		60
#define WATCHDOG_DEAD_TIME		600
#define WATCHDOG_SICK_COUNT		(WATCHDOG_SICK_TIME/WATCHDOG_INTERVAL)
#define WATCHDOG_DEAD_COUNT		(WATCHDOG_DEAD_TIME/WATCHDOG_INTERVAL)
static void *watchdog_thread(void __maybe_unused *userdata)
{
	const unsigned int interval = WATCHDOG_INTERVAL;
	struct timeval zero_tv;

#ifndef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

	RenameThread("watchdog");

	memset(&zero_tv, 0, sizeof(struct timeval));
	cgtime(&rotate_tv);

	while (1) {
		int i;
		struct timeval now;

		sleep(interval);

		discard_stale();

		hashmeter(-1, &zero_tv, 0);

#ifdef HAVE_CURSES
		const int ts = total_staged(true);
		if (curses_active_locked()) {
			change_logwinsize();
			curses_print_status(ts);
			_refresh_devstatus(true);
			touchwin(logwin);
			wrefresh(logwin);
			unlock_curses();
		}
#endif

		cgtime(&now);

		if (!sched_paused && !should_run()) {
			applog(LOG_WARNING, "Pausing execution as per stop time %02d:%02d scheduled",
			       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			if (!schedstart.enable) {
				quit(0, "Terminating execution as planned");
				break;
			}

			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			sched_paused = true;

			rd_lock(&mining_thr_lock);
			for (i = 0; i < mining_threads; i++)
				mining_thr[i]->pause = true;
			rd_unlock(&mining_thr_lock);
		} else if (sched_paused && should_run()) {
			applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			if (schedstop.enable)
				applog(LOG_WARNING, "Will pause execution as scheduled at %02d:%02d",
				       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			sched_paused = false;

			for (i = 0; i < mining_threads; i++) {
				struct thr_info *thr;

				thr = get_thread(i);
				thr->pause = false;
			}

			for (i = 0; i < total_devices; ++i)
			{
				struct cgpu_info *cgpu = get_devices(i);

				/* Don't touch disabled devices */
				if (cgpu->deven == DEV_DISABLED)
					continue;
				proc_enable(cgpu);
			}
		}

		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = get_devices(i);

			if (!cgpu->disable_watchdog)
				bfg_watchdog(cgpu, &now);
		}
	}

	return NULL;
}
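
/* Per-device watchdog, called from the loop above. It drives the device
 * life-cycle roughly as follows: LIFE_WELL -> LIFE_SICK after
 * WATCHDOG_SICK_TIME (60s) without progress -> LIFE_DEAD after
 * WATCHDOG_DEAD_TIME (600s); sick or dead devices are restarted once a
 * minute when opt_restart is set, and a thermal cutoff parks the device in
 * DEV_RECOVER until it cools below targettemp. */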
void bfg_watchdog(struct cgpu_info * const cgpu, struct timeval * const tvp_now)
{
	struct thr_info *thr = cgpu->thr[0];
	enum dev_enable *denable;
	char *dev_str = cgpu->proc_repr;

	if (likely(drv_ready(cgpu)))
	{
		if (unlikely(!cgpu->already_set_defaults))
			cgpu_set_defaults(cgpu);

		if (cgpu->drv->get_stats)
			cgpu->drv->get_stats(cgpu);
	}

	denable = &cgpu->deven;

	if (cgpu->drv->watchdog)
		cgpu->drv->watchdog(cgpu, tvp_now);

	/* Thread is disabled */
	if (*denable == DEV_DISABLED)
		return;
	else
	if (*denable == DEV_RECOVER_ERR) {
		if (opt_restart && timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > cgpu->reinit_backoff) {
			applog(LOG_NOTICE, "Attempting to reinitialize %s",
			       dev_str);
			if (cgpu->reinit_backoff < 300)
				cgpu->reinit_backoff *= 2;
			device_recovered(cgpu);
		}
		return;
	}
	else
	if (*denable == DEV_RECOVER) {
		if (opt_restart && cgpu->temp < cgpu->targettemp) {
			applog(LOG_NOTICE, "%s recovered to temperature below target, re-enabling",
			       dev_str);
			device_recovered(cgpu);
		}
		dev_error_update(cgpu, REASON_DEV_THERMAL_CUTOFF);
		return;
	}
	else
	if (cgpu->temp > cgpu->cutofftemp)
	{
		applog(LOG_WARNING, "%s hit thermal cutoff limit at %dC, disabling!",
		       dev_str, (int)cgpu->temp);
		*denable = DEV_RECOVER;
		dev_error(cgpu, REASON_DEV_THERMAL_CUTOFF);
		run_cmd(cmd_idle);
	}

	if (thr->getwork) {
		if (cgpu->status == LIFE_WELL && thr->getwork < tvp_now->tv_sec - opt_log_interval) {
			int thrid;
			bool cgpu_idle = true;

			thr->rolling = 0;
			for (thrid = 0; thrid < cgpu->threads; ++thrid)
				if (!cgpu->thr[thrid]->getwork)
					cgpu_idle = false;
			if (cgpu_idle) {
				cgpu->rolling = 0;
				cgpu->status = LIFE_WAIT;
			}
		}
		return;
	}
	else if (cgpu->status == LIFE_WAIT)
		cgpu->status = LIFE_WELL;

#ifdef USE_CPUMINING
	if (!strcmp(cgpu->drv->dname, "cpu"))
		return;
#endif
	if (cgpu->status != LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME)) {
		if (likely(cgpu->status != LIFE_INIT && cgpu->status != LIFE_INIT2))
			applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
		cgpu->status = LIFE_WELL;
		cgpu->device_last_well = time(NULL);
	} else if (cgpu->status == LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME)) {
		thr->rolling = cgpu->rolling = 0;
		cgpu->status = LIFE_SICK;
		applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str);
		cgtime(&thr->sick);

		dev_error(cgpu, REASON_DEV_SICK_IDLE_60);
		run_cmd(cmd_sick);
		if (opt_restart && cgpu->drv->reinit_device) {
			applog(LOG_ERR, "%s: Attempting to restart", dev_str);
			reinit_device(cgpu);
		}
	} else if (cgpu->status == LIFE_SICK && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME)) {
		cgpu->status = LIFE_DEAD;
		applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str);
		cgtime(&thr->sick);

		dev_error(cgpu, REASON_DEV_DEAD_IDLE_600);
		run_cmd(cmd_dead);
	} else if (tvp_now->tv_sec - thr->sick.tv_sec > 60 &&
	           (cgpu->status == LIFE_SICK || cgpu->status == LIFE_DEAD)) {
		/* Attempt to restart a GPU that's sick or dead once every minute */
		cgtime(&thr->sick);
		if (opt_restart)
			reinit_device(cgpu);
	}
}
static void log_print_status(struct cgpu_info *cgpu)
{
	char logline[255];

	get_statline(logline, sizeof(logline), cgpu);
	applog(LOG_WARNING, "%s", logline);
}

void print_summary(void)
{
	struct timeval diff;
	int hours, mins, secs, i;
	double utility, efficiency = 0.0;
	char xfer[(ALLOC_H2B_SPACED*2)+4+1], bw[(ALLOC_H2B_SPACED*2)+6+1];
	int pool_secs;

	timersub(&total_tv_end, &total_tv_start, &diff);
	hours = diff.tv_sec / 3600;
	mins = (diff.tv_sec % 3600) / 60;
	secs = diff.tv_sec % 60;

	utility = total_accepted / total_secs * 60;
	efficiency = total_bytes_xfer ? total_diff_accepted * 2048. / total_bytes_xfer : 0.0;

	applog(LOG_WARNING, "\nSummary of runtime statistics:\n");
	applog(LOG_WARNING, "Started at %s", datestamp);
	if (total_pools == 1)
		applog(LOG_WARNING, "Pool: %s", pools[0]->rpc_url);
#if defined(USE_CPUMINING) && defined(USE_SHA256D)
	if (opt_n_threads > 0)
		applog(LOG_WARNING, "CPU hasher algorithm used: %s", algo_names[opt_algo]);
#endif
	applog(LOG_WARNING, "Runtime: %d hrs : %d mins : %d secs", hours, mins, secs);
	applog(LOG_WARNING, "Average hashrate: %.1f Megahash/s", total_mhashes_done / total_secs);
	applog(LOG_WARNING, "Solved blocks: %d", found_blocks);
	applog(LOG_WARNING, "Best share difficulty: %s", best_share);
	applog(LOG_WARNING, "Share submissions: %d", total_accepted + total_rejected);
	applog(LOG_WARNING, "Accepted shares: %d", total_accepted);
	applog(LOG_WARNING, "Rejected shares: %d + %d stale (%.2f%%)",
	       total_rejected, total_stale,
	       (float)(total_rejected + total_stale) / (float)(total_rejected + total_stale + total_accepted)
	);
	applog(LOG_WARNING, "Accepted difficulty shares: %1.f", total_diff_accepted);
	applog(LOG_WARNING, "Rejected difficulty shares: %1.f", total_diff_rejected);
	applog(LOG_WARNING, "Hardware errors: %d", hw_errors);
	applog(LOG_WARNING, "Network transfer: %s (%s)",
	       multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
	                          (float)total_bytes_rcvd,
	                          (float)total_bytes_sent),
	       multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
	                          (float)(total_bytes_rcvd / total_secs),
	                          (float)(total_bytes_sent / total_secs)));
	applog(LOG_WARNING, "Efficiency (accepted shares * difficulty / 2 KB): %.2f", efficiency);
	applog(LOG_WARNING, "Utility (accepted shares / min): %.2f/min\n", utility);

	applog(LOG_WARNING, "Unable to get work from server occasions: %d", total_go);
	applog(LOG_WARNING, "Work items generated locally: %d", local_work);
	applog(LOG_WARNING, "Submitting work remotely delay occasions: %d", total_ro);
	applog(LOG_WARNING, "New blocks detected on network: %d\n", new_blocks);

	if (total_pools > 1) {
		for (i = 0; i < total_pools; i++) {
			struct pool *pool = pools[i];

			applog(LOG_WARNING, "Pool: %s", pool->rpc_url);
			if (pool->solved)
				applog(LOG_WARNING, "SOLVED %d BLOCK%s!", pool->solved, pool->solved > 1 ? "S" : "");
			applog(LOG_WARNING, " Share submissions: %d", pool->accepted + pool->rejected);
			applog(LOG_WARNING, " Accepted shares: %d", pool->accepted);
			applog(LOG_WARNING, " Rejected shares: %d + %d stale (%.2f%%)",
			       pool->rejected, pool->stale_shares,
			       (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted)
			);
			applog(LOG_WARNING, " Accepted difficulty shares: %1.f", pool->diff_accepted);
			applog(LOG_WARNING, " Rejected difficulty shares: %1.f", pool->diff_rejected);
			pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL);
			applog(LOG_WARNING, " Network transfer: %s (%s)",
			       multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2,
			                          (float)pool->cgminer_pool_stats.net_bytes_received,
			                          (float)pool->cgminer_pool_stats.net_bytes_sent),
			       multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2,
			                          (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs),
			                          (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs)));
			uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent;
			efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. / pool_bytes_xfer : 0.0;
			applog(LOG_WARNING, " Efficiency (accepted * difficulty / 2 KB): %.2f", efficiency);

			applog(LOG_WARNING, " Items worked on: %d", pool->works);
			applog(LOG_WARNING, " Unable to get work from server occasions: %d", pool->getfail_occasions);
			applog(LOG_WARNING, " Submitting work remotely delay occasions: %d\n", pool->remotefail_occasions);
		}
	}

	if (opt_quit_summary != BQS_NONE)
	{
		if (opt_quit_summary == BQS_DEFAULT)
		{
			if (total_devices < 25)
				opt_quit_summary = BQS_PROCS;
			else
				opt_quit_summary = BQS_DEVS;
		}
		if (opt_quit_summary == BQS_DETAILED)
			include_serial_in_statline = true;
		applog(LOG_WARNING, "Summary of per device statistics:\n");
		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = get_devices(i);

			if (!cgpu->proc_id)
			{
				// Device summary line
				opt_show_procs = false;
				log_print_status(cgpu);
				opt_show_procs = true;
			}
			if ((opt_quit_summary == BQS_PROCS || opt_quit_summary == BQS_DETAILED) && cgpu->procs > 1)
				log_print_status(cgpu);
		}
	}

	if (opt_shares) {
		applog(LOG_WARNING, "Mined %g accepted shares of %g requested\n", total_diff_accepted, opt_shares);
		if (opt_shares > total_diff_accepted)
			applog(LOG_WARNING, "WARNING - Mined only %g shares of %g requested.", total_diff_accepted, opt_shares);
	}
	applog(LOG_WARNING, " ");

	fflush(stderr);
	fflush(stdout);
}
void _bfg_clean_up(bool restarting)
{
#ifdef USE_OPENCL
	clear_adl(nDevs);
#endif
#ifdef HAVE_LIBUSB
	if (likely(have_libusb))
		libusb_exit(NULL);
#endif

	cgtime(&total_tv_end);
#ifdef WIN32
	timeEndPeriod(1);
#endif
	if (!restarting) {
		/* Attempting to disable curses or print a summary during a
		 * restart can lead to a deadlock. */
#ifdef HAVE_CURSES
		disable_curses();
#endif
		if (!opt_realquiet && successful_connect)
			print_summary();
	}

	if (opt_n_threads > 0)
		free(cpus);

	curl_global_cleanup();

#ifdef WIN32
	WSACleanup();
#endif
}
void _quit(int status)
{
	if (status) {
		const char *ev = getenv("__BFGMINER_SEGFAULT_ERRQUIT");
		if (unlikely(ev && ev[0] && ev[0] != '0')) {
			int *p = NULL;
			// NOTE debugger can bypass with: p = &p
			*p = status;  // Segfault, hopefully dumping core
		}
	}

#if defined(unix) || defined(__APPLE__)
	if (forkpid > 0) {
		kill(forkpid, SIGTERM);
		forkpid = 0;
	}
#endif

	exit(status);
}
#ifdef HAVE_CURSES
char *curses_input(const char *query)
{
	char *input;

	echo();
	// 256 bytes: up to 255 characters from wgetnstr plus the terminator
	input = malloc(256);
	if (!input)
		quit(1, "Failed to malloc input");
	leaveok(logwin, false);
	wlogprint("%s:\n", query);
	wgetnstr(logwin, input, 255);
	if (!strlen(input))
	{
		free(input);
		input = NULL;
	}
	leaveok(logwin, true);
	noecho();
	return input;
}
#endif
static void *test_pool_thread(void *arg)
{
	struct pool *pool = (struct pool *)arg;

	if (pool_active(pool, false)) {
		pool_tset(pool, &pool->lagging);
		pool_tclear(pool, &pool->idle);
		bool first_pool = false;

		cg_wlock(&control_lock);
		if (!pools_active) {
			currentpool = pool;
			if (pool->pool_no != 0)
				first_pool = true;
			pools_active = true;
		}
		cg_wunlock(&control_lock);

		if (unlikely(first_pool))
			applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url);
		else
			applog(LOG_NOTICE, "Pool %d %s alive", pool->pool_no, pool->rpc_url);

		switch_pools(NULL);
	} else
		pool_died(pool);

	pool->testing = false;
	return NULL;
}
/* Returns true that the pool details were added, unless we are not live:
 * in that case this is the only pool being added, so we wait for the test
 * to finish and return whether any pool ended up active. */
bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass)
{
	size_t siz;

	pool_set_uri(pool, url);
	pool->rpc_user = user;
	pool->rpc_pass = pass;
	siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2;
	pool->rpc_userpass = malloc(siz);
	if (!pool->rpc_userpass)
		quit(1, "Failed to malloc userpass");
	snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass);

	pool->testing = true;
	pool->idle = true;
	enable_pool(pool);

	pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool);
	if (!live) {
		pthread_join(pool->test_thread, NULL);
		return pools_active;
	}
	return true;
}
#ifdef HAVE_CURSES
static bool input_pool(bool live)
{
	char *url = NULL, *user = NULL, *pass = NULL;
	struct pool *pool;
	bool ret = false;

	immedok(logwin, true);
	wlogprint("Input server details.\n");

	url = curses_input("URL");
	if (!url)
		goto out;

	user = curses_input("Username");
	if (!user)
		goto out;

	pass = curses_input("Password");
	if (!pass)
		pass = calloc(1, 1);

	pool = add_pool();

	if (!detect_stratum(pool, url) && strncmp(url, "http://", 7) &&
	    strncmp(url, "https://", 8)) {
		char *httpinput;

		httpinput = malloc(256);
		if (!httpinput)
			quit(1, "Failed to malloc httpinput");
		strcpy(httpinput, "http://");
		strncat(httpinput, url, 248);
		free(url);
		url = httpinput;
	}

	ret = add_pool_details(pool, live, url, user, pass);
out:
	immedok(logwin, false);

	if (!ret) {
		if (url)
			free(url);
		if (user)
			free(user);
		if (pass)
			free(pass);
	}
	return ret;
}
#endif
#if BLKMAKER_VERSION > 1 && defined(USE_SHA256D)
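/* Scan a local bitcoind's bitcoin.conf for RPC details and, if usable, add
 * the server as a failover-only pool. Only the rpcuser, rpcpassword,
 * rpcport, rpcssl and rpcconnect keys are consulted; rpcport defaults to
 * 8332 and rpcconnect to localhost. */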
static
bool _add_local_gbt(const char * const filepath, void *userp)
{
	const bool * const live_p = userp;
	struct pool *pool;
	char buf[0x100];
	char *rpcuser = NULL, *rpcpass = NULL, *rpcconnect = NULL;
	int rpcport = 0, rpcssl = -101;

	FILE * const F = fopen(filepath, "r");
	if (!F)
		applogr(false, LOG_WARNING, "%s: Failed to open %s for reading", "add_local_gbt", filepath);

	while (fgets(buf, sizeof(buf), F))
	{
		if (!strncasecmp(buf, "rpcuser=", 8))
			rpcuser = trimmed_strdup(&buf[8]);
		else
		if (!strncasecmp(buf, "rpcpassword=", 12))
			rpcpass = trimmed_strdup(&buf[12]);
		else
		if (!strncasecmp(buf, "rpcport=", 8))
			rpcport = atoi(&buf[8]);
		else
		if (!strncasecmp(buf, "rpcssl=", 7))
			rpcssl = atoi(&buf[7]);
		else
		if (!strncasecmp(buf, "rpcconnect=", 11))
			rpcconnect = trimmed_strdup(&buf[11]);
		else
			continue;

		if (rpcuser && rpcpass && rpcport && rpcssl != -101 && rpcconnect)
			break;
	}
	fclose(F);

	if (!rpcpass)
	{
		applog(LOG_DEBUG, "%s: Did not find rpcpassword in %s", "add_local_gbt", filepath);
err:
		free(rpcuser);
		free(rpcpass);
		goto out;
	}

	if (!rpcport)
		rpcport = 8332;
	if (rpcssl == -101)
		rpcssl = 0;

	const bool have_cbaddr = get_mining_goal("default")->generation_script;

	const int uri_sz = 0x30;
	char * const uri = malloc(uri_sz);
	snprintf(uri, uri_sz, "http%s://%s:%d/%s#allblocks", rpcssl ? "s" : "", rpcconnect ?: "localhost", rpcport, have_cbaddr ? "" : "#getcbaddr");

	char hfuri[0x40];
	if (rpcconnect)
		snprintf(hfuri, sizeof(hfuri), "%s:%d", rpcconnect, rpcport);
	else
		snprintf(hfuri, sizeof(hfuri), "port %d", rpcport);
	applog(LOG_DEBUG, "Local bitcoin RPC server on %s found in %s", hfuri, filepath);

	for (int i = 0; i < total_pools; ++i)
	{
		struct pool *pool = pools[i];
		if (!(strcmp(pool->rpc_url, uri) || strcmp(pool->rpc_pass, rpcpass)))
		{
			applog(LOG_DEBUG, "Server on %s is already configured, not adding as failover", hfuri);
			free(uri);
			goto err;
		}
	}

	pool = add_pool();
	if (!pool)
	{
		applog(LOG_ERR, "%s: Error adding pool for bitcoin configured in %s", "add_local_gbt", filepath);
		goto err;
	}

	if (!rpcuser)
		rpcuser = "";
	pool->quota = 0;
	adjust_quota_gcd();
	pool->failover_only = true;
	add_pool_details(pool, *live_p, uri, rpcuser, rpcpass);
	applog(LOG_NOTICE, "Added local bitcoin RPC server on %s as pool %d", hfuri, pool->pool_no);

out:
	return false;
}
static
void add_local_gbt(bool live)
{
	appdata_file_call("Bitcoin", "bitcoin.conf", _add_local_gbt, &live);
}
#endif
#if defined(unix) || defined(__APPLE__)
static void fork_monitor()
{
	// Make a pipe: [readFD, writeFD]
	int pfd[2];
	int r = pipe(pfd);

	if (r < 0) {
		perror("pipe - failed to create pipe for --monitor");
		exit(1);
	}

	// Make stderr write end of pipe
	fflush(stderr);
	r = dup2(pfd[1], 2);
	if (r < 0) {
		perror("dup2 - failed to alias stderr to write end of pipe for --monitor");
		exit(1);
	}
	r = close(pfd[1]);
	if (r < 0) {
		perror("close - failed to close write end of pipe for --monitor");
		exit(1);
	}

	// Don't allow a dying monitor to kill the main process
	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
	if (SIG_ERR == sr0 || SIG_ERR == sr1) {
		perror("signal - failed to edit signal mask for --monitor");
		exit(1);
	}

	// Fork a child process
	forkpid = fork();
	if (forkpid < 0) {
		perror("fork - failed to fork child process for --monitor");
		exit(1);
	}

	// Child: launch monitor command
	if (0 == forkpid) {
		// Make stdin read end of pipe
		r = dup2(pfd[0], 0);
		if (r < 0) {
			perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor");
			exit(1);
		}
		r = close(pfd[0]);
		if (r < 0) {
			perror("close - in child, failed to close read end of pipe for --monitor");
			exit(1);
		}

		// Launch user specified command
		execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL);
		perror("execl - in child failed to exec user specified command for --monitor");
		exit(1);
	}

	// Parent: clean up unused fds and bail
	r = close(pfd[0]);
	if (r < 0) {
		perror("close - failed to close read end of pipe for --monitor");
		exit(1);
	}
}
#endif // defined(unix)
#ifdef HAVE_CURSES
#ifdef USE_UNICODE
static
wchar_t select_unicode_char(const wchar_t *opt)
{
	for ( ; *opt; ++opt)
		if (iswprint(*opt))
			return *opt;
	return '?';
}
#endif

void enable_curses(void) {
	int x;
	__maybe_unused int y;

	lock_curses();
	if (curses_active) {
		unlock_curses();
		return;
	}

#ifdef USE_UNICODE
	if (use_unicode)
	{
		setlocale(LC_CTYPE, "");
		if (iswprint(0xb0))
			have_unicode_degrees = true;
		unicode_micro = select_unicode_char(L"\xb5\u03bcu");
	}
#endif

	mainwin = initscr();
	start_color();
#if defined(PDCURSES) || defined(NCURSES_VERSION)
	if (ERR != use_default_colors())
		default_bgcolor = -1;
#endif
	if (has_colors() && ERR != init_pair(1, COLOR_WHITE, COLOR_BLUE))
	{
		menu_attr = COLOR_PAIR(1);
		if (ERR != init_pair(2, COLOR_RED, default_bgcolor))
			attr_bad |= COLOR_PAIR(2);
	}
	keypad(mainwin, true);
	getmaxyx(mainwin, y, x);
	statuswin = newwin(logstart, x, 0, 0);
	leaveok(statuswin, true);
	// For whatever reason, PDCurses crashes if the logwin is initialized to height y-logcursor
	// We resize the window later anyway, so just start it off at 1 :)
	logwin = newwin(1, 0, logcursor, 0);
	idlok(logwin, true);
	scrollok(logwin, true);
	leaveok(logwin, true);
	cbreak();
	noecho();
	nonl();
	curses_active = true;
	statusy = logstart;
	unlock_curses();
}
#endif
/* TODO: fix the need for a dummy CPU device_drv even when there is no
 * support for CPU mining */
#ifndef USE_CPUMINING
struct device_drv cpu_drv;
struct device_drv cpu_drv = {
	.name = "CPU",
};
#endif
static int cgminer_id_count = 0;
static int device_line_id_count;

void register_device(struct cgpu_info *cgpu)
{
	cgpu->deven = DEV_ENABLED;

	wr_lock(&devices_lock);
	devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
	wr_unlock(&devices_lock);

	if (!cgpu->proc_id)
		cgpu->device_line_id = device_line_id_count++;

	int thr_objs = cgpu->threads ?: 1;
	mining_threads += thr_objs;
	base_queue += thr_objs + cgpu->extra_work_queue;

	{
		const struct device_drv * const drv = cgpu->drv;
		struct mining_algorithm *malgo;
		LL_FOREACH(mining_algorithms, malgo)
		{
			if (drv_min_nonce_diff(drv, cgpu, malgo) < 0)
				continue;
			malgo->base_queue += thr_objs + cgpu->extra_work_queue;
		}
	}

#ifdef HAVE_CURSES
	adj_width(mining_threads, &dev_width);
#endif

	rwlock_init(&cgpu->qlock);
	cgpu->queued_work = NULL;
}
struct _cgpu_devid_counter {
	char name[4];
	int lastid;
	UT_hash_handle hh;
};

void renumber_cgpu(struct cgpu_info *cgpu)
{
	static struct _cgpu_devid_counter *devids = NULL;
	struct _cgpu_devid_counter *d;

	HASH_FIND_STR(devids, cgpu->drv->name, d);
	if (d)
		cgpu->device_id = ++d->lastid;
	else {
		d = malloc(sizeof(*d));
		memcpy(d->name, cgpu->drv->name, sizeof(d->name));
		cgpu->device_id = d->lastid = 0;
		HASH_ADD_STR(devids, name, d);
	}

	// Build repr strings
	sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100);
	sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100);
	strcpy(cgpu->proc_repr, cgpu->dev_repr);
	sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id);
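
	/* Processors beyond the first get base-26 letter suffixes appended to
	 * the device repr: 'a' through 'z', then 'aa' and so on, as computed
	 * below. */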
	const int lpcount = cgpu->procs;
	if (lpcount > 1)
	{
		int ns;
		struct cgpu_info *slave;
		int lpdigits = 1;

		for (int i = lpcount; i > 26 && lpdigits < 3; i /= 26)
			++lpdigits;
		if (lpdigits > max_lpdigits)
			max_lpdigits = lpdigits;

		memset(&cgpu->proc_repr[5], 'a', lpdigits);
		cgpu->proc_repr[5 + lpdigits] = '\0';
		ns = strlen(cgpu->proc_repr_ns);
		strcpy(&cgpu->proc_repr_ns[ns], &cgpu->proc_repr[5]);

		slave = cgpu;
		for (int i = 1; i < lpcount; ++i)
		{
			slave = slave->next_proc;
			strcpy(slave->proc_repr, cgpu->proc_repr);
			strcpy(slave->proc_repr_ns, cgpu->proc_repr_ns);
			for (int x = i, y = lpdigits; --y, x; x /= 26)
			{
				slave->proc_repr_ns[ns + y] =
				slave->proc_repr[5 + y] += (x % 26);
			}
		}
	}
}
static bool my_blkmaker_sha256_callback(void *digest, const void *buffer, size_t length)
{
	sha256(buffer, length, digest);
	return true;
}

static
bool drv_algo_check(const struct device_drv * const drv)
{
	struct mining_goal_info *goal, *tmpgoal;
	HASH_ITER(hh, mining_goals, goal, tmpgoal)
	{
		if (drv_min_nonce_diff(drv, NULL, goal->malgo) >= 0)
			return true;
	}
	return false;
}

#ifndef HAVE_PTHREAD_CANCEL
extern void setup_pthread_cancel_workaround();
extern struct sigaction pcwm_orig_term_handler;
#endif

bool bfg_need_detect_rescan;
extern void probe_device(struct lowlevel_device_info *);
static void schedule_rescan(const struct timeval *);
static
void drv_detect_all()
{
	bool rescanning = false;
rescan:
	bfg_need_detect_rescan = false;

#ifdef HAVE_BFG_LOWLEVEL
	struct lowlevel_device_info * const infolist = lowlevel_scan(), *info, *infotmp;
	LL_FOREACH_SAFE(infolist, info, infotmp)
		probe_device(info);
	LL_FOREACH_SAFE(infolist, info, infotmp)
		pthread_join(info->probe_pth, NULL);
#endif

	struct driver_registration *reg;
	BFG_FOREACH_DRIVER_BY_PRIORITY(reg)
	{
		const struct device_drv * const drv = reg->drv;
		if (!(drv_algo_check(drv) && drv->drv_detect))
			continue;
		drv->drv_detect();
	}

#ifdef HAVE_BFG_LOWLEVEL
	lowlevel_scan_free();
#endif

	if (bfg_need_detect_rescan)
	{
		if (rescanning)
		{
			applog(LOG_DEBUG, "Device rescan requested a second time, delaying");
			struct timeval tv_when;
			timer_set_delay_from_now(&tv_when, rescan_delay_ms * 1000);
			schedule_rescan(&tv_when);
		}
		else
		{
			rescanning = true;
			applog(LOG_DEBUG, "Device rescan requested");
			goto rescan;
		}
	}
}

static
void allocate_cgpu(struct cgpu_info *cgpu, unsigned int *kp)
{
	struct thr_info *thr;
	int j;
	struct device_drv *api = cgpu->drv;
	cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET;
	int threadobj = cgpu->threads;
	if (!threadobj)
		// Create a fake thread object to handle hashmeter etc
		threadobj = 1;
	cgpu->thr = calloc(threadobj + 1, sizeof(*cgpu->thr));
	cgpu->thr[threadobj] = NULL;
	cgpu->status = LIFE_INIT;
	if (opt_devices_enabled_list)
	{
		struct string_elist *enablestr_elist;
		cgpu->deven = DEV_DISABLED;
		DL_FOREACH(opt_devices_enabled_list, enablestr_elist)
		{
			const char * const enablestr = enablestr_elist->string;
			if (cgpu_match(enablestr, cgpu))
			{
				cgpu->deven = DEV_ENABLED;
				break;
			}
		}
	}
	cgpu->max_hashes = 0;
	BFGINIT(cgpu->cutofftemp, opt_cutofftemp);
	BFGINIT(cgpu->targettemp, cgpu->cutofftemp - 6);
	// Setup thread structs before starting any of the threads, in case they try to interact
	for (j = 0; j < threadobj; ++j, ++*kp) {
		thr = get_thread(*kp);
		thr->id = *kp;
		thr->cgpu = cgpu;
		thr->device_thread = j;
		thr->work_restart_notifier[1] = INVSOCK;
		thr->mutex_request[1] = INVSOCK;
		thr->_job_transition_in_progress = true;
		timerclear(&thr->tv_morework);
		thr->scanhash_working = true;
		thr->hashes_done = 0;
		timerclear(&thr->tv_hashes_done);
		cgtime(&thr->tv_lastupdate);
		thr->tv_poll.tv_sec = -1;
		thr->_max_nonce = api->can_limit_work ? api->can_limit_work(thr) : 0xffffffff;
		cgpu->thr[j] = thr;
	}
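	/* Notifier setup is three-way: if the master device has no real
	 * threads, thr[0] gets an invalid (unused) notifier; a processor
	 * without its own threads shares the master's notifier; otherwise
	 * each thread gets its own. */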
	if (!cgpu->device->threads)
		notifier_init_invalid(cgpu->thr[0]->notifier);
	else
	if (!cgpu->threads)
		memcpy(&cgpu->thr[0]->notifier, &cgpu->device->thr[0]->notifier, sizeof(cgpu->thr[0]->notifier));
	else
		for (j = 0; j < cgpu->threads; ++j)
		{
			thr = cgpu->thr[j];
			notifier_init(thr->notifier);
		}
}

static
void start_cgpu(struct cgpu_info *cgpu)
{
	struct thr_info *thr;
	int j;
	for (j = 0; j < cgpu->threads; ++j) {
		thr = cgpu->thr[j];
		/* Enable threads for devices set not to mine but disable
		 * their queue in case we wish to enable them later */
		if (cgpu->drv->thread_prepare && !cgpu->drv->thread_prepare(thr))
			continue;
		thread_reportout(thr);
		if (unlikely(thr_info_create(thr, NULL, miner_thread, thr)))
			quit(1, "thread %d create failed", thr->id);
		notifier_wake(thr->notifier);
	}
	if (cgpu->deven == DEV_ENABLED)
		proc_enable(cgpu);
}
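
/* Detection helper for scan_serial(): when given a serial or device path,
 * temporarily swap the global scan_devices list for one containing just
 * "noauto" plus that entry, so drv_detect_all() probes only the requested
 * device, then restore the original list. */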
static
void _scan_serial(void *p)
{
	const char *s = p;
	struct string_elist *iter, *tmp;
	struct string_elist *orig_scan_devices = scan_devices;
	if (s)
	{
		// Make temporary scan_devices list
		scan_devices = NULL;
		string_elist_add("noauto", &scan_devices);
		add_serial(s);
	}
	drv_detect_all();
	if (s)
	{
		DL_FOREACH_SAFE(scan_devices, iter, tmp)
		{
			string_elist_del(&scan_devices, iter);
		}
		scan_devices = orig_scan_devices;
	}
}

#ifdef HAVE_BFG_LOWLEVEL
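
/* Match a lowlevel device against a user-supplied identifier by serial,
 * path, or devid; failing that, normalise the identifier through
 * devpath_to_devid() and compare it against the device's devid. */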
static
bool _probe_device_match(const struct lowlevel_device_info * const info, const char * const ser)
{
	if (!(false
		|| (info->serial && !strcasecmp(ser, info->serial))
		|| (info->path   && !strcasecmp(ser, info->path  ))
		|| (info->devid  && !strcasecmp(ser, info->devid ))
	))
	{
		char *devid = devpath_to_devid(ser);
		if (!devid)
			return false;
		const bool different = strcmp(info->devid, devid);
		free(devid);
		if (different)
			return false;
	}
	return true;
}
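
/* Run a single driver probe. Returns true if the device was claimed
 * outright (success without BPR_CONTINUE_PROBES); on failure, optionally
 * flags a hotplug rescan unless the probe set BPR_DONT_RESCAN. */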
static
bool _probe_device_do_probe(const struct device_drv * const drv, const struct lowlevel_device_info * const info, bool * const request_rescan_p)
{
	bfg_probe_result_flags = 0;
	if (drv->lowl_probe(info))
	{
		if (!(bfg_probe_result_flags & BPR_CONTINUE_PROBES))
			return true;
	}
	else
	if (request_rescan_p && opt_hotplug && !(bfg_probe_result_flags & BPR_DONT_RESCAN))
		*request_rescan_p = true;
	return false;
}

bool dummy_check_never_true = false;
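
/* Per-device probe thread. Probing proceeds in three passes: drivers
 * explicitly assigned to this device via scan_devices ("driver:serial"
 * entries), then drivers whose lowl_match() recognises the hardware
 * (unless a "noauto" flag applies), and finally drivers enabled for
 * "all" devices. */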
static
void *probe_device_thread(void *p)
{
	struct lowlevel_device_info * const infolist = p;
	struct lowlevel_device_info *info = infolist;
	bool request_rescan = false;
	{
		char threadname[6 + strlen(info->devid) + 1];
		sprintf(threadname, "probe_%s", info->devid);
		RenameThread(threadname);
	}
	// If already in use, ignore
	if (bfg_claim_any(NULL, NULL, info->devid))
		applogr(NULL, LOG_DEBUG, "%s: \"%s\" already in use",
		        __func__, info->product);
	// if lowlevel device matches specific user assignment, probe requested driver(s)
	struct string_elist *sd_iter, *sd_tmp;
	struct driver_registration *dreg;
	DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
	{
		const char * const dname = sd_iter->string;
		const char * const colon = strpbrk(dname, ":@");
		if (!(colon && colon != dname))
			continue;
		const char * const ser = &colon[1];
		LL_FOREACH2(infolist, info, same_devid_next)
		{
			if (!_probe_device_match(info, ser))
				continue;
			const size_t dnamelen = (colon - dname);
			char dname_nt[dnamelen + 1];
			memcpy(dname_nt, dname, dnamelen);
			dname_nt[dnamelen] = '\0';
			BFG_FOREACH_DRIVER_BY_PRIORITY(dreg) {
				const struct device_drv * const drv = dreg->drv;
				if (!(drv && drv->lowl_probe && drv_algo_check(drv)))
					continue;
				if (strcasecmp(drv->dname, dname_nt) && strcasecmp(drv->name, dname_nt))
					continue;
				if (_probe_device_do_probe(drv, info, &request_rescan))
					return NULL;
			}
		}
	}
	// probe driver(s) with auto enabled and matching VID/PID/Product/etc of device
	BFG_FOREACH_DRIVER_BY_PRIORITY(dreg)
	{
		const struct device_drv * const drv = dreg->drv;
		if (!drv_algo_check(drv))
			continue;
		// Check for "noauto" flag
		// NOTE: driver-specific configuration overrides general
		bool doauto = true;
		DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
		{
			const char * const dname = sd_iter->string;
			// NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable
			const char *colon = strchr(dname, ':');
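			/* No colon: point colon one character before the string so
			 * that &colon[1] below is the whole entry, and dnamelen
			 * works out to -1, meaning no driver name was given. */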
			if (!colon)
				colon = &dname[-1];
			if (strcasecmp("noauto", &colon[1]) && strcasecmp("auto", &colon[1]))
				continue;
			const ssize_t dnamelen = (colon - dname);
			if (dnamelen >= 0) {
				char dname_nt[dnamelen + 1];
				memcpy(dname_nt, dname, dnamelen);
				dname_nt[dnamelen] = '\0';
				if (strcasecmp(drv->dname, dname_nt) && strcasecmp(drv->name, dname_nt))
					continue;
			}
			doauto = (tolower(colon[1]) == 'a');
			if (dnamelen != -1)
				break;
		}
		if (doauto && drv->lowl_match)
		{
			LL_FOREACH2(infolist, info, same_devid_next)
			{
				/*
				 * The below call to applog is absolutely necessary.
				 * Starting with commit 76d0cc183b1c9ddcc0ef34d2e43bc696ef9de92e, installing BFGMiner on
				 * Mac OS X using Homebrew results in a binary that segfaults on startup.
				 * There are two unresolved issues:
				 * 1) The BFGMiner authors cannot find a way to install BFGMiner with Homebrew that results
				 *    in debug symbols being available to help troubleshoot the issue.
				 * 2) The issue disappears when unrelated code changes are made, such as adding the following
				 *    call to applog with infolist and/or p.
				 * We would encourage revisiting this in the future to come up with a more concrete solution.
				 * Reproducing should only require commenting out or removing the following line and
				 * installing BFGMiner using "brew install bfgminer --HEAD".
				 */
				if (dummy_check_never_true)
					applog(LOG_DEBUG, "lowl_match: %p(%s) %p %p %p", drv, drv->dname, info, infolist, p);
				if (!drv->lowl_match(info))
					continue;
				if (_probe_device_do_probe(drv, info, &request_rescan))
					return NULL;
			}
		}
	}
	// probe driver(s) with 'all' enabled
	DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp)
	{
		const char * const dname = sd_iter->string;
		// NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable
		const char * const colon = strchr(dname, ':');
		if (!colon)
		{
			LL_FOREACH2(infolist, info, same_devid_next)
			{
				if (
#ifdef NEED_BFG_LOWL_VCOM
				    (info->lowl == &lowl_vcom && !strcasecmp(dname, "all")) ||
#endif
				    _probe_device_match(info, (dname[0] == '@') ? &dname[1] : dname))
				{
					bool dont_rescan = false;
					BFG_FOREACH_DRIVER_BY_PRIORITY(dreg)
					{
						const struct device_drv * const drv = dreg->drv;
						if (!drv_algo_check(drv))
							continue;
						if (drv->lowl_probe_by_name_only)
							continue;
						if (!drv->lowl_probe)
							continue;
						if (_probe_device_do_probe(drv, info, NULL))
							return NULL;
						if (bfg_probe_result_flags & BPR_DONT_RESCAN)
							dont_rescan = true;
					}
					if (opt_hotplug && !dont_rescan)
						request_rescan = true;
					break;
				}
			}
			continue;
		}
		if (strcasecmp(&colon[1], "all"))
			continue;
		const size_t dnamelen = (colon - dname);
		char dname_nt[dnamelen + 1];
		memcpy(dname_nt, dname, dnamelen);
		dname_nt[dnamelen] = '\0';
		BFG_FOREACH_DRIVER_BY_PRIORITY(dreg) {
			const struct device_drv * const drv = dreg->drv;
			if (!(drv && drv->lowl_probe && drv_algo_check(drv)))
				continue;
			if (strcasecmp(drv->dname, dname_nt) && strcasecmp(drv->name, dname_nt))
				continue;
			LL_FOREACH2(infolist, info, same_devid_next)
			{
				if (info->lowl->exclude_from_all)
					continue;
				if (_probe_device_do_probe(drv, info, NULL))
					return NULL;
			}
		}
	}
	// Only actually request a rescan if we never found any cgpu
	if (request_rescan)
		bfg_need_detect_rescan = true;
	return NULL;
}

void probe_device(struct lowlevel_device_info * const info)
{
	pthread_create(&info->probe_pth, NULL, probe_device_thread, info);
}
#endif
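
/* Serialised entry point for adding devices at runtime: runs addfunc to
 * populate devices_new, grows the devices and mining_thr arrays, then
 * allocates, starts, and registers each new cgpu. Returns the number of
 * devices actually added. */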
int create_new_cgpus(void (*addfunc)(void*), void *arg)
{
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
	int devcount, i, mining_threads_new = 0;
	unsigned int k;
	struct cgpu_info *cgpu;
	struct thr_info *thr;
	void *p;
	mutex_lock(&mutex);
	devcount = total_devices;
	addfunc(arg);
	if (!total_devices_new)
		goto out;
	wr_lock(&devices_lock);
	p = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + total_devices_new + 1));
	if (unlikely(!p))
	{
		wr_unlock(&devices_lock);
		applog(LOG_ERR, "scan_serial: realloc failed trying to grow devices array");
		goto out;
	}
	devices = p;
	wr_unlock(&devices_lock);
	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		mining_threads_new += cgpu->threads ?: 1;
	}
	wr_lock(&mining_thr_lock);
	mining_threads_new += mining_threads;
	p = realloc(mining_thr, sizeof(struct thr_info *) * mining_threads_new);
	if (unlikely(!p))
	{
		wr_unlock(&mining_thr_lock);
		applog(LOG_ERR, "scan_serial: realloc failed trying to grow mining_thr");
		goto out;
	}
	mining_thr = p;
	wr_unlock(&mining_thr_lock);
	for (i = mining_threads; i < mining_threads_new; ++i) {
		mining_thr[i] = calloc(1, sizeof(*thr));
		if (!mining_thr[i])
		{
			applog(LOG_ERR, "scan_serial: Failed to calloc mining_thr[%d]", i);
			for ( ; --i >= mining_threads; )
				free(mining_thr[i]);
			goto out;
		}
	}
	k = mining_threads;
	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		allocate_cgpu(cgpu, &k);
	}
	for (i = 0; i < total_devices_new; ++i)
	{
		cgpu = devices_new[i];
		start_cgpu(cgpu);
		register_device(cgpu);
		++total_devices;
	}
#ifdef HAVE_CURSES
	switch_logsize();
#endif
out:
	total_devices_new = 0;
	devcount = total_devices - devcount;
	mutex_unlock(&mutex);
	return devcount;
}

int scan_serial(const char *s)
{
	return create_new_cgpus(_scan_serial, (void*)s);
}

static pthread_mutex_t rescan_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool rescan_active;
static struct timeval tv_rescan;
static notifier_t rescan_notifier;
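
/* Rescan timer thread: waits in select() until tv_rescan passes, then
 * triggers scan_serial(NULL); it exits once the timer is unset. The
 * rescan_notifier fd appears intended to wake the wait early when the
 * schedule changes. */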
static
void *rescan_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());
	RenameThread("rescan");
	struct timeval tv_timeout, tv_now;
	fd_set rfds;
	while (true)
	{
		mutex_lock(&rescan_mutex);
		tv_timeout = tv_rescan;
		if (!timer_isset(&tv_timeout))
		{
			rescan_active = false;
			mutex_unlock(&rescan_mutex);
			break;
		}
		mutex_unlock(&rescan_mutex);
		FD_ZERO(&rfds);
		FD_SET(rescan_notifier[0], &rfds);
		const int maxfd = rescan_notifier[0];
		timer_set_now(&tv_now);
		if (select(maxfd+1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now)) > 0)
			notifier_read(rescan_notifier);
		mutex_lock(&rescan_mutex);
		if (timer_passed(&tv_rescan, NULL))
		{
			timer_unset(&tv_rescan);
			mutex_unlock(&rescan_mutex);
			applog(LOG_DEBUG, "Rescan timer expired, triggering");
			scan_serial(NULL);
		}
		else
			mutex_unlock(&rescan_mutex);
	}
	return NULL;
}
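
/* Scheduling policy (rescan_mutex must be held): while a rescan is
 * pending, an earlier request simply waits for the pending deadline and a
 * later request pushes the deadline back; otherwise a fresh timer thread
 * is started. */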
static
void _schedule_rescan(const struct timeval * const tvp_when)
{
	if (rescan_active)
	{
		if (timercmp(tvp_when, &tv_rescan, <))
			applog(LOG_DEBUG, "schedule_rescan: New schedule is before current, waiting it out");
		else
		{
			applog(LOG_DEBUG, "schedule_rescan: New schedule is after current, delaying rescan");
			tv_rescan = *tvp_when;
		}
		return;
	}
	applog(LOG_DEBUG, "schedule_rescan: Scheduling rescan (no rescans currently pending)");
	tv_rescan = *tvp_when;
	rescan_active = true;
	static pthread_t pth;
	if (unlikely(pthread_create(&pth, NULL, rescan_thread, NULL)))
		applog(LOG_ERR, "Failed to start rescan thread");
}

static
void schedule_rescan(const struct timeval * const tvp_when)
{
	mutex_lock(&rescan_mutex);
	_schedule_rescan(tvp_when);
	mutex_unlock(&rescan_mutex);
}

static
void hotplug_trigger()
{
	applog(LOG_DEBUG, "%s: Scheduling rescan immediately", __func__);
	struct timeval tv_now;
	timer_set_now(&tv_now);
	schedule_rescan(&tv_now);
}
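
/* Hotplug monitoring: on Linux, a udev netlink monitor polled via epoll;
 * on Windows, a hidden window receiving WM_DEVICECHANGE. Both variants
 * debounce bursts of device-add events, waiting hotplug_delay_ms of quiet
 * before triggering a rescan. */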
#if defined(HAVE_LIBUDEV) && defined(HAVE_SYS_EPOLL_H)
static
void *hotplug_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());
	RenameThread("hotplug");
	struct udev * const udev = udev_new();
	if (unlikely(!udev))
		applogfailr(NULL, LOG_ERR, "udev_new");
	struct udev_monitor * const mon = udev_monitor_new_from_netlink(udev, "udev");
	if (unlikely(!mon))
		applogfailr(NULL, LOG_ERR, "udev_monitor_new_from_netlink");
	if (unlikely(udev_monitor_enable_receiving(mon)))
		applogfailr(NULL, LOG_ERR, "udev_monitor_enable_receiving");
	const int epfd = epoll_create(1);
	if (unlikely(epfd == -1))
		applogfailr(NULL, LOG_ERR, "epoll_create");
	{
		const int fd = udev_monitor_get_fd(mon);
		struct epoll_event ev = {
			.events = EPOLLIN | EPOLLPRI,
			.data.fd = fd,
		};
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev))
			applogfailr(NULL, LOG_ERR, "epoll_ctl");
	}
	struct epoll_event ev;
	int rv;
	bool pending = false;
	while (true)
	{
		rv = epoll_wait(epfd, &ev, 1, pending ? hotplug_delay_ms : -1);
		if (rv == -1)
		{
			if (errno == EAGAIN || errno == EINTR)
				continue;
			break;
		}
		if (!rv)
		{
			hotplug_trigger();
			pending = false;
			continue;
		}
		struct udev_device * const device = udev_monitor_receive_device(mon);
		if (!device)
			continue;
		const char * const action = udev_device_get_action(device);
		applog(LOG_DEBUG, "%s: Received %s event", __func__, action);
		if (!strcmp(action, "add"))
			pending = true;
		udev_device_unref(device);
	}
	applogfailr(NULL, LOG_ERR, "epoll_wait");
}
#elif defined(WIN32)
static UINT_PTR _hotplug_wintimer_id;

VOID CALLBACK hotplug_win_timer(HWND hwnd, UINT msg, UINT_PTR idEvent, DWORD dwTime)
{
	KillTimer(NULL, _hotplug_wintimer_id);
	_hotplug_wintimer_id = 0;
	hotplug_trigger();
}

LRESULT CALLBACK hotplug_win_callback(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
	if (msg == WM_DEVICECHANGE && wParam == DBT_DEVNODES_CHANGED)
	{
		applog(LOG_DEBUG, "%s: Received DBT_DEVNODES_CHANGED event", __func__);
		_hotplug_wintimer_id = SetTimer(NULL, _hotplug_wintimer_id, hotplug_delay_ms, hotplug_win_timer);
	}
	return DefWindowProc(hwnd, msg, wParam, lParam);
}

static
void *hotplug_thread(__maybe_unused void *p)
{
	pthread_detach(pthread_self());
	WNDCLASS DummyWinCls = {
		.lpszClassName = "BFGDummyWinCls",
		.lpfnWndProc = hotplug_win_callback,
	};
	ATOM a = RegisterClass(&DummyWinCls);
	if (unlikely(!a))
		applogfailinfor(NULL, LOG_ERR, "RegisterClass", "%d", (int)GetLastError());
	HWND hwnd = CreateWindow((void*)(intptr_t)a, NULL, WS_OVERLAPPED, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, NULL, NULL);
	if (unlikely(!hwnd))
		applogfailinfor(NULL, LOG_ERR, "CreateWindow", "%d", (int)GetLastError());
	MSG msg;
	while (GetMessage(&msg, NULL, 0, 0))
	{
		TranslateMessage(&msg);
		DispatchMessage(&msg);
	}
	quit(0, "WM_QUIT received");
	return NULL;
}
#endif

#ifdef HAVE_BFG_HOTPLUG
static
void hotplug_start()
{
	pthread_t pth;
	if (unlikely(pthread_create(&pth, NULL, hotplug_thread, NULL)))
		applog(LOG_ERR, "Failed to start hotplug thread");
}
#endif

static void probe_pools(void)
{
	int i;
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		pool->testing = true;
		pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool);
	}
}
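
/* Raise the soft fd limit as far as practical: up to the hard limit, but
 * capped at FD_SETSIZE, presumably because select() cannot handle
 * descriptors beyond that. */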
static void raise_fd_limits(void)
{
#ifdef HAVE_SETRLIMIT
	struct rlimit fdlimit;
	rlim_t old_soft_limit;
	char frombuf[0x10] = "unlimited";
	char hardbuf[0x10] = "unlimited";
	if (getrlimit(RLIMIT_NOFILE, &fdlimit))
		applogr(, LOG_DEBUG, "setrlimit: Failed to getrlimit(RLIMIT_NOFILE)");
	old_soft_limit = fdlimit.rlim_cur;
	if (fdlimit.rlim_max > FD_SETSIZE || fdlimit.rlim_max == RLIM_INFINITY)
		fdlimit.rlim_cur = FD_SETSIZE;
	else
		fdlimit.rlim_cur = fdlimit.rlim_max;
	if (fdlimit.rlim_max != RLIM_INFINITY)
		snprintf(hardbuf, sizeof(hardbuf), "%lu", (unsigned long)fdlimit.rlim_max);
	if (old_soft_limit != RLIM_INFINITY)
		snprintf(frombuf, sizeof(frombuf), "%lu", (unsigned long)old_soft_limit);
	if (fdlimit.rlim_cur == old_soft_limit)
		applogr(, LOG_DEBUG, "setrlimit: Soft fd limit not being changed from %lu (FD_SETSIZE=%lu; hard limit=%s)",
		        (unsigned long)old_soft_limit, (unsigned long)FD_SETSIZE, hardbuf);
	if (setrlimit(RLIMIT_NOFILE, &fdlimit))
		applogr(, LOG_DEBUG, "setrlimit: Failed to change soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)",
		        frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf);
	applog(LOG_DEBUG, "setrlimit: Changed soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)",
	       frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf);
#else
	applog(LOG_DEBUG, "setrlimit: Not supported by platform");
#endif
}

static
void bfg_atexit(void)
{
	puts("");
}

extern void bfg_init_threadlocal();
extern bool stratumsrv_change_port(unsigned);
extern void test_aan_pll(void);

int main(int argc, char *argv[])
{
	struct sigaction handler;
	struct thr_info *thr;
	unsigned int k;
	int i;
	int rearrange_pools = 0;
	char *s;
#ifdef WIN32
	LoadLibrary("backtrace.dll");
#endif
	atexit(bfg_atexit);
	b58_sha256_impl = my_blkmaker_sha256_callback;
	blkmk_sha256_impl = my_blkmaker_sha256_callback;
	bfg_init_threadlocal();
#ifndef HAVE_PTHREAD_CANCEL
	setup_pthread_cancel_workaround();
#endif
	bfg_init_checksums();
#ifdef WIN32
	{
		WSADATA wsa;
		i = WSAStartup(MAKEWORD(2, 2), &wsa);
		if (i)
			quit(1, "Failed to initialise Winsock: %s", bfg_strerror(i, BST_SOCKET));
	}
#endif
	/* This dangerous function tramples random dynamically allocated
	 * variables, so do it before anything else at all */
	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
		quit(1, "Failed to curl_global_init");
	initial_args = malloc(sizeof(char *) * (argc + 1));
	for (i = 0; i < argc; i++)
		initial_args[i] = strdup(argv[i]);
	initial_args[argc] = NULL;
	mutex_init(&hash_lock);
	mutex_init(&console_lock);
	cglock_init(&control_lock);
	mutex_init(&stats_lock);
	mutex_init(&sharelog_lock);
	cglock_init(&ch_lock);
	mutex_init(&sshare_lock);
	rwlock_init(&blk_lock);
	rwlock_init(&netacc_lock);
	rwlock_init(&mining_thr_lock);
	rwlock_init(&devices_lock);
	mutex_init(&lp_lock);
	if (unlikely(pthread_cond_init(&lp_cond, bfg_condattr)))
		quit(1, "Failed to pthread_cond_init lp_cond");
	if (unlikely(pthread_cond_init(&gws_cond, bfg_condattr)))
		quit(1, "Failed to pthread_cond_init gws_cond");
	notifier_init(submit_waiting_notifier);
	timer_unset(&tv_rescan);
	notifier_init(rescan_notifier);
	/* Create a unique get work queue */
	getq = tq_new();
	if (!getq)
		quit(1, "Failed to create getq");
	/* We use the getq mutex as the staged lock */
	stgd_lock = &getq->mutex;
#if defined(USE_CPUMINING) && defined(USE_SHA256D)
	init_max_name_len();
#endif
	handler.sa_handler = &sighandler;
	handler.sa_flags = 0;
	sigemptyset(&handler.sa_mask);
#ifdef HAVE_PTHREAD_CANCEL
	sigaction(SIGTERM, &handler, &termhandler);
#else
	// Need to let pthread_cancel emulation handle SIGTERM first
	termhandler = pcwm_orig_term_handler;
	pcwm_orig_term_handler = handler;
#endif
	sigaction(SIGINT, &handler, &inthandler);
#ifndef WIN32
	signal(SIGPIPE, SIG_IGN);
#else
	timeBeginPeriod(1);
#endif
	opt_kernel_path = CGMINER_PREFIX;
	cgminer_path = alloca(PATH_MAX);
	s = strdup(argv[0]);
	strcpy(cgminer_path, dirname(s));
	free(s);
	strcat(cgminer_path, "/");
#if defined(USE_CPUMINING) && defined(WIN32)
	{
		char buf[32];
		int gev = GetEnvironmentVariable("BFGMINER_BENCH_ALGO", buf, sizeof(buf));
		if (gev > 0 && gev < sizeof(buf))
		{
			setup_benchmark_pool();
			double rate = bench_algo_stage3(atoi(buf));
			// Write result to shared memory for parent
			char unique_name[64];
			if (GetEnvironmentVariable("BFGMINER_SHARED_MEM", unique_name, 32))
			{
				HANDLE map_handle = CreateFileMapping(
					INVALID_HANDLE_VALUE,  // use paging file
					NULL,                  // default security attributes
					PAGE_READWRITE,        // read/write access
					0,                     // size: high 32-bits
					4096,                  // size: low 32-bits
					unique_name            // name of map object
				);
				if (NULL != map_handle) {
					void *shared_mem = MapViewOfFile(
						map_handle,      // object to map view of
						FILE_MAP_WRITE,  // read/write access
						0,               // high offset: map from
						0,               // low offset: beginning
						0                // default: map entire file
					);
					if (NULL != shared_mem)
						CopyMemory(shared_mem, &rate, sizeof(rate));
					(void)UnmapViewOfFile(shared_mem);
				}
				(void)CloseHandle(map_handle);
			}
			exit(0);
		}
	}
#endif
#ifdef HAVE_CURSES
	devcursor = 8;
	logstart = devcursor;
	logcursor = logstart;
#endif
	mutex_init(&submitting_lock);
	// Ensure at least the default goal is created
	get_mining_goal("default");
#ifdef USE_OPENCL
	opencl_early_init();
#endif
	schedstart.tm.tm_sec = 1;
	schedstop .tm.tm_sec = 1;
	opt_register_table(opt_early_table, NULL);
	opt_register_table(opt_config_table, NULL);
	opt_register_table(opt_cmdline_table, NULL);
	opt_early_parse(argc, argv, applog_and_exit);
	if (!config_loaded)
	{
		load_default_config();
		rearrange_pools = total_pools;
	}
	opt_free_table();
	/* parse command line */
	opt_register_table(opt_config_table,
	                   "Options for both config file and command line");
	opt_register_table(opt_cmdline_table,
	                   "Options for command line only");
	opt_parse(&argc, argv, applog_and_exit);
	if (argc != 1)
		quit(1, "Unexpected extra commandline arguments");
	if (rearrange_pools && rearrange_pools < total_pools)
	{
		// Prioritise commandline pools before default-config pools
		for (i = 0; i < rearrange_pools; ++i)
			pools[i]->prio += rearrange_pools;
		for ( ; i < total_pools; ++i)
			pools[i]->prio -= rearrange_pools;
	}
#ifndef HAVE_PTHREAD_CANCEL
	// Can't do this any earlier, or config isn't loaded
	applog(LOG_DEBUG, "pthread_cancel workaround in use");
#endif
#ifdef HAVE_PWD_H
	struct passwd *user_info = NULL;
	if (opt_setuid != NULL) {
		if ((user_info = getpwnam(opt_setuid)) == NULL) {
			quit(1, "Unable to find setuid user information");
		}
	}
#endif
#ifdef HAVE_CHROOT
	if (chroot_dir != NULL) {
#ifdef HAVE_PWD_H
		if (user_info == NULL && getuid() == 0) {
			applog(LOG_WARNING, "Running as root inside chroot");
		}
#endif
		if (chroot(chroot_dir) != 0) {
			quit(1, "Unable to chroot");
		}
		if (chdir("/"))
			quit(1, "Unable to chdir to chroot");
	}
#endif
#ifdef HAVE_PWD_H
	if (user_info != NULL) {
		if (setgid((*user_info).pw_gid) != 0)
			quit(1, "Unable to setgid");
		if (setuid((*user_info).pw_uid) != 0)
			quit(1, "Unable to setuid");
	}
#endif
	raise_fd_limits();
	if (opt_benchmark) {
		while (total_pools)
			remove_pool(pools[0]);
		setup_benchmark_pool();
	}
	if (opt_unittest) {
		test_cgpu_match();
		test_intrange();
		test_decimal_width();
		test_domain_funcs();
#ifdef USE_SCRYPT
		test_scrypt();
#endif
		test_target();
		test_uri_get_param();
		utf8_test();
#ifdef USE_JINGTIAN
		test_aan_pll();
#endif
		if (unittest_failures)
			quit(1, "Unit tests failed");
	}
#ifdef HAVE_CURSES
	if (opt_realquiet || opt_display_devs)
		use_curses = false;
	setlocale(LC_ALL, "C");
	if (use_curses)
		enable_curses();
#endif
#ifdef HAVE_LIBUSB
	int err = libusb_init(NULL);
	if (err)
		applog(LOG_WARNING, "libusb_init() failed err %d", err);
	else
		have_libusb = true;
#endif
	applog(LOG_WARNING, "Started %s", packagename);
	{
		struct bfg_loaded_configfile *configfile;
		LL_FOREACH(bfg_loaded_configfiles, configfile)
		{
			char * const cnfbuf = configfile->filename;
			int fileconf_load = configfile->fileconf_load;
			applog(LOG_NOTICE, "Loaded configuration file %s", cnfbuf);
			switch (fileconf_load) {
				case 0:
					applog(LOG_WARNING, "Fatal JSON error in configuration file.");
					applog(LOG_WARNING, "Configuration file could not be used.");
					break;
				case -1:
					applog(LOG_WARNING, "Error in configuration file, partially loaded.");
					if (use_curses)
						applog(LOG_WARNING, "Start BFGMiner with -T to see what failed to load.");
					break;
				default:
					break;
			}
		}
	}
	i = strlen(opt_kernel_path) + 2;
	char __kernel_path[i];
	snprintf(__kernel_path, i, "%s/", opt_kernel_path);
	opt_kernel_path = __kernel_path;
	if (want_per_device_stats)
		opt_log_output = true;
	bfg_devapi_init();
	drv_detect_all();
	total_devices = total_devices_new;
	devices = devices_new;
	total_devices_new = 0;
	devices_new = NULL;
	if (opt_display_devs) {
		int devcount = 0;
		applog(LOG_ERR, "Devices detected:");
		for (i = 0; i < total_devices; ++i) {
			struct cgpu_info *cgpu = devices[i];
			char buf[0x100];
			if (cgpu->device != cgpu)
				continue;
			if (cgpu->name)
				snprintf(buf, sizeof(buf), " %s", cgpu->name);
			else
			if (cgpu->dev_manufacturer)
				snprintf(buf, sizeof(buf), " %s by %s", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer);
			else
			if (cgpu->dev_product)
				snprintf(buf, sizeof(buf), " %s", cgpu->dev_product);
			else
				strcpy(buf, " Device");
			tailsprintf(buf, sizeof(buf), " (driver=%s; procs=%d", cgpu->drv->dname, cgpu->procs);
			if (cgpu->dev_serial)
				tailsprintf(buf, sizeof(buf), "; serial=%s", cgpu->dev_serial);
			if (cgpu->device_path)
				tailsprintf(buf, sizeof(buf), "; path=%s", cgpu->device_path);
			tailsprintf(buf, sizeof(buf), ")");
			_applog(LOG_NOTICE, buf);
			++devcount;
		}
		quit(0, "%d devices listed", devcount);
	}
	mining_threads = 0;
	for (i = 0; i < total_devices; ++i)
		register_device(devices[i]);
	if (!total_devices) {
		applog(LOG_WARNING, "No devices detected!");
		if (use_curses)
			applog(LOG_WARNING, "Waiting for devices; press 'M+' to add, or 'Q' to quit");
		else
			applog(LOG_WARNING, "Waiting for devices");
	}
#ifdef HAVE_CURSES
	switch_logsize();
#endif
#if BLKMAKER_VERSION > 1 && defined(USE_SHA256D)
	if (opt_load_bitcoin_conf && get_mining_goal("default")->malgo->algo == POW_SHA256D && !opt_benchmark)
		add_local_gbt(total_pools);
#endif
	if (!total_pools) {
		applog(LOG_WARNING, "Need to specify at least one pool server.");
#ifdef HAVE_CURSES
		if (!use_curses || !input_pool(false))
#endif
			quit(1, "Pool setup failed");
	}
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		size_t siz;
		if (!pool->rpc_url)
			quit(1, "No URI supplied for pool %u", i);
		if (!pool->rpc_userpass) {
			if (!pool->rpc_user || !pool->rpc_pass)
				quit(1, "No login credentials supplied for pool %u %s", i, pool->rpc_url);
			siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2;
			pool->rpc_userpass = malloc(siz);
			if (!pool->rpc_userpass)
				quit(1, "Failed to malloc userpass");
			snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass);
		}
	}
	/* Set the currentpool to pool with priority 0 */
	validate_pool_priorities();
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];
		if (!pool->prio)
			currentpool = pool;
	}
#ifdef HAVE_SYSLOG_H
	if (use_syslog)
		openlog(PACKAGE, LOG_PID, LOG_USER);
#endif
#if defined(unix) || defined(__APPLE__)
	if (opt_stderr_cmd)
		fork_monitor();
#endif // defined(unix)
	mining_thr = calloc(mining_threads, sizeof(thr));
	if (!mining_thr)
		quit(1, "Failed to calloc mining_thr");
	for (i = 0; i < mining_threads; i++) {
		mining_thr[i] = calloc(1, sizeof(*thr));
		if (!mining_thr[i])
			quit(1, "Failed to calloc mining_thr[%d]", i);
	}
	total_control_threads = 6;
	control_thr = calloc(total_control_threads, sizeof(*thr));
	if (!control_thr)
		quit(1, "Failed to calloc control_thr");
	if (opt_benchmark)
		goto begin_bench;
  11898. applog(LOG_NOTICE, "Probing for an alive pool");
  11899. do {
  11900. bool still_testing;
  11901. int i;
  11902. /* Look for at least one active pool before starting */
  11903. probe_pools();
  11904. do {
  11905. sleep(1);
  11906. if (pools_active)
  11907. break;
  11908. still_testing = false;
  11909. for (int i = 0; i < total_pools; ++i)
  11910. if (pools[i]->testing)
  11911. still_testing = true;
  11912. } while (still_testing);
  11913. if (!pools_active) {
  11914. applog(LOG_ERR, "No servers were found that could be used to get work from.");
  11915. applog(LOG_ERR, "Please check the details from the list below of the servers you have input");
  11916. applog(LOG_ERR, "Most likely you have input the wrong URL, forgotten to add a port, or have not set up workers");
  11917. for (i = 0; i < total_pools; i++) {
  11918. struct pool *pool;
  11919. pool = pools[i];
  11920. applog(LOG_WARNING, "Pool: %d URL: %s User: %s Password: %s",
  11921. i, pool->rpc_url, pool->rpc_user, pool->rpc_pass);
  11922. }
  11923. #ifdef HAVE_CURSES
  11924. if (use_curses) {
  11925. halfdelay(150);
  11926. applog(LOG_ERR, "Press any key to exit, or BFGMiner will try again in 15s.");
  11927. if (getch() != ERR)
  11928. quit(0, "No servers could be used! Exiting.");
  11929. cbreak();
  11930. } else
  11931. #endif
  11932. quit(0, "No servers could be used! Exiting.");
  11933. }
  11934. } while (!pools_active);
  11935. #ifdef USE_SCRYPT
  11936. if (detect_algo == 1 && get_mining_goal("default")->malgo->algo != POW_SCRYPT) {
  11937. applog(LOG_NOTICE, "Detected scrypt algorithm");
  11938. set_malgo_scrypt();
  11939. }
  11940. #endif
  11941. detect_algo = 0;
begin_bench:
	total_mhashes_done = 0;
	for (i = 0; i < total_devices; i++) {
		struct cgpu_info *cgpu = devices[i];
		cgpu->rolling = cgpu->total_mhashes = 0;
	}
	cgtime(&total_tv_start);
	cgtime(&total_tv_end);
	miner_started = total_tv_start;
	time_t miner_start_ts = time(NULL);
	if (schedstart.tm.tm_sec)
		localtime_r(&miner_start_ts, &schedstart.tm);
	if (schedstop.tm.tm_sec)
		localtime_r(&miner_start_ts, &schedstop .tm);
	get_datestamp(datestamp, sizeof(datestamp), miner_start_ts);
	// Initialise processors and threads
	k = 0;
	for (i = 0; i < total_devices; ++i) {
		struct cgpu_info *cgpu = devices[i];
		allocate_cgpu(cgpu, &k);
	}
	// Start threads
	for (i = 0; i < total_devices; ++i) {
		struct cgpu_info *cgpu = devices[i];
		start_cgpu(cgpu);
	}
#ifdef USE_OPENCL
	for (i = 0; i < nDevs; i++)
		pause_dynamic_threads(i);
#endif
#if defined(USE_CPUMINING) && defined(USE_SHA256D)
	if (opt_n_threads > 0)
		applog(LOG_INFO, "%d cpu miner threads started, using '%s' algorithm.",
		       opt_n_threads, algo_names[opt_algo]);
#endif
	cgtime(&total_tv_start);
	cgtime(&total_tv_end);
	if (!opt_benchmark)
	{
		pthread_t submit_thread;
		if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, NULL)))
			quit(1, "submit_work thread create failed");
	}
	watchpool_thr_id = 1;
	thr = &control_thr[watchpool_thr_id];
	/* start watchpool thread */
	if (thr_info_create(thr, NULL, watchpool_thread, NULL))
		quit(1, "watchpool thread create failed");
	pthread_detach(thr->pth);
	watchdog_thr_id = 2;
	thr = &control_thr[watchdog_thr_id];
	/* start watchdog thread */
	if (thr_info_create(thr, NULL, watchdog_thread, NULL))
		quit(1, "watchdog thread create failed");
	pthread_detach(thr->pth);
#ifdef USE_OPENCL
	/* Create reinit gpu thread */
	gpur_thr_id = 3;
	thr = &control_thr[gpur_thr_id];
	thr->q = tq_new();
	if (!thr->q)
		quit(1, "tq_new failed for gpur_thr_id");
	if (thr_info_create(thr, NULL, reinit_gpu, thr))
		quit(1, "reinit_gpu thread create failed");
#endif
	/* Create API socket thread */
	api_thr_id = 4;
	thr = &control_thr[api_thr_id];
	if (thr_info_create(thr, NULL, api_thread, thr))
		quit(1, "API thread create failed");
#ifdef USE_LIBMICROHTTPD
	if (httpsrv_port != -1)
		httpsrv_start(httpsrv_port);
#endif
#ifdef USE_LIBEVENT
	if (stratumsrv_port != -1)
		stratumsrv_change_port(stratumsrv_port);
#endif
#ifdef HAVE_BFG_HOTPLUG
	if (opt_hotplug)
		hotplug_start();
#endif
#ifdef HAVE_CURSES
	/* Create curses input thread for keyboard input. Create this last so
	 * that we know all threads are created, since this can call kill_work
	 * to try and shut down all previous threads. */
	input_thr_id = 5;
	thr = &control_thr[input_thr_id];
	if (thr_info_create(thr, NULL, input_thread, thr))
		quit(1, "input thread create failed");
	pthread_detach(thr->pth);
#endif
	/* Just to be sure */
	if (total_control_threads != 6)
		quit(1, "incorrect total_control_threads (%d) should be 6", total_control_threads);
	/* Once everything is set up, main() becomes the getwork scheduler */
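	/* Each pass stages at most one work item: stratum work is generated
	 * locally, GBT templates are rolled while they remain valid, and
	 * plain getwork falls through to a JSON-RPC fetch. The staging target
	 * is opt_queue plus base_queue; once it is met, the loop sleeps on
	 * gws_cond until hash_pop signals demand. */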
	while (42) {
		int ts, max_staged = opt_queue;
		struct pool *pool, *cp;
		bool lagging = false;
		struct curl_ent *ce;
		struct work *work;
		struct mining_algorithm *malgo = NULL;
		cp = current_pool();
		// Generally, each processor needs one new work item, and all of them at once during work restarts
		max_staged += base_queue;
		mutex_lock(stgd_lock);
		ts = __total_staged(false);
		if (!pool_localgen(cp) && !ts && !opt_fail_only)
			lagging = true;
		/* Wait until hash_pop tells us we need to create more work */
		if (ts > max_staged) {
			{
				LL_FOREACH(mining_algorithms, malgo)
				{
					if (!malgo->goal_refs)
						continue;
					if (!malgo->base_queue)
						continue;
					if (malgo->staged < malgo->base_queue + opt_queue)
					{
						mutex_unlock(stgd_lock);
						pool = select_pool(lagging, malgo);
						if (pool)
						{
							work = make_work();
							work->spare = true;
							goto retry;
						}
						mutex_lock(stgd_lock);
					}
				}
				malgo = NULL;
			}
			staged_full = true;
			pthread_cond_wait(&gws_cond, stgd_lock);
			ts = __total_staged(false);
		}
		mutex_unlock(stgd_lock);
		if (ts > max_staged)
			continue;
		work = make_work();
		if (lagging && !pool_tset(cp, &cp->lagging)) {
			applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no);
			cp->getfail_occasions++;
			total_go++;
		}
		pool = select_pool(lagging, malgo);
retry:
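		/* Stratum pools generate work locally; wait (switching pools if
		 * necessary) until the stratum session is active and has sent a
		 * mining.notify before generating from it. */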
		if (pool->has_stratum) {
			while (!pool->stratum_active || !pool->stratum_notify) {
				struct pool *altpool = select_pool(true, malgo);
				if (altpool == pool && pool->has_stratum)
					cgsleep_ms(5000);
				pool = altpool;
				goto retry;
			}
			gen_stratum_work(pool, work);
			applog(LOG_DEBUG, "Generated stratum work");
			stage_work(work);
			continue;
		}
		if (pool->last_work_copy) {
			mutex_lock(&pool->last_work_lock);
			struct work *last_work = pool->last_work_copy;
			if (!last_work)
			{}
			else
			if (can_roll(last_work) && should_roll(last_work)) {
				struct timeval tv_now;
				cgtime(&tv_now);
				free_work(work);
				work = make_clone(pool->last_work_copy);
				mutex_unlock(&pool->last_work_lock);
				roll_work(work);
				applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tr->tmpl, tv_now.tv_sec));
				stage_work(work);
				continue;
			} else if (last_work->tr && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tr->tmpl) > (unsigned long)mining_threads) {
				// Don't free last_work_copy, since it is used to detect that upstream provides plenty of work per template
			} else {
				free_work(last_work);
				pool->last_work_copy = NULL;
			}
			mutex_unlock(&pool->last_work_lock);
		}
		if (clone_available()) {
			applog(LOG_DEBUG, "Cloned getwork work");
			free_work(work);
			continue;
		}
		if (opt_benchmark) {
			get_benchmark_work(work, opt_benchmark_intense);
			applog(LOG_DEBUG, "Generated benchmark work");
			stage_work(work);
			continue;
		}
		work->pool = pool;
		ce = pop_curl_entry3(pool, 2);
		/* Obtain new work from bitcoind via JSON-RPC */
		if (!get_upstream_work(work, ce->curl)) {
			struct pool *next_pool;
			/* Make sure the pool hasn't just stopped serving
			 * requests but is still up, as we'll keep hammering it */
			push_curl_entry(ce, pool);
			++pool->seq_getfails;
			pool_died(pool);
			next_pool = select_pool(!opt_fail_only, malgo);
			if (pool == next_pool) {
				applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no);
				cgsleep_ms(5000);
			} else {
				applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, failover activated", pool->pool_no);
				pool = next_pool;
			}
			goto retry;
		}
		if (ts >= max_staged)
			pool_tclear(pool, &pool->lagging);
		if (pool_tclear(pool, &pool->idle))
			pool_resus(pool);
		applog(LOG_DEBUG, "Generated getwork work");
		stage_work(work);
		push_curl_entry(ce, pool);
	}
	return 0;
}