# ge25519_add_p1p1.S
# qhasm: int64 rp
# qhasm: int64 pp
# qhasm: int64 qp
# qhasm: input rp
# qhasm: input pp
# qhasm: input qp
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 a0
# qhasm: int64 a1
# qhasm: int64 a2
# qhasm: int64 a3
# qhasm: stack64 a0_stack
# qhasm: stack64 a1_stack
# qhasm: stack64 a2_stack
# qhasm: stack64 a3_stack
# qhasm: int64 b0
# qhasm: int64 b1
# qhasm: int64 b2
# qhasm: int64 b3
# qhasm: stack64 b0_stack
# qhasm: stack64 b1_stack
# qhasm: stack64 b2_stack
# qhasm: stack64 b3_stack
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: stack64 c0_stack
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: int64 d0
# qhasm: int64 d1
# qhasm: int64 d2
# qhasm: int64 d3
# qhasm: stack64 d0_stack
# qhasm: stack64 d1_stack
# qhasm: stack64 d2_stack
# qhasm: stack64 d3_stack
# qhasm: int64 t10
# qhasm: int64 t11
# qhasm: int64 t12
# qhasm: int64 t13
# qhasm: stack64 t10_stack
# qhasm: stack64 t11_stack
# qhasm: stack64 t12_stack
# qhasm: stack64 t13_stack
# qhasm: int64 t20
# qhasm: int64 t21
# qhasm: int64 t22
# qhasm: int64 t23
# qhasm: stack64 t20_stack
# qhasm: stack64 t21_stack
# qhasm: stack64 t22_stack
# qhasm: stack64 t23_stack
# qhasm: int64 rx0
# qhasm: int64 rx1
# qhasm: int64 rx2
# qhasm: int64 rx3
# qhasm: int64 ry0
# qhasm: int64 ry1
# qhasm: int64 ry2
# qhasm: int64 ry3
# qhasm: int64 rz0
# qhasm: int64 rz1
# qhasm: int64 rz2
# qhasm: int64 rz3
# qhasm: int64 rt0
# qhasm: int64 rt1
# qhasm: int64 rt2
# qhasm: int64 rt3
# qhasm: int64 x0
# qhasm: int64 x1
# qhasm: int64 x2
# qhasm: int64 x3
# qhasm: int64 mulr4
# qhasm: int64 mulr5
# qhasm: int64 mulr6
# qhasm: int64 mulr7
# qhasm: int64 mulr8
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mulx0
# qhasm: int64 mulx1
# qhasm: int64 mulx2
# qhasm: int64 mulx3
# qhasm: int64 mulc
# qhasm: int64 mulzero
# qhasm: int64 muli38
# qhasm: int64 addt0
# qhasm: int64 addt1
# qhasm: int64 subt0
# qhasm: int64 subt1
# qhasm: enter CRYPTO_NAMESPACE(ge25519_add_p1p1)
.text
.p2align 5
.globl _CRYPTO_NAMESPACE(ge25519_add_p1p1)
.globl CRYPTO_NAMESPACE(ge25519_add_p1p1)
_CRYPTO_NAMESPACE(ge25519_add_p1p1):
CRYPTO_NAMESPACE(ge25519_add_p1p1):
mov %rsp,%r11
and $31,%r11
add $192,%r11
sub %r11,%rsp
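# The four instructions above reserve scratch space: %r11 = (%rsp & 31) + 192,
# so subtracting it leaves %rsp 32-byte aligned with 192 bytes for the
# stack64 spill slots (0(%rsp) through 176(%rsp)) used throughout this routine.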
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: qp = qp
# asm 1: mov <qp=int64#3,>qp=int64#4
# asm 2: mov <qp=%rdx,>qp=%rcx
mov %rdx,%rcx
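# qp arrives in %rdx, but every "mul" below clobbers %rdx (and %rdx is also
# reused as a scratch register), so the pointer is moved to %rcx first.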
# qhasm: a0 = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>a0=int64#3
# asm 2: movq 32(<pp=%rsi),>a0=%rdx
movq 32(%rsi),%rdx
# qhasm: a1 = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>a1=int64#5
# asm 2: movq 40(<pp=%rsi),>a1=%r8
movq 40(%rsi),%r8
# qhasm: a2 = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>a2=int64#6
# asm 2: movq 48(<pp=%rsi),>a2=%r9
movq 48(%rsi),%r9
# qhasm: a3 = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>a3=int64#7
# asm 2: movq 56(<pp=%rsi),>a3=%rax
movq 56(%rsi),%rax
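# Assuming the usual ge25519 point layout (X at offset 0, Y at offset 32,
# each coordinate four 64-bit limbs), a0..a3 now hold Y1.  b starts as a
# copy, after which a becomes Y1 - X1 and b becomes Y1 + X1.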
# qhasm: b0 = a0
# asm 1: mov <a0=int64#3,>b0=int64#8
# asm 2: mov <a0=%rdx,>b0=%r10
mov %rdx,%r10
# qhasm: b1 = a1
# asm 1: mov <a1=int64#5,>b1=int64#9
# asm 2: mov <a1=%r8,>b1=%r11
mov %r8,%r11
# qhasm: b2 = a2
# asm 1: mov <a2=int64#6,>b2=int64#10
# asm 2: mov <a2=%r9,>b2=%r12
mov %r9,%r12
# qhasm: b3 = a3
# asm 1: mov <a3=int64#7,>b3=int64#11
# asm 2: mov <a3=%rax,>b3=%r13
mov %rax,%r13
# qhasm: carry? a0 -= *(uint64 *)(pp + 0)
# asm 1: subq 0(<pp=int64#2),<a0=int64#3
# asm 2: subq 0(<pp=%rsi),<a0=%rdx
subq 0(%rsi),%rdx
# qhasm: carry? a1 -= *(uint64 *)(pp + 8) - carry
# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5
# asm 2: sbbq 8(<pp=%rsi),<a1=%r8
sbbq 8(%rsi),%r8
# qhasm: carry? a2 -= *(uint64 *)(pp + 16) - carry
# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6
# asm 2: sbbq 16(<pp=%rsi),<a2=%r9
sbbq 16(%rsi),%r9
# qhasm: carry? a3 -= *(uint64 *)(pp + 24) - carry
# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7
# asm 2: sbbq 24(<pp=%rsi),<a3=%rax
sbbq 24(%rsi),%rax
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#12
# asm 2: mov $0,>subt0=%r14
mov $0,%r14
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#13
# asm 2: mov $38,>subt1=%r15
mov $38,%r15
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#12,<subt1=int64#13
# asm 2: cmovae <subt0=%r14,<subt1=%r15
cmovae %r14,%r15
# qhasm: carry? a0 -= subt1
# asm 1: sub <subt1=int64#13,<a0=int64#3
# asm 2: sub <subt1=%r15,<a0=%rdx
sub %r15,%rdx
# qhasm: carry? a1 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a1=int64#5
# asm 2: sbb <subt0=%r14,<a1=%r8
sbb %r14,%r8
# qhasm: carry? a2 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a2=int64#6
# asm 2: sbb <subt0=%r14,<a2=%r9
sbb %r14,%r9
# qhasm: carry? a3 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a3=int64#7
# asm 2: sbb <subt0=%r14,<a3=%rax
sbb %r14,%rax
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#13,<subt0=int64#12
# asm 2: cmovc <subt1=%r15,<subt0=%r14
cmovc %r15,%r14
# qhasm: a0 -= subt0
# asm 1: sub <subt0=int64#12,<a0=int64#3
# asm 2: sub <subt0=%r14,<a0=%rdx
sub %r14,%rdx
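# The subtraction above keeps the result reduced modulo 2^256 - 38, a
# non-canonical representation of arithmetic mod 2^255 - 19: a borrow out of
# the top limb means the value wrapped by 2^256, which is congruent to 38,
# so 38 is conditionally subtracted again, and a possible second borrow is
# absorbed by one last fix-up of the low limb.  A minimal C sketch of the
# same idea (sub4/sub1 are hypothetical helpers returning the borrow):
#
#   uint64_t b = sub4(a, x);       /* a -= x limbwise, b = borrow out   */
#   b = sub1(a, b ? 38 : 0);       /* fold 2^256 == 38 (mod p) back in  */
#   a[0] -= b ? 38 : 0;            /* final fix-up touches a[0] only    */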
# qhasm: carry? b0 += *(uint64 *)(pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#8
# asm 2: addq 0(<pp=%rsi),<b0=%r10
addq 0(%rsi),%r10
# qhasm: carry? b1 += *(uint64 *)(pp + 8) + carry
# asm 1: adcq 8(<pp=int64#2),<b1=int64#9
# asm 2: adcq 8(<pp=%rsi),<b1=%r11
adcq 8(%rsi),%r11
# qhasm: carry? b2 += *(uint64 *)(pp + 16) + carry
# asm 1: adcq 16(<pp=int64#2),<b2=int64#10
# asm 2: adcq 16(<pp=%rsi),<b2=%r12
adcq 16(%rsi),%r12
# qhasm: carry? b3 += *(uint64 *)(pp + 24) + carry
# asm 1: adcq 24(<pp=int64#2),<b3=int64#11
# asm 2: adcq 24(<pp=%rsi),<b3=%r13
adcq 24(%rsi),%r13
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#12
# asm 2: mov $0,>addt0=%r14
mov $0,%r14
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#13
# asm 2: mov $38,>addt1=%r15
mov $38,%r15
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#12,<addt1=int64#13
# asm 2: cmovae <addt0=%r14,<addt1=%r15
cmovae %r14,%r15
# qhasm: carry? b0 += addt1
# asm 1: add <addt1=int64#13,<b0=int64#8
# asm 2: add <addt1=%r15,<b0=%r10
add %r15,%r10
# qhasm: carry? b1 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b1=int64#9
# asm 2: adc <addt0=%r14,<b1=%r11
adc %r14,%r11
# qhasm: carry? b2 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b2=int64#10
# asm 2: adc <addt0=%r14,<b2=%r12
adc %r14,%r12
# qhasm: carry? b3 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b3=int64#11
# asm 2: adc <addt0=%r14,<b3=%r13
adc %r14,%r13
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#13,<addt0=int64#12
# asm 2: cmovc <addt1=%r15,<addt0=%r14
cmovc %r15,%r14
# qhasm: b0 += addt0
# asm 1: add <addt0=int64#12,<b0=int64#8
# asm 2: add <addt0=%r14,<b0=%r10
add %r14,%r10
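# The addition b = Y1 + X1 uses the mirrored trick: a carry out of bit 256
# is worth 38 modulo 2^255 - 19, so 38 is conditionally re-added, and any
# carry from that step is folded into the low limb the same way.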
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
movq %rdx,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#5,>a1_stack=stack64#9
# asm 2: movq <a1=%r8,>a1_stack=64(%rsp)
movq %r8,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#6,>a2_stack=stack64#10
# asm 2: movq <a2=%r9,>a2_stack=72(%rsp)
movq %r9,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#7,>a3_stack=stack64#11
# asm 2: movq <a3=%rax,>a3_stack=80(%rsp)
movq %rax,80(%rsp)
# qhasm: b0_stack = b0
# asm 1: movq <b0=int64#8,>b0_stack=stack64#12
# asm 2: movq <b0=%r10,>b0_stack=88(%rsp)
movq %r10,88(%rsp)
# qhasm: b1_stack = b1
# asm 1: movq <b1=int64#9,>b1_stack=stack64#13
# asm 2: movq <b1=%r11,>b1_stack=96(%rsp)
movq %r11,96(%rsp)
# qhasm: b2_stack = b2
# asm 1: movq <b2=int64#10,>b2_stack=stack64#14
# asm 2: movq <b2=%r12,>b2_stack=104(%rsp)
movq %r12,104(%rsp)
# qhasm: b3_stack = b3
# asm 1: movq <b3=int64#11,>b3_stack=stack64#15
# asm 2: movq <b3=%r13,>b3_stack=112(%rsp)
movq %r13,112(%rsp)
# qhasm: t10 = *(uint64 *)(qp + 32)
# asm 1: movq 32(<qp=int64#4),>t10=int64#3
# asm 2: movq 32(<qp=%rcx),>t10=%rdx
movq 32(%rcx),%rdx
# qhasm: t11 = *(uint64 *)(qp + 40)
# asm 1: movq 40(<qp=int64#4),>t11=int64#5
# asm 2: movq 40(<qp=%rcx),>t11=%r8
movq 40(%rcx),%r8
# qhasm: t12 = *(uint64 *)(qp + 48)
# asm 1: movq 48(<qp=int64#4),>t12=int64#6
# asm 2: movq 48(<qp=%rcx),>t12=%r9
movq 48(%rcx),%r9
# qhasm: t13 = *(uint64 *)(qp + 56)
# asm 1: movq 56(<qp=int64#4),>t13=int64#7
# asm 2: movq 56(<qp=%rcx),>t13=%rax
movq 56(%rcx),%rax
# qhasm: t20 = t10
# asm 1: mov <t10=int64#3,>t20=int64#8
# asm 2: mov <t10=%rdx,>t20=%r10
mov %rdx,%r10
# qhasm: t21 = t11
# asm 1: mov <t11=int64#5,>t21=int64#9
# asm 2: mov <t11=%r8,>t21=%r11
mov %r8,%r11
# qhasm: t22 = t12
# asm 1: mov <t12=int64#6,>t22=int64#10
# asm 2: mov <t12=%r9,>t22=%r12
mov %r9,%r12
# qhasm: t23 = t13
# asm 1: mov <t13=int64#7,>t23=int64#11
# asm 2: mov <t13=%rax,>t23=%r13
mov %rax,%r13
# qhasm: carry? t10 -= *(uint64 *) (qp + 0)
# asm 1: subq 0(<qp=int64#4),<t10=int64#3
# asm 2: subq 0(<qp=%rcx),<t10=%rdx
subq 0(%rcx),%rdx
# qhasm: carry? t11 -= *(uint64 *) (qp + 8) - carry
# asm 1: sbbq 8(<qp=int64#4),<t11=int64#5
# asm 2: sbbq 8(<qp=%rcx),<t11=%r8
sbbq 8(%rcx),%r8
# qhasm: carry? t12 -= *(uint64 *) (qp + 16) - carry
# asm 1: sbbq 16(<qp=int64#4),<t12=int64#6
# asm 2: sbbq 16(<qp=%rcx),<t12=%r9
sbbq 16(%rcx),%r9
# qhasm: carry? t13 -= *(uint64 *) (qp + 24) - carry
# asm 1: sbbq 24(<qp=int64#4),<t13=int64#7
# asm 2: sbbq 24(<qp=%rcx),<t13=%rax
sbbq 24(%rcx),%rax
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#12
# asm 2: mov $0,>subt0=%r14
mov $0,%r14
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#13
# asm 2: mov $38,>subt1=%r15
mov $38,%r15
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#12,<subt1=int64#13
# asm 2: cmovae <subt0=%r14,<subt1=%r15
cmovae %r14,%r15
# qhasm: carry? t10 -= subt1
# asm 1: sub <subt1=int64#13,<t10=int64#3
# asm 2: sub <subt1=%r15,<t10=%rdx
sub %r15,%rdx
# qhasm: carry? t11 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<t11=int64#5
# asm 2: sbb <subt0=%r14,<t11=%r8
sbb %r14,%r8
# qhasm: carry? t12 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<t12=int64#6
# asm 2: sbb <subt0=%r14,<t12=%r9
sbb %r14,%r9
# qhasm: carry? t13 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<t13=int64#7
# asm 2: sbb <subt0=%r14,<t13=%rax
sbb %r14,%rax
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#13,<subt0=int64#12
# asm 2: cmovc <subt1=%r15,<subt0=%r14
cmovc %r15,%r14
# qhasm: t10 -= subt0
# asm 1: sub <subt0=int64#12,<t10=int64#3
# asm 2: sub <subt0=%r14,<t10=%rdx
sub %r14,%rdx
# qhasm: carry? t20 += *(uint64 *) (qp + 0)
# asm 1: addq 0(<qp=int64#4),<t20=int64#8
# asm 2: addq 0(<qp=%rcx),<t20=%r10
addq 0(%rcx),%r10
# qhasm: carry? t21 += *(uint64 *) (qp + 8) + carry
# asm 1: adcq 8(<qp=int64#4),<t21=int64#9
# asm 2: adcq 8(<qp=%rcx),<t21=%r11
adcq 8(%rcx),%r11
# qhasm: carry? t22 += *(uint64 *) (qp + 16) + carry
# asm 1: adcq 16(<qp=int64#4),<t22=int64#10
# asm 2: adcq 16(<qp=%rcx),<t22=%r12
adcq 16(%rcx),%r12
# qhasm: carry? t23 += *(uint64 *) (qp + 24) + carry
# asm 1: adcq 24(<qp=int64#4),<t23=int64#11
# asm 2: adcq 24(<qp=%rcx),<t23=%r13
adcq 24(%rcx),%r13
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#12
# asm 2: mov $0,>addt0=%r14
mov $0,%r14
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#13
# asm 2: mov $38,>addt1=%r15
mov $38,%r15
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#12,<addt1=int64#13
# asm 2: cmovae <addt0=%r14,<addt1=%r15
cmovae %r14,%r15
# qhasm: carry? t20 += addt1
# asm 1: add <addt1=int64#13,<t20=int64#8
# asm 2: add <addt1=%r15,<t20=%r10
add %r15,%r10
# qhasm: carry? t21 += addt0 + carry
# asm 1: adc <addt0=int64#12,<t21=int64#9
# asm 2: adc <addt0=%r14,<t21=%r11
adc %r14,%r11
# qhasm: carry? t22 += addt0 + carry
# asm 1: adc <addt0=int64#12,<t22=int64#10
# asm 2: adc <addt0=%r14,<t22=%r12
adc %r14,%r12
# qhasm: carry? t23 += addt0 + carry
# asm 1: adc <addt0=int64#12,<t23=int64#11
# asm 2: adc <addt0=%r14,<t23=%r13
adc %r14,%r13
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#13,<addt0=int64#12
# asm 2: cmovc <addt1=%r15,<addt0=%r14
cmovc %r15,%r14
# qhasm: t20 += addt0
# asm 1: add <addt0=int64#12,<t20=int64#8
# asm 2: add <addt0=%r14,<t20=%r10
add %r14,%r10
# qhasm: t10_stack = t10
# asm 1: movq <t10=int64#3,>t10_stack=stack64#16
# asm 2: movq <t10=%rdx,>t10_stack=120(%rsp)
movq %rdx,120(%rsp)
# qhasm: t11_stack = t11
# asm 1: movq <t11=int64#5,>t11_stack=stack64#17
# asm 2: movq <t11=%r8,>t11_stack=128(%rsp)
movq %r8,128(%rsp)
# qhasm: t12_stack = t12
# asm 1: movq <t12=int64#6,>t12_stack=stack64#18
# asm 2: movq <t12=%r9,>t12_stack=136(%rsp)
movq %r9,136(%rsp)
# qhasm: t13_stack = t13
# asm 1: movq <t13=int64#7,>t13_stack=stack64#19
# asm 2: movq <t13=%rax,>t13_stack=144(%rsp)
movq %rax,144(%rsp)
# qhasm: t20_stack = t20
# asm 1: movq <t20=int64#8,>t20_stack=stack64#20
# asm 2: movq <t20=%r10,>t20_stack=152(%rsp)
movq %r10,152(%rsp)
# qhasm: t21_stack = t21
# asm 1: movq <t21=int64#9,>t21_stack=stack64#21
# asm 2: movq <t21=%r11,>t21_stack=160(%rsp)
movq %r11,160(%rsp)
# qhasm: t22_stack = t22
# asm 1: movq <t22=int64#10,>t22_stack=stack64#22
# asm 2: movq <t22=%r12,>t22_stack=168(%rsp)
movq %r12,168(%rsp)
# qhasm: t23_stack = t23
# asm 1: movq <t23=int64#11,>t23_stack=stack64#23
# asm 2: movq <t23=%r13,>t23_stack=176(%rsp)
movq %r13,176(%rsp)
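# The spill slots now hold the four multiplication operands: a = Y1 - X1 at
# 56..80(%rsp), b = Y1 + X1 at 88..112(%rsp), and, under the same layout
# assumption as above, t1 = Y2 - X2 at 120..144(%rsp) and t2 = Y2 + X2 at
# 152..176(%rsp), each reduced exactly as a and b were.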
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
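# What follows is a plain 4x4-limb schoolbook multiplication a = a * t1:
# each mulx_i row multiplies one limb of a by all four limbs of t1 and
# accumulates into the 8-limb result a0..a3, mulr4..mulr7, with mulc
# carrying between columns.  A minimal C sketch of the pattern (assuming a
# compiler with unsigned __int128; array names are illustrative):
#
#   uint64_t r[8] = {0};
#   for (int i = 0; i < 4; i++) {
#       uint64_t c = 0;
#       for (int j = 0; j < 4; j++) {
#           unsigned __int128 t =
#               (unsigned __int128)a[i] * t1[j] + r[i + j] + c;
#           r[i + j] = (uint64_t)t;          /* column i+j, low word     */
#           c = (uint64_t)(t >> 64);         /* carry into column i+j+1  */
#       }
#       r[i + 4] = c;                        /* top word of this row     */
#   }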
# qhasm: mulx0 = a0_stack
# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10
# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12
movq 56(%rsp),%r12
# qhasm: mulrax = t10_stack
# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: a0 = mulrax
# asm 1: mov <mulrax=int64#7,>a0=int64#11
# asm 2: mov <mulrax=%rax,>a0=%r13
mov %rax,%r13
# qhasm: a1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>a1=int64#12
# asm 2: mov <mulrdx=%rdx,>a1=%r14
mov %rdx,%r14
# qhasm: mulrax = t11_stack
# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7
# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax
movq 128(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#12
# asm 2: add <mulrax=%rax,<a1=%r14
add %rax,%r14
# qhasm: a2 = 0
# asm 1: mov $0,>a2=int64#13
# asm 2: mov $0,>a2=%r15
mov $0,%r15
# qhasm: a2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a2=int64#13
# asm 2: adc <mulrdx=%rdx,<a2=%r15
adc %rdx,%r15
# qhasm: mulrax = t12_stack
# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: a3 = 0
# asm 1: mov $0,>a3=int64#14
# asm 2: mov $0,>a3=%rbx
mov $0,%rbx
# qhasm: a3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a3=int64#14
# asm 2: adc <mulrdx=%rdx,<a3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = t13_stack
# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = a1_stack
# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10
# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12
movq 64(%rsp),%r12
# qhasm: mulrax = t10_stack
# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#12
# asm 2: add <mulrax=%rax,<a1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t11_stack
# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7
# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax
movq 128(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a2 += mulc
# asm 1: add <mulc=int64#15,<a2=int64#13
# asm 2: add <mulc=%rbp,<a2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t12_stack
# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#15,<a3=int64#14
# asm 2: add <mulc=%rbp,<a3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t13_stack
# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = a2_stack
# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10
# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12
movq 72(%rsp),%r12
# qhasm: mulrax = t10_stack
# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t11_stack
# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7
# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax
movq 128(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#15,<a3=int64#14
# asm 2: add <mulc=%rbp,<a3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t12_stack
# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t13_stack
# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = a3_stack
# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10
# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12
movq 80(%rsp),%r12
# qhasm: mulrax = t10_stack
# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t11_stack
# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7
# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax
movq 128(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t12_stack
# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t13_stack
# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
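# The 8-limb product is now reduced modulo 2^256 - 38: since 2^256 == 38
# (mod 2^255 - 19), the high limbs mulr4..mulr7 are multiplied by 38 and
# folded into the low limbs a0..a3; the leftover carry mulr8 is folded the
# same way, and one last possible carry is absorbed through mulzero.  A C
# sketch of the same idea (add4 is a hypothetical helper adding one word
# into r[0] with carry propagation, returning the carry out):
#
#   uint64_t carry = 0;
#   for (int i = 0; i < 4; i++) {            /* r[0..3] += 38 * r[4..7] */
#       unsigned __int128 t =
#           (unsigned __int128)r[i + 4] * 38 + r[i] + carry;
#       r[i] = (uint64_t)t;
#       carry = (uint64_t)(t >> 64);
#   }
#   carry = add4(r, carry * 38);             /* fold the top carry      */
#   r[0] += carry * 38;                      /* final carry, r[0] only  */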
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? a0 += mulr4
# asm 1: add <mulr4=int64#5,<a0=int64#11
# asm 2: add <mulr4=%r8,<a0=%r13
add %r8,%r13
# qhasm: carry? a1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<a1=int64#12
# asm 2: adc <mulr5=%r9,<a1=%r14
adc %r9,%r14
# qhasm: carry? a2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<a2=int64#13
# asm 2: adc <mulr6=%r10,<a2=%r15
adc %r10,%r15
# qhasm: carry? a3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<a3=int64#14
# asm 2: adc <mulr7=%r11,<a3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? a0 += mulr8
# asm 1: add <mulr8=int64#5,<a0=int64#11
# asm 2: add <mulr8=%r8,<a0=%r13
add %r8,%r13
# qhasm: carry? a1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a1=int64#12
# asm 2: adc <mulzero=%rdx,<a1=%r14
adc %rdx,%r14
# qhasm: carry? a2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a2=int64#13
# asm 2: adc <mulzero=%rdx,<a2=%r15
adc %rdx,%r15
# qhasm: carry? a3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a3=int64#14
# asm 2: adc <mulzero=%rdx,<a3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: a0 += mulzero
# asm 1: add <mulzero=int64#3,<a0=int64#11
# asm 2: add <mulzero=%rdx,<a0=%r13
add %rdx,%r13
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#11,>a0_stack=stack64#8
# asm 2: movq <a0=%r13,>a0_stack=56(%rsp)
movq %r13,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#12,>a1_stack=stack64#9
# asm 2: movq <a1=%r14,>a1_stack=64(%rsp)
movq %r14,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#13,>a2_stack=stack64#10
# asm 2: movq <a2=%r15,>a2_stack=72(%rsp)
movq %r15,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#14,>a3_stack=stack64#11
# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp)
movq %rbx,80(%rsp)
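# a0..a3, now A = (Y1 - X1) * (Y2 - X2) in the same reduced form, are
# spilled back to their slots.  The identical multiply-and-reduce pattern
# repeats below, computing rx0..rx3 = B = (Y1 + X1) * (Y2 + X2) from b
# and t2.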
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = b0_stack
# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10
# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12
movq 88(%rsp),%r12
# qhasm: mulrax = t20_stack
# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7
# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax
movq 152(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: rx0 = mulrax
# asm 1: mov <mulrax=int64#7,>rx0=int64#11
# asm 2: mov <mulrax=%rax,>rx0=%r13
mov %rax,%r13
# qhasm: rx1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>rx1=int64#12
# asm 2: mov <mulrdx=%rdx,>rx1=%r14
mov %rdx,%r14
# qhasm: mulrax = t21_stack
# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7
# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax
movq 160(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#12
# asm 2: add <mulrax=%rax,<rx1=%r14
add %rax,%r14
# qhasm: rx2 = 0
# asm 1: mov $0,>rx2=int64#13
# asm 2: mov $0,>rx2=%r15
mov $0,%r15
# qhasm: rx2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rx2=int64#13
# asm 2: adc <mulrdx=%rdx,<rx2=%r15
adc %rdx,%r15
# qhasm: mulrax = t22_stack
# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7
# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax
movq 168(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#13
# asm 2: add <mulrax=%rax,<rx2=%r15
add %rax,%r15
# qhasm: rx3 = 0
# asm 1: mov $0,>rx3=int64#14
# asm 2: mov $0,>rx3=%rbx
mov $0,%rbx
# qhasm: rx3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rx3=int64#14
# asm 2: adc <mulrdx=%rdx,<rx3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = t23_stack
# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7
# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax
movq 176(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#14
# asm 2: add <mulrax=%rax,<rx3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = b1_stack
# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10
# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12
movq 96(%rsp),%r12
# qhasm: mulrax = t20_stack
# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7
# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax
movq 152(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#12
# asm 2: add <mulrax=%rax,<rx1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t21_stack
# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7
# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax
movq 160(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#13
# asm 2: add <mulrax=%rax,<rx2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rx2 += mulc
# asm 1: add <mulc=int64#15,<rx2=int64#13
# asm 2: add <mulc=%rbp,<rx2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t22_stack
# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7
# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax
movq 168(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#14
# asm 2: add <mulrax=%rax,<rx3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rx3 += mulc
# asm 1: add <mulc=int64#15,<rx3=int64#14
# asm 2: add <mulc=%rbp,<rx3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t23_stack
# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7
# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax
movq 176(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = b2_stack
# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10
# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12
movq 104(%rsp),%r12
# qhasm: mulrax = t20_stack
# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7
# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax
movq 152(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#13
# asm 2: add <mulrax=%rax,<rx2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t21_stack
# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7
# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax
movq 160(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#14
# asm 2: add <mulrax=%rax,<rx3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rx3 += mulc
# asm 1: add <mulc=int64#15,<rx3=int64#14
# asm 2: add <mulc=%rbp,<rx3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t22_stack
# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7
# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax
movq 168(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t23_stack
# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7
# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax
movq 176(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = b3_stack
# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10
# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12
movq 112(%rsp),%r12
# qhasm: mulrax = t20_stack
# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7
# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax
movq 152(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#14
# asm 2: add <mulrax=%rax,<rx3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t21_stack
# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7
# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax
movq 160(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t22_stack
# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7
# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax
movq 168(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = t23_stack
# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7
# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax
movq 176(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
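# note: mulr4..mulr7 now hold limbs 4..7 of the 512-bit product; the
# block below folds them back into rx0..rx3 by multiplying with the
# 64-bit constant stored at CRYPTO_NAMESPACE(38), again using
# 2^256 = 38 (mod 2^255-19).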
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? rx0 += mulr4
# asm 1: add <mulr4=int64#5,<rx0=int64#11
# asm 2: add <mulr4=%r8,<rx0=%r13
add %r8,%r13
# qhasm: carry? rx1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<rx1=int64#12
# asm 2: adc <mulr5=%r9,<rx1=%r14
adc %r9,%r14
# qhasm: carry? rx2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<rx2=int64#13
# asm 2: adc <mulr6=%r10,<rx2=%r15
adc %r10,%r15
# qhasm: carry? rx3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<rx3=int64#14
# asm 2: adc <mulr7=%r11,<rx3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? rx0 += mulr8
# asm 1: add <mulr8=int64#5,<rx0=int64#11
# asm 2: add <mulr8=%r8,<rx0=%r13
add %r8,%r13
# qhasm: carry? rx1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx1=int64#12
# asm 2: adc <mulzero=%rdx,<rx1=%r14
adc %rdx,%r14
# qhasm: carry? rx2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx2=int64#13
# asm 2: adc <mulzero=%rdx,<rx2=%r15
adc %rdx,%r15
# qhasm: carry? rx3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx3=int64#14
# asm 2: adc <mulzero=%rdx,<rx3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: rx0 += mulzero
# asm 1: add <mulzero=int64#3,<rx0=int64#11
# asm 2: add <mulzero=%rdx,<rx0=%r13
add %rdx,%r13
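# note: rx0..rx3 now hold the reduced product; they are copied into
# ry0..ry3 so that the sum ry = rx + a and the difference rx = rx - a
# can both be formed from the same value.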
# qhasm: ry0 = rx0
# asm 1: mov <rx0=int64#11,>ry0=int64#3
# asm 2: mov <rx0=%r13,>ry0=%rdx
mov %r13,%rdx
# qhasm: ry1 = rx1
# asm 1: mov <rx1=int64#12,>ry1=int64#5
# asm 2: mov <rx1=%r14,>ry1=%r8
mov %r14,%r8
# qhasm: ry2 = rx2
# asm 1: mov <rx2=int64#13,>ry2=int64#6
# asm 2: mov <rx2=%r15,>ry2=%r9
mov %r15,%r9
# qhasm: ry3 = rx3
# asm 1: mov <rx3=int64#14,>ry3=int64#7
# asm 2: mov <rx3=%rbx,>ry3=%rax
mov %rbx,%rax
# qhasm: carry? ry0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
# asm 2: addq <a0_stack=56(%rsp),<ry0=%rdx
addq 56(%rsp),%rdx
# qhasm: carry? ry1 += a1_stack + carry
# asm 1: adcq <a1_stack=stack64#9,<ry1=int64#5
# asm 2: adcq <a1_stack=64(%rsp),<ry1=%r8
adcq 64(%rsp),%r8
# qhasm: carry? ry2 += a2_stack + carry
# asm 1: adcq <a2_stack=stack64#10,<ry2=int64#6
# asm 2: adcq <a2_stack=72(%rsp),<ry2=%r9
adcq 72(%rsp),%r9
# qhasm: carry? ry3 += a3_stack + carry
# asm 1: adcq <a3_stack=stack64#11,<ry3=int64#7
# asm 2: adcq <a3_stack=80(%rsp),<ry3=%rax
adcq 80(%rsp),%rax
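# note: the addition may carry out of the fourth limb; instead of a
# branch, the correction 38 is selected with cmovae (carry clear ->
# add 0, carry set -> add 38) and folded back in, keeping the control
# flow independent of the data.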
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#8
# asm 2: mov $0,>addt0=%r10
mov $0,%r10
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#9
# asm 2: mov $38,>addt1=%r11
mov $38,%r11
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#8,<addt1=int64#9
# asm 2: cmovae <addt0=%r10,<addt1=%r11
cmovae %r10,%r11
# qhasm: carry? ry0 += addt1
# asm 1: add <addt1=int64#9,<ry0=int64#3
# asm 2: add <addt1=%r11,<ry0=%rdx
add %r11,%rdx
# qhasm: carry? ry1 += addt0 + carry
# asm 1: adc <addt0=int64#8,<ry1=int64#5
# asm 2: adc <addt0=%r10,<ry1=%r8
adc %r10,%r8
# qhasm: carry? ry2 += addt0 + carry
# asm 1: adc <addt0=int64#8,<ry2=int64#6
# asm 2: adc <addt0=%r10,<ry2=%r9
adc %r10,%r9
# qhasm: carry? ry3 += addt0 + carry
# asm 1: adc <addt0=int64#8,<ry3=int64#7
# asm 2: adc <addt0=%r10,<ry3=%rax
adc %r10,%rax
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#9,<addt0=int64#8
# asm 2: cmovc <addt1=%r11,<addt0=%r10
cmovc %r11,%r10
# qhasm: ry0 += addt0
# asm 1: add <addt0=int64#8,<ry0=int64#3
# asm 2: add <addt0=%r10,<ry0=%rdx
add %r10,%rdx
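# note: the subtraction below mirrors the addition: a borrow out of
# the top limb is compensated by subtracting 38 (selected branch-free
# with cmovae/cmovc), since -2^256 = -38 (mod 2^255-19).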
# qhasm: carry? rx0 -= a0_stack
# asm 1: subq <a0_stack=stack64#8,<rx0=int64#11
# asm 2: subq <a0_stack=56(%rsp),<rx0=%r13
subq 56(%rsp),%r13
# qhasm: carry? rx1 -= a1_stack - carry
# asm 1: sbbq <a1_stack=stack64#9,<rx1=int64#12
# asm 2: sbbq <a1_stack=64(%rsp),<rx1=%r14
sbbq 64(%rsp),%r14
# qhasm: carry? rx2 -= a2_stack - carry
# asm 1: sbbq <a2_stack=stack64#10,<rx2=int64#13
# asm 2: sbbq <a2_stack=72(%rsp),<rx2=%r15
sbbq 72(%rsp),%r15
# qhasm: carry? rx3 -= a3_stack - carry
# asm 1: sbbq <a3_stack=stack64#11,<rx3=int64#14
# asm 2: sbbq <a3_stack=80(%rsp),<rx3=%rbx
sbbq 80(%rsp),%rbx
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#8
# asm 2: mov $0,>subt0=%r10
mov $0,%r10
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#9
# asm 2: mov $38,>subt1=%r11
mov $38,%r11
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#8,<subt1=int64#9
# asm 2: cmovae <subt0=%r10,<subt1=%r11
cmovae %r10,%r11
# qhasm: carry? rx0 -= subt1
# asm 1: sub <subt1=int64#9,<rx0=int64#11
# asm 2: sub <subt1=%r11,<rx0=%r13
sub %r11,%r13
# qhasm: carry? rx1 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<rx1=int64#12
# asm 2: sbb <subt0=%r10,<rx1=%r14
sbb %r10,%r14
# qhasm: carry? rx2 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<rx2=int64#13
# asm 2: sbb <subt0=%r10,<rx2=%r15
sbb %r10,%r15
# qhasm: carry? rx3 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<rx3=int64#14
# asm 2: sbb <subt0=%r10,<rx3=%rbx
sbb %r10,%rbx
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#9,<subt0=int64#8
# asm 2: cmovc <subt1=%r11,<subt0=%r10
cmovc %r11,%r10
# qhasm: rx0 -= subt0
# asm 1: sub <subt0=int64#8,<rx0=int64#11
# asm 2: sub <subt0=%r10,<rx0=%r13
sub %r10,%r13
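# note: the difference (rx) and the sum (ry) are complete; they are
# written to the result at rp+0..24 and rp+64..88, i.e. the first and
# third 4-limb field elements of the output point.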
# qhasm: *(uint64 *) (rp + 0) = rx0
# asm 1: movq <rx0=int64#11,0(<rp=int64#1)
# asm 2: movq <rx0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)
# qhasm: *(uint64 *) (rp + 8) = rx1
# asm 1: movq <rx1=int64#12,8(<rp=int64#1)
# asm 2: movq <rx1=%r14,8(<rp=%rdi)
movq %r14,8(%rdi)
# qhasm: *(uint64 *) (rp + 16) = rx2
# asm 1: movq <rx2=int64#13,16(<rp=int64#1)
# asm 2: movq <rx2=%r15,16(<rp=%rdi)
movq %r15,16(%rdi)
# qhasm: *(uint64 *) (rp + 24) = rx3
# asm 1: movq <rx3=int64#14,24(<rp=int64#1)
# asm 2: movq <rx3=%rbx,24(<rp=%rdi)
movq %rbx,24(%rdi)
# qhasm: *(uint64 *) (rp + 64) = ry0
# asm 1: movq <ry0=int64#3,64(<rp=int64#1)
# asm 2: movq <ry0=%rdx,64(<rp=%rdi)
movq %rdx,64(%rdi)
# qhasm: *(uint64 *) (rp + 72) = ry1
# asm 1: movq <ry1=int64#5,72(<rp=int64#1)
# asm 2: movq <ry1=%r8,72(<rp=%rdi)
movq %r8,72(%rdi)
# qhasm: *(uint64 *) (rp + 80) = ry2
# asm 1: movq <ry2=int64#6,80(<rp=int64#1)
# asm 2: movq <ry2=%r9,80(<rp=%rdi)
movq %r9,80(%rdi)
# qhasm: *(uint64 *) (rp + 88) = ry3
# asm 1: movq <ry3=int64#7,88(<rp=int64#1)
# asm 2: movq <ry3=%rax,88(<rp=%rdi)
movq %rax,88(%rdi)
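# note: the next block is another 4x4 schoolbook multiplication, this
# time loading the operands directly from memory: the limbs at
# pp+96..120 times the limbs at qp+96..120 (the fourth 4-limb field
# element of each input point), accumulated into c0..c3 and
# mulr4..mulr7.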
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulx0=int64#10
# asm 2: movq 96(<pp=%rsi),>mulx0=%r12
movq 96(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 96)
# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 96(<qp=%rcx),>mulrax=%rax
movq 96(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: c0 = mulrax
# asm 1: mov <mulrax=int64#7,>c0=int64#11
# asm 2: mov <mulrax=%rax,>c0=%r13
mov %rax,%r13
# qhasm: c1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>c1=int64#12
# asm 2: mov <mulrdx=%rdx,>c1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 104)
# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 104(<qp=%rcx),>mulrax=%rax
movq 104(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: c2 = 0
# asm 1: mov $0,>c2=int64#13
# asm 2: mov $0,>c2=%r15
mov $0,%r15
# qhasm: c2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c2=int64#13
# asm 2: adc <mulrdx=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 112)
# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 112(<qp=%rcx),>mulrax=%rax
movq 112(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: c3 = 0
# asm 1: mov $0,>c3=int64#14
# asm 2: mov $0,>c3=%rbx
mov $0,%rbx
# qhasm: c3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c3=int64#14
# asm 2: adc <mulrdx=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 120)
# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 120(<qp=%rcx),>mulrax=%rax
movq 120(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulx1=int64#10
# asm 2: movq 104(<pp=%rsi),>mulx1=%r12
movq 104(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 96)
# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 96(<qp=%rcx),>mulrax=%rax
movq 96(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 104)
# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 104(<qp=%rcx),>mulrax=%rax
movq 104(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c2 += mulc
# asm 1: add <mulc=int64#15,<c2=int64#13
# asm 2: add <mulc=%rbp,<c2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 112)
# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 112(<qp=%rcx),>mulrax=%rax
movq 112(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 120)
# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 120(<qp=%rcx),>mulrax=%rax
movq 120(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulx2=int64#10
# asm 2: movq 112(<pp=%rsi),>mulx2=%r12
movq 112(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 96)
# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 96(<qp=%rcx),>mulrax=%rax
movq 96(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 104)
# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 104(<qp=%rcx),>mulrax=%rax
movq 104(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 112)
# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 112(<qp=%rcx),>mulrax=%rax
movq 112(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 120)
# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 120(<qp=%rcx),>mulrax=%rax
movq 120(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = *(uint64 *)(pp + 120)
# asm 1: movq 120(<pp=int64#2),>mulx3=int64#10
# asm 2: movq 120(<pp=%rsi),>mulx3=%r12
movq 120(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 96)
# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 96(<qp=%rcx),>mulrax=%rax
movq 96(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 104)
# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 104(<qp=%rcx),>mulrax=%rax
movq 104(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 112)
# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 112(<qp=%rcx),>mulrax=%rax
movq 112(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 120)
# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 120(<qp=%rcx),>mulrax=%rax
movq 120(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
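# note: as before, the high limbs mulr4..mulr7 of this product are
# folded back into c0..c3 with the constant 38 (2^256 = 38 mod 2^255-19).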
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? c0 += mulr4
# asm 1: add <mulr4=int64#5,<c0=int64#11
# asm 2: add <mulr4=%r8,<c0=%r13
add %r8,%r13
# qhasm: carry? c1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<c1=int64#12
# asm 2: adc <mulr5=%r9,<c1=%r14
adc %r9,%r14
# qhasm: carry? c2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<c2=int64#13
# asm 2: adc <mulr6=%r10,<c2=%r15
adc %r10,%r15
# qhasm: carry? c3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<c3=int64#14
# asm 2: adc <mulr7=%r11,<c3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? c0 += mulr8
# asm 1: add <mulr8=int64#5,<c0=int64#11
# asm 2: add <mulr8=%r8,<c0=%r13
add %r8,%r13
# qhasm: carry? c1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c1=int64#12
# asm 2: adc <mulzero=%rdx,<c1=%r14
adc %rdx,%r14
# qhasm: carry? c2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c2=int64#13
# asm 2: adc <mulzero=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: carry? c3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c3=int64#14
# asm 2: adc <mulzero=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: c0 += mulzero
# asm 1: add <mulzero=int64#3,<c0=int64#11
# asm 2: add <mulzero=%rdx,<c0=%r13
add %rdx,%r13
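# note: the reduced product c0..c3 is spilled to the stack so its
# limbs can be reloaded one at a time for the next multiplication.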
# qhasm: c0_stack = c0
# asm 1: movq <c0=int64#11,>c0_stack=stack64#8
# asm 2: movq <c0=%r13,>c0_stack=56(%rsp)
movq %r13,56(%rsp)
# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#12,>c1_stack=stack64#9
# asm 2: movq <c1=%r14,>c1_stack=64(%rsp)
movq %r14,64(%rsp)
# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#13,>c2_stack=stack64#10
# asm 2: movq <c2=%r15,>c2_stack=72(%rsp)
movq %r15,72(%rsp)
# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#14,>c3_stack=stack64#11
# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp)
movq %rbx,80(%rsp)
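# note: next, c is multiplied by the four-limb curve constant stored
# at CRYPTO_NAMESPACE(EC2D0..EC2D3) (presumably 2*d of edwards25519,
# as used in the standard extended-coordinate addition formulas),
# following the same 4x4 schoolbook pattern, one constant limb per mul.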
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = c0_stack
# asm 1: movq <c0_stack=stack64#8,>mulx0=int64#10
# asm 2: movq <c0_stack=56(%rsp),>mulx0=%r12
movq 56(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0)
# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: c0 = mulrax
# asm 1: mov <mulrax=int64#7,>c0=int64#11
# asm 2: mov <mulrax=%rax,>c0=%r13
mov %rax,%r13
# qhasm: c1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>c1=int64#12
# asm 2: mov <mulrdx=%rdx,>c1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1)
# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: c2 = 0
# asm 1: mov $0,>c2=int64#13
# asm 2: mov $0,>c2=%r15
mov $0,%r15
# qhasm: c2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c2=int64#13
# asm 2: adc <mulrdx=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2)
# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: c3 = 0
# asm 1: mov $0,>c3=int64#14
# asm 2: mov $0,>c3=%rbx
mov $0,%rbx
# qhasm: c3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c3=int64#14
# asm 2: adc <mulrdx=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3)
# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = c1_stack
# asm 1: movq <c1_stack=stack64#9,>mulx1=int64#10
# asm 2: movq <c1_stack=64(%rsp),>mulx1=%r12
movq 64(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0)
# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1)
# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c2 += mulc
# asm 1: add <mulc=int64#15,<c2=int64#13
# asm 2: add <mulc=%rbp,<c2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2)
# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3)
# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = c2_stack
# asm 1: movq <c2_stack=stack64#10,>mulx2=int64#10
# asm 2: movq <c2_stack=72(%rsp),>mulx2=%r12
movq 72(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0)
# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1)
# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2)
# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3)
# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = c3_stack
# asm 1: movq <c3_stack=stack64#11,>mulx3=int64#10
# asm 2: movq <c3_stack=80(%rsp),>mulx3=%r12
movq 80(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0)
# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1)
# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2)
# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7
# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax
movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
  2636. mov $0,%rbp
  2637. # qhasm: mulc += mulrdx + carry
  2638. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  2639. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  2640. adc %rdx,%rbp
  2641. # qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3)
  2642. # asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7
  2643. # asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax
  2644. movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax
  2645. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  2646. # asm 1: mul <mulx3=int64#10
  2647. # asm 2: mul <mulx3=%r12
  2648. mul %r12
  2649. # qhasm: carry? mulr6 += mulrax
  2650. # asm 1: add <mulrax=int64#7,<mulr6=int64#8
  2651. # asm 2: add <mulrax=%rax,<mulr6=%r10
  2652. add %rax,%r10
  2653. # qhasm: mulrdx += 0 + carry
  2654. # asm 1: adc $0,<mulrdx=int64#3
  2655. # asm 2: adc $0,<mulrdx=%rdx
  2656. adc $0,%rdx
  2657. # qhasm: carry? mulr6 += mulc
  2658. # asm 1: add <mulc=int64#15,<mulr6=int64#8
  2659. # asm 2: add <mulc=%rbp,<mulr6=%r10
  2660. add %rbp,%r10
  2661. # qhasm: mulr7 += mulrdx + carry
  2662. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
  2663. # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
  2664. adc %rdx,%r11
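# The two rows above (mulx2 = c2_stack, mulx3 = c3_stack) complete the
# 4x4 schoolbook product of c with the table limbs EC2D0..EC2D3 --
# presumably the radix-2^64 limbs of the curve constant 2*d, going by
# the name. The low half of the product is in c0..c3, the high half in
# mulr4..mulr7; the high half is folded back next, using
# 2^256 = 38 (mod 2^255-19).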
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
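# mulr4..mulr7 have just been scaled by the constant 38, producing the
# five-limb value mulr4..mulr8. It is now added limb-wise into c0..c3,
# and the leftover top limb mulr8 is folded in a second time, again
# multiplied by 38.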
# qhasm: carry? c0 += mulr4
# asm 1: add <mulr4=int64#5,<c0=int64#11
# asm 2: add <mulr4=%r8,<c0=%r13
add %r8,%r13
# qhasm: carry? c1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<c1=int64#12
# asm 2: adc <mulr5=%r9,<c1=%r14
adc %r9,%r14
# qhasm: carry? c2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<c2=int64#13
# asm 2: adc <mulr6=%r10,<c2=%r15
adc %r10,%r15
# qhasm: carry? c3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<c3=int64#14
# asm 2: adc <mulr7=%r11,<c3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? c0 += mulr8
# asm 1: add <mulr8=int64#5,<c0=int64#11
# asm 2: add <mulr8=%r8,<c0=%r13
add %r8,%r13
# qhasm: carry? c1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c1=int64#12
# asm 2: adc <mulzero=%rdx,<c1=%r14
adc %rdx,%r14
# qhasm: carry? c2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c2=int64#13
# asm 2: adc <mulzero=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: carry? c3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c3=int64#14
# asm 2: adc <mulzero=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: c0 += mulzero
# asm 1: add <mulzero=int64#3,<c0=int64#11
# asm 2: add <mulzero=%rdx,<c0=%r13
add %rdx,%r13
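# A carry out of c3 during the mulr8 fold is caught in mulzero, scaled
# by 38, and added to c0; the generated code relies on that final add
# not carrying (it can only fire when c0 has wrapped to a small value).
# c0..c3 now hold the reduced product, saved to the stack for the
# add/sub steps at the end of the function.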
# qhasm: c0_stack = c0
# asm 1: movq <c0=int64#11,>c0_stack=stack64#8
# asm 2: movq <c0=%r13,>c0_stack=56(%rsp)
movq %r13,56(%rsp)
# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#12,>c1_stack=stack64#9
# asm 2: movq <c1=%r14,>c1_stack=64(%rsp)
movq %r14,64(%rsp)
# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#13,>c2_stack=stack64#10
# asm 2: movq <c2=%r15,>c2_stack=72(%rsp)
movq %r15,72(%rsp)
# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#14,>c3_stack=stack64#11
# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp)
movq %rbx,80(%rsp)
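# Next 4x4 schoolbook product: the limbs at pp+64..88 times the limbs
# at qp+64..88 -- apparently the z coordinates of the two inputs, going
# by the offsets. mulr4..mulr7 are cleared to collect the high half;
# the low half accumulates in rt0..rt3, one row per source limb
# mulx0..mulx3.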
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulx0=int64#10
# asm 2: movq 64(<pp=%rsi),>mulx0=%r12
movq 64(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: rt0 = mulrax
# asm 1: mov <mulrax=int64#7,>rt0=int64#11
# asm 2: mov <mulrax=%rax,>rt0=%r13
mov %rax,%r13
# qhasm: rt1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>rt1=int64#12
# asm 2: mov <mulrdx=%rdx,>rt1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#12
# asm 2: add <mulrax=%rax,<rt1=%r14
add %rax,%r14
# qhasm: rt2 = 0
# asm 1: mov $0,>rt2=int64#13
# asm 2: mov $0,>rt2=%r15
mov $0,%r15
# qhasm: rt2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt2=int64#13
# asm 2: adc <mulrdx=%rdx,<rt2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#13
# asm 2: add <mulrax=%rax,<rt2=%r15
add %rax,%r15
# qhasm: rt3 = 0
# asm 1: mov $0,>rt3=int64#14
# asm 2: mov $0,>rt3=%rbx
mov $0,%rbx
# qhasm: rt3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt3=int64#14
# asm 2: adc <mulrdx=%rdx,<rt3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#14
# asm 2: add <mulrax=%rax,<rt3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
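# The mulx0 row above needs no staging register; the mulx1..mulx3 rows
# below stage each partial product's high word in mulc and add it into
# the next limb under its own carry chain, so no carry bit is lost
# between the two additions per limb.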
# qhasm: mulx1 = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulx1=int64#10
# asm 2: movq 72(<pp=%rsi),>mulx1=%r12
movq 72(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#12
# asm 2: add <mulrax=%rax,<rt1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#13
# asm 2: add <mulrax=%rax,<rt2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt2 += mulc
# asm 1: add <mulc=int64#15,<rt2=int64#13
# asm 2: add <mulc=%rbp,<rt2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#14
# asm 2: add <mulrax=%rax,<rt3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt3 += mulc
# asm 1: add <mulc=int64#15,<rt3=int64#14
# asm 2: add <mulc=%rbp,<rt3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulx2=int64#10
# asm 2: movq 80(<pp=%rsi),>mulx2=%r12
movq 80(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#13
# asm 2: add <mulrax=%rax,<rt2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#14
# asm 2: add <mulrax=%rax,<rt3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt3 += mulc
# asm 1: add <mulc=int64#15,<rt3=int64#14
# asm 2: add <mulc=%rbp,<rt3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulx3=int64#2
# asm 2: movq 88(<pp=%rsi),>mulx3=%rsi
movq 88(%rsi),%rsi
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#14
# asm 2: add <mulrax=%rax,<rt3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#10,<mulr4=int64#5
# asm 2: add <mulc=%r12,<mulr4=%r8
add %r12,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#10,<mulr5=int64#6
# asm 2: add <mulc=%r12,<mulr5=%r9
add %r12,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#10
# asm 2: mov $0,>mulc=%r12
mov $0,%r12
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
# asm 2: adc <mulrdx=%rdx,<mulc=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#2
# asm 2: mul <mulx3=%rsi
mul %rsi
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#10,<mulr6=int64#8
# asm 2: add <mulc=%r12,<mulr6=%r10
add %r12,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
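# High half complete (note mulx3 was loaded over pp in %rsi, which is
# no longer needed). As with the first product, mulr4..mulr7 are now
# scaled by 38 and folded back into rt0..rt3.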
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov <mulrax=%rax,>mulr4=%rsi
mov %rax,%rsi
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#5
# asm 2: mov $0,>mulr6=%r8
mov $0,%r8
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
# asm 2: add <mulrax=%rax,<mulr6=%r8
add %rax,%r8
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#6
# asm 2: mov $0,>mulr7=%r9
mov $0,%r9
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
# asm 2: add <mulrax=%rax,<mulr7=%r9
add %rax,%r9
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? rt0 += mulr4
# asm 1: add <mulr4=int64#2,<rt0=int64#11
# asm 2: add <mulr4=%rsi,<rt0=%r13
add %rsi,%r13
# qhasm: carry? rt1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<rt1=int64#12
# asm 2: adc <mulr5=%rcx,<rt1=%r14
adc %rcx,%r14
# qhasm: carry? rt2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<rt2=int64#13
# asm 2: adc <mulr6=%r8,<rt2=%r15
adc %r8,%r15
# qhasm: carry? rt3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<rt3=int64#14
# asm 2: adc <mulr7=%r9,<rt3=%rbx
adc %r9,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#2
# asm 2: mov $0,>mulzero=%rsi
mov $0,%rsi
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
imulq $38,%rax,%rdx
# qhasm: carry? rt0 += mulr8
# asm 1: add <mulr8=int64#3,<rt0=int64#11
# asm 2: add <mulr8=%rdx,<rt0=%r13
add %rdx,%r13
# qhasm: carry? rt1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt1=int64#12
# asm 2: adc <mulzero=%rsi,<rt1=%r14
adc %rsi,%r14
# qhasm: carry? rt2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt2=int64#13
# asm 2: adc <mulzero=%rsi,<rt2=%r15
adc %rsi,%r15
# qhasm: carry? rt3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt3=int64#14
# asm 2: adc <mulzero=%rsi,<rt3=%rbx
adc %rsi,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
imulq $38,%rsi,%rsi
# qhasm: rt0 += mulzero
# asm 1: add <mulzero=int64#2,<rt0=int64#11
# asm 2: add <mulzero=%rsi,<rt0=%r13
add %rsi,%r13
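# rt now holds the reduced product. It is doubled in place; the
# conditional-move sequence that follows repairs a possible carry out
# of the top limb without branching.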
# qhasm: carry? rt0 += rt0
# asm 1: add <rt0=int64#11,<rt0=int64#11
# asm 2: add <rt0=%r13,<rt0=%r13
add %r13,%r13
# qhasm: carry? rt1 += rt1 + carry
# asm 1: adc <rt1=int64#12,<rt1=int64#12
# asm 2: adc <rt1=%r14,<rt1=%r14
adc %r14,%r14
# qhasm: carry? rt2 += rt2 + carry
# asm 1: adc <rt2=int64#13,<rt2=int64#13
# asm 2: adc <rt2=%r15,<rt2=%r15
adc %r15,%r15
# qhasm: carry? rt3 += rt3 + carry
# asm 1: adc <rt3=int64#14,<rt3=int64#14
# asm 2: adc <rt3=%rbx,<rt3=%rbx
adc %rbx,%rbx
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#2
# asm 2: mov $0,>addt0=%rsi
mov $0,%rsi
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#3
# asm 2: mov $38,>addt1=%rdx
mov $38,%rdx
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#2,<addt1=int64#3
# asm 2: cmovae <addt0=%rsi,<addt1=%rdx
cmovae %rsi,%rdx
# qhasm: carry? rt0 += addt1
# asm 1: add <addt1=int64#3,<rt0=int64#11
# asm 2: add <addt1=%rdx,<rt0=%r13
add %rdx,%r13
# qhasm: carry? rt1 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt1=int64#12
# asm 2: adc <addt0=%rsi,<rt1=%r14
adc %rsi,%r14
# qhasm: carry? rt2 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt2=int64#13
# asm 2: adc <addt0=%rsi,<rt2=%r15
adc %rsi,%r15
# qhasm: carry? rt3 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt3=int64#14
# asm 2: adc <addt0=%rsi,<rt3=%rbx
adc %rsi,%rbx
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#3,<addt0=int64#2
# asm 2: cmovc <addt1=%rdx,<addt0=%rsi
cmovc %rdx,%rsi
# qhasm: rt0 += addt0
# asm 1: add <addt0=int64#2,<rt0=int64#11
# asm 2: add <addt0=%rsi,<rt0=%r13
add %rsi,%r13
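# The cmovae/cmovc pair implements the branch-free modular carry fix:
# a carry out of the 4-limb sum stands for 2^256 = 38 (mod 2^255-19),
# so 38 is added back; if even that carries, one more 38 goes into the
# low limb, which then cannot overflow. The same pattern is used for
# the sum rz = rt + c just below.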
# qhasm: rz0 = rt0
# asm 1: mov <rt0=int64#11,>rz0=int64#2
# asm 2: mov <rt0=%r13,>rz0=%rsi
mov %r13,%rsi
# qhasm: rz1 = rt1
# asm 1: mov <rt1=int64#12,>rz1=int64#3
# asm 2: mov <rt1=%r14,>rz1=%rdx
mov %r14,%rdx
# qhasm: rz2 = rt2
# asm 1: mov <rt2=int64#13,>rz2=int64#4
# asm 2: mov <rt2=%r15,>rz2=%rcx
mov %r15,%rcx
# qhasm: rz3 = rt3
# asm 1: mov <rt3=int64#14,>rz3=int64#5
# asm 2: mov <rt3=%rbx,>rz3=%r8
mov %rbx,%r8
# qhasm: carry? rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2
# asm 2: addq <c0_stack=56(%rsp),<rz0=%rsi
addq 56(%rsp),%rsi
# qhasm: carry? rz1 += c1_stack + carry
# asm 1: adcq <c1_stack=stack64#9,<rz1=int64#3
# asm 2: adcq <c1_stack=64(%rsp),<rz1=%rdx
adcq 64(%rsp),%rdx
# qhasm: carry? rz2 += c2_stack + carry
# asm 1: adcq <c2_stack=stack64#10,<rz2=int64#4
# asm 2: adcq <c2_stack=72(%rsp),<rz2=%rcx
adcq 72(%rsp),%rcx
# qhasm: carry? rz3 += c3_stack + carry
# asm 1: adcq <c3_stack=stack64#11,<rz3=int64#5
# asm 2: adcq <c3_stack=80(%rsp),<rz3=%r8
adcq 80(%rsp),%r8
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#6
# asm 2: mov $0,>addt0=%r9
mov $0,%r9
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#7
# asm 2: mov $38,>addt1=%rax
mov $38,%rax
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#6,<addt1=int64#7
# asm 2: cmovae <addt0=%r9,<addt1=%rax
cmovae %r9,%rax
# qhasm: carry? rz0 += addt1
# asm 1: add <addt1=int64#7,<rz0=int64#2
# asm 2: add <addt1=%rax,<rz0=%rsi
add %rax,%rsi
# qhasm: carry? rz1 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz1=int64#3
# asm 2: adc <addt0=%r9,<rz1=%rdx
adc %r9,%rdx
# qhasm: carry? rz2 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz2=int64#4
# asm 2: adc <addt0=%r9,<rz2=%rcx
adc %r9,%rcx
# qhasm: carry? rz3 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz3=int64#5
# asm 2: adc <addt0=%r9,<rz3=%r8
adc %r9,%r8
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#7,<addt0=int64#6
# asm 2: cmovc <addt1=%rax,<addt0=%r9
cmovc %rax,%r9
# qhasm: rz0 += addt0
# asm 1: add <addt0=int64#6,<rz0=int64#2
# asm 2: add <addt0=%r9,<rz0=%rsi
add %r9,%rsi
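# The mirror-image subtraction rt -= c follows: a borrow out of the top
# limb stands for -2^256 = -38 (mod 2^255-19), so 38 is conditionally
# subtracted, with the cmovc pass absorbing a possible second borrow.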
# qhasm: carry? rt0 -= c0_stack
# asm 1: subq <c0_stack=stack64#8,<rt0=int64#11
# asm 2: subq <c0_stack=56(%rsp),<rt0=%r13
subq 56(%rsp),%r13
# qhasm: carry? rt1 -= c1_stack - carry
# asm 1: sbbq <c1_stack=stack64#9,<rt1=int64#12
# asm 2: sbbq <c1_stack=64(%rsp),<rt1=%r14
sbbq 64(%rsp),%r14
# qhasm: carry? rt2 -= c2_stack - carry
# asm 1: sbbq <c2_stack=stack64#10,<rt2=int64#13
# asm 2: sbbq <c2_stack=72(%rsp),<rt2=%r15
sbbq 72(%rsp),%r15
# qhasm: carry? rt3 -= c3_stack - carry
# asm 1: sbbq <c3_stack=stack64#11,<rt3=int64#14
# asm 2: sbbq <c3_stack=80(%rsp),<rt3=%rbx
sbbq 80(%rsp),%rbx
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#6
# asm 2: mov $0,>subt0=%r9
mov $0,%r9
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#7
# asm 2: mov $38,>subt1=%rax
mov $38,%rax
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#6,<subt1=int64#7
# asm 2: cmovae <subt0=%r9,<subt1=%rax
cmovae %r9,%rax
# qhasm: carry? rt0 -= subt1
# asm 1: sub <subt1=int64#7,<rt0=int64#11
# asm 2: sub <subt1=%rax,<rt0=%r13
sub %rax,%r13
# qhasm: carry? rt1 -= subt0 - carry
# asm 1: sbb <subt0=int64#6,<rt1=int64#12
# asm 2: sbb <subt0=%r9,<rt1=%r14
sbb %r9,%r14
# qhasm: carry? rt2 -= subt0 - carry
# asm 1: sbb <subt0=int64#6,<rt2=int64#13
# asm 2: sbb <subt0=%r9,<rt2=%r15
sbb %r9,%r15
# qhasm: carry? rt3 -= subt0 - carry
# asm 1: sbb <subt0=int64#6,<rt3=int64#14
# asm 2: sbb <subt0=%r9,<rt3=%rbx
sbb %r9,%rbx
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#7,<subt0=int64#6
# asm 2: cmovc <subt1=%rax,<subt0=%r9
cmovc %rax,%r9
# qhasm: rt0 -= subt0
# asm 1: sub <subt0=int64#6,<rt0=int64#11
# asm 2: sub <subt0=%r9,<rt0=%r13
sub %r9,%r13
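# Both 4-limb results are stored: rz at rp+32..56 and rt at rp+96..120,
# presumably two coordinates of the p1p1-form result.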
# qhasm: *(uint64 *)(rp + 32) = rz0
# asm 1: movq <rz0=int64#2,32(<rp=int64#1)
# asm 2: movq <rz0=%rsi,32(<rp=%rdi)
movq %rsi,32(%rdi)
# qhasm: *(uint64 *)(rp + 40) = rz1
# asm 1: movq <rz1=int64#3,40(<rp=int64#1)
# asm 2: movq <rz1=%rdx,40(<rp=%rdi)
movq %rdx,40(%rdi)
# qhasm: *(uint64 *)(rp + 48) = rz2
# asm 1: movq <rz2=int64#4,48(<rp=int64#1)
# asm 2: movq <rz2=%rcx,48(<rp=%rdi)
movq %rcx,48(%rdi)
# qhasm: *(uint64 *)(rp + 56) = rz3
# asm 1: movq <rz3=int64#5,56(<rp=int64#1)
# asm 2: movq <rz3=%r8,56(<rp=%rdi)
movq %r8,56(%rdi)
# qhasm: *(uint64 *)(rp + 96) = rt0
# asm 1: movq <rt0=int64#11,96(<rp=int64#1)
# asm 2: movq <rt0=%r13,96(<rp=%rdi)
movq %r13,96(%rdi)
# qhasm: *(uint64 *)(rp + 104) = rt1
# asm 1: movq <rt1=int64#12,104(<rp=int64#1)
# asm 2: movq <rt1=%r14,104(<rp=%rdi)
movq %r14,104(%rdi)
# qhasm: *(uint64 *)(rp + 112) = rt2
# asm 1: movq <rt2=int64#13,112(<rp=int64#1)
# asm 2: movq <rt2=%r15,112(<rp=%rdi)
movq %r15,112(%rdi)
# qhasm: *(uint64 *)(rp + 120) = rt3
# asm 1: movq <rt3=int64#14,120(<rp=int64#1)
# asm 2: movq <rt3=%rbx,120(<rp=%rdi)
movq %rbx,120(%rdi)
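# Epilogue: the values spilled at the bottom of the frame are reloaded
# (by qhasm convention, the slot restored into %r11 appears to hold the
# frame displacement, which "leave" adds back to %rsp), the return
# registers are set, and the function returns.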
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret