# ge25519_p1p1_to_pniels.S
# qhasm: int64 rp
# qhasm: int64 pp
# qhasm: input rp
# qhasm: input pp
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 x0
# qhasm: int64 x1
# qhasm: int64 x2
# qhasm: int64 x3
# qhasm: int64 x4
# qhasm: int64 y0
# qhasm: int64 y1
# qhasm: int64 y2
# qhasm: int64 y3
# qhasm: int64 y4
# qhasm: int64 ysubx0
# qhasm: int64 ysubx1
# qhasm: int64 ysubx2
# qhasm: int64 ysubx3
# qhasm: int64 ysubx4
# qhasm: int64 xaddy0
# qhasm: int64 xaddy1
# qhasm: int64 xaddy2
# qhasm: int64 xaddy3
# qhasm: int64 xaddy4
# qhasm: int64 rz0
# qhasm: int64 rz1
# qhasm: int64 rz2
# qhasm: int64 rz3
# qhasm: int64 rz4
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 t4
# qhasm: int64 t2d0
# qhasm: int64 t2d1
# qhasm: int64 t2d2
# qhasm: int64 t2d3
# qhasm: int64 t2d4
# qhasm: stack64 stackt0
# qhasm: stack64 stackt1
# qhasm: stack64 stackt2
# qhasm: stack64 stackt3
# qhasm: stack64 stackt4
# qhasm: stack64 stackx0
# qhasm: stack64 stackx1
# qhasm: stack64 stackx2
# qhasm: stack64 stackx3
# qhasm: stack64 stackx4
# qhasm: stack64 stacky1
# qhasm: stack64 stacky2
# qhasm: stack64 stacky3
# qhasm: stack64 stacky4
# qhasm: int64 mulr01
# qhasm: int64 mulr11
# qhasm: int64 mulr21
# qhasm: int64 mulr31
# qhasm: int64 mulr41
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mult
# qhasm: int64 mulredmask
# qhasm: stack64 mulx219_stack
# qhasm: stack64 mulx319_stack
# qhasm: stack64 mulx419_stack
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
.text
.p2align 5
.globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
.globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
_CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels):
CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels):
mov %rsp,%r11
and $31,%r11
add $128,%r11
sub %r11,%rsp
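# Note: this routine converts a ge25519 point from p1p1 form (X:Y:Z:T)
# into pniels form (Y-X, Y+X, Z, 2dT).  Field elements are radix-2^51:
# five 64-bit limbs of 51 bits each (40 bytes per element), which appears
# to put X, Y, Z and T at byte offsets 0, 40, 80 and 120 from pp.  Each
# product below is a schoolbook 5x5 limb multiplication mod 2^255-19;
# any partial product whose limb indices sum to 5 or more is pre-scaled
# by 19, since 2^255 = 19 (mod 2^255-19).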
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
movq 24(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
movq %rax,56(%rsp)
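# 19*(limb 3 of the first operand) is cached here, and 19*(limb 4) just
# below, so each wrapped partial product a_i*b_j with i+j >= 5 can be
# formed with a plain mulq against the cached value.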
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: x0 = mulrax
# asm 1: mov <mulrax=int64#7,>x0=int64#4
# asm 2: mov <mulrax=%rax,>x0=%rcx
mov %rax,%rcx
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
mov %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
movq 32(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
movq %rax,64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? x0 += mulrax
# asm 1: add <mulrax=int64#7,<x0=int64#4
# asm 2: add <mulrax=%rax,<x0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? x0 += mulrax
# asm 1: add <mulrax=int64#7,<x0=int64#4
# asm 2: add <mulrax=%rax,<x0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: x1 = mulrax
# asm 1: mov <mulrax=int64#7,>x1=int64#6
# asm 2: mov <mulrax=%rax,>x1=%r9
mov %rax,%r9
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
mov %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: x2 = mulrax
# asm 1: mov <mulrax=int64#7,>x2=int64#9
# asm 2: mov <mulrax=%rax,>x2=%r11
mov %rax,%r11
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
mov %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: x3 = mulrax
# asm 1: mov <mulrax=int64#7,>x3=int64#11
# asm 2: mov <mulrax=%rax,>x3=%r13
mov %rax,%r13
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: x4 = mulrax
# asm 1: mov <mulrax=int64#7,>x4=int64#13
# asm 2: mov <mulrax=%rax,>x4=%r15
mov %rax,%r15
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
mov %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? x1 += mulrax
# asm 1: add <mulrax=int64#7,<x1=int64#6
# asm 2: add <mulrax=%rax,<x1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? x2 += mulrax
# asm 1: add <mulrax=int64#7,<x2=int64#9
# asm 2: add <mulrax=%rax,<x2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? x3 += mulrax
# asm 1: add <mulrax=int64#7,<x3=int64#11
# asm 2: add <mulrax=%rax,<x3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? x4 += mulrax
# asm 1: add <mulrax=int64#7,<x4=int64#13
# asm 2: add <mulrax=%rax,<x4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
movq 8(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? x0 += mulrax
# asm 1: add <mulrax=int64#7,<x0=int64#4
# asm 2: add <mulrax=%rax,<x0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? x2 += mulrax
# asm 1: add <mulrax=int64#7,<x2=int64#9
# asm 2: add <mulrax=%rax,<x2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? x3 += mulrax
# asm 1: add <mulrax=int64#7,<x3=int64#11
# asm 2: add <mulrax=%rax,<x3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? x4 += mulrax
# asm 1: add <mulrax=int64#7,<x4=int64#13
# asm 2: add <mulrax=%rax,<x4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? x0 += mulrax
# asm 1: add <mulrax=int64#7,<x0=int64#4
# asm 2: add <mulrax=%rax,<x0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? x1 += mulrax
# asm 1: add <mulrax=int64#7,<x1=int64#6
# asm 2: add <mulrax=%rax,<x1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? x3 += mulrax
# asm 1: add <mulrax=int64#7,<x3=int64#11
# asm 2: add <mulrax=%rax,<x3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? x4 += mulrax
# asm 1: add <mulrax=int64#7,<x4=int64#13
# asm 2: add <mulrax=%rax,<x4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? x1 += mulrax
# asm 1: add <mulrax=int64#7,<x1=int64#6
# asm 2: add <mulrax=%rax,<x1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? x2 += mulrax
# asm 1: add <mulrax=int64#7,<x2=int64#9
# asm 2: add <mulrax=%rax,<x2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<pp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? x4 += mulrax
# asm 1: add <mulrax=int64#7,<x4=int64#13
# asm 2: add <mulrax=%rax,<x4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? x1 += mulrax
# asm 1: add <mulrax=int64#7,<x1=int64#6
# asm 2: add <mulrax=%rax,<x1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? x2 += mulrax
# asm 1: add <mulrax=int64#7,<x2=int64#9
# asm 2: add <mulrax=%rax,<x2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? x3 += mulrax
# asm 1: add <mulrax=int64#7,<x3=int64#11
# asm 2: add <mulrax=%rax,<x3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
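# Reduction of the five 128-bit column sums (x_i:mulr_i1): shld by 13
# (= 64-51) folds each high word into position, the low word is masked
# with REDMASK51 = 2^51-1, each overflow is added into the next column,
# and the topmost overflow (and the final carry out of limb 4) wraps
# back into limb 0 multiplied by 19.  A single shr $51 carry chain then
# brings every limb back to about 51 bits.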
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.x0) << 13
# asm 1: shld $13,<x0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<x0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8
# qhasm: x0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x0=int64#4
# asm 2: and <mulredmask=%rdx,<x0=%rcx
and %rdx,%rcx
# qhasm: mulr11 = (mulr11.x1) << 13
# asm 1: shld $13,<x1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<x1=%r9,<mulr11=%r10
shld $13,%r9,%r10
# qhasm: x1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x1=int64#6
# asm 2: and <mulredmask=%rdx,<x1=%r9
and %rdx,%r9
# qhasm: x1 += mulr01
# asm 1: add <mulr01=int64#5,<x1=int64#6
# asm 2: add <mulr01=%r8,<x1=%r9
add %r8,%r9
# qhasm: mulr21 = (mulr21.x2) << 13
# asm 1: shld $13,<x2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<x2=%r11,<mulr21=%r12
shld $13,%r11,%r12
# qhasm: x2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x2=int64#9
# asm 2: and <mulredmask=%rdx,<x2=%r11
and %rdx,%r11
# qhasm: x2 += mulr11
# asm 1: add <mulr11=int64#8,<x2=int64#9
# asm 2: add <mulr11=%r10,<x2=%r11
add %r10,%r11
# qhasm: mulr31 = (mulr31.x3) << 13
# asm 1: shld $13,<x3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<x3=%r13,<mulr31=%r14
shld $13,%r13,%r14
# qhasm: x3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x3=int64#11
# asm 2: and <mulredmask=%rdx,<x3=%r13
and %rdx,%r13
# qhasm: x3 += mulr21
# asm 1: add <mulr21=int64#10,<x3=int64#11
# asm 2: add <mulr21=%r12,<x3=%r13
add %r12,%r13
# qhasm: mulr41 = (mulr41.x4) << 13
# asm 1: shld $13,<x4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<x4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx
# qhasm: x4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x4=int64#13
# asm 2: and <mulredmask=%rdx,<x4=%r15
and %rdx,%r15
# qhasm: x4 += mulr31
# asm 1: add <mulr31=int64#12,<x4=int64#13
# asm 2: add <mulr31=%r14,<x4=%r15
add %r14,%r15
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8
# qhasm: x0 += mulr41
# asm 1: add <mulr41=int64#5,<x0=int64#4
# asm 2: add <mulr41=%r8,<x0=%rcx
add %r8,%rcx
# qhasm: mult = x0
# asm 1: mov <x0=int64#4,>mult=int64#5
# asm 2: mov <x0=%rcx,>mult=%r8
mov %rcx,%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: mult += x1
# asm 1: add <x1=int64#6,<mult=int64#5
# asm 2: add <x1=%r9,<mult=%r8
add %r9,%r8
# qhasm: x1 = mult
# asm 1: mov <mult=int64#5,>x1=int64#6
# asm 2: mov <mult=%r8,>x1=%r9
mov %r8,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: x0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x0=int64#4
# asm 2: and <mulredmask=%rdx,<x0=%rcx
and %rdx,%rcx
# qhasm: mult += x2
# asm 1: add <x2=int64#9,<mult=int64#5
# asm 2: add <x2=%r11,<mult=%r8
add %r11,%r8
# qhasm: x2 = mult
# asm 1: mov <mult=int64#5,>x2=int64#7
# asm 2: mov <mult=%r8,>x2=%rax
mov %r8,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: x1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x1=int64#6
# asm 2: and <mulredmask=%rdx,<x1=%r9
and %rdx,%r9
# qhasm: mult += x3
# asm 1: add <x3=int64#11,<mult=int64#5
# asm 2: add <x3=%r13,<mult=%r8
add %r13,%r8
# qhasm: x3 = mult
# asm 1: mov <mult=int64#5,>x3=int64#8
# asm 2: mov <mult=%r8,>x3=%r10
mov %r8,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: x2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x2=int64#7
# asm 2: and <mulredmask=%rdx,<x2=%rax
and %rdx,%rax
# qhasm: mult += x4
# asm 1: add <x4=int64#13,<mult=int64#5
# asm 2: add <x4=%r15,<mult=%r8
add %r15,%r8
# qhasm: x4 = mult
# asm 1: mov <mult=int64#5,>x4=int64#9
# asm 2: mov <mult=%r8,>x4=%r11
mov %r8,%r11
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: x3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x3=int64#8
# asm 2: and <mulredmask=%rdx,<x3=%r10
and %rdx,%r10
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8
# qhasm: x0 += mult
# asm 1: add <mult=int64#5,<x0=int64#4
# asm 2: add <mult=%r8,<x0=%rcx
add %r8,%rcx
# qhasm: x4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<x4=int64#9
# asm 2: and <mulredmask=%rdx,<x4=%r11
and %rdx,%r11
# qhasm: stackx0 = x0
# asm 1: movq <x0=int64#4,>stackx0=stack64#8
# asm 2: movq <x0=%rcx,>stackx0=56(%rsp)
movq %rcx,56(%rsp)
# qhasm: stackx1 = x1
# asm 1: movq <x1=int64#6,>stackx1=stack64#9
# asm 2: movq <x1=%r9,>stackx1=64(%rsp)
movq %r9,64(%rsp)
# qhasm: stackx2 = x2
# asm 1: movq <x2=int64#7,>stackx2=stack64#10
# asm 2: movq <x2=%rax,>stackx2=72(%rsp)
movq %rax,72(%rsp)
# qhasm: stackx3 = x3
# asm 1: movq <x3=int64#8,>stackx3=stack64#11
# asm 2: movq <x3=%r10,>stackx3=80(%rsp)
movq %r10,80(%rsp)
# qhasm: stackx4 = x4
# asm 1: movq <x4=int64#9,>stackx4=stack64#12
# asm 2: movq <x4=%r11,>stackx4=88(%rsp)
movq %r11,88(%rsp)
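# First product complete: x = X*T, reduced and spilled to stackx0..4.
# The same multiply/reduce pattern now computes y = Y*Z (Y apparently
# at pp+40, Z at pp+80).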
  726. # qhasm: mulrax = *(uint64 *)(pp + 104)
  727. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#3
  728. # asm 2: movq 104(<pp=%rsi),>mulrax=%rdx
  729. movq 104(%rsi),%rdx
  730. # qhasm: mulrax *= 19
  731. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  732. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  733. imulq $19,%rdx,%rax
  734. # qhasm: mulx319_stack = mulrax
  735. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#13
  736. # asm 2: movq <mulrax=%rax,>mulx319_stack=96(%rsp)
  737. movq %rax,96(%rsp)
  738. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  739. # asm 1: mulq 56(<pp=int64#2)
  740. # asm 2: mulq 56(<pp=%rsi)
  741. mulq 56(%rsi)
  742. # qhasm: y0 = mulrax
  743. # asm 1: mov <mulrax=int64#7,>y0=int64#4
  744. # asm 2: mov <mulrax=%rax,>y0=%rcx
  745. mov %rax,%rcx
  746. # qhasm: mulr01 = mulrdx
  747. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  748. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  749. mov %rdx,%r8
  750. # qhasm: mulrax = *(uint64 *)(pp + 112)
  751. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#3
  752. # asm 2: movq 112(<pp=%rsi),>mulrax=%rdx
  753. movq 112(%rsi),%rdx
  754. # qhasm: mulrax *= 19
  755. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  756. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  757. imulq $19,%rdx,%rax
  758. # qhasm: mulx419_stack = mulrax
  759. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#14
  760. # asm 2: movq <mulrax=%rax,>mulx419_stack=104(%rsp)
  761. movq %rax,104(%rsp)
  762. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  763. # asm 1: mulq 48(<pp=int64#2)
  764. # asm 2: mulq 48(<pp=%rsi)
  765. mulq 48(%rsi)
  766. # qhasm: carry? y0 += mulrax
  767. # asm 1: add <mulrax=int64#7,<y0=int64#4
  768. # asm 2: add <mulrax=%rax,<y0=%rcx
  769. add %rax,%rcx
  770. # qhasm: mulr01 += mulrdx + carry
  771. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  772. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  773. adc %rdx,%r8
  774. # qhasm: mulrax = *(uint64 *)(pp + 80)
  775. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  776. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  777. movq 80(%rsi),%rax
  778. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  779. # asm 1: mulq 40(<pp=int64#2)
  780. # asm 2: mulq 40(<pp=%rsi)
  781. mulq 40(%rsi)
  782. # qhasm: carry? y0 += mulrax
  783. # asm 1: add <mulrax=int64#7,<y0=int64#4
  784. # asm 2: add <mulrax=%rax,<y0=%rcx
  785. add %rax,%rcx
  786. # qhasm: mulr01 += mulrdx + carry
  787. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  788. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  789. adc %rdx,%r8
  790. # qhasm: mulrax = *(uint64 *)(pp + 80)
  791. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  792. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  793. movq 80(%rsi),%rax
  794. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  795. # asm 1: mulq 48(<pp=int64#2)
  796. # asm 2: mulq 48(<pp=%rsi)
  797. mulq 48(%rsi)
  798. # qhasm: y1 = mulrax
  799. # asm 1: mov <mulrax=int64#7,>y1=int64#6
  800. # asm 2: mov <mulrax=%rax,>y1=%r9
  801. mov %rax,%r9
  802. # qhasm: mulr11 = mulrdx
  803. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  804. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  805. mov %rdx,%r10
  806. # qhasm: mulrax = *(uint64 *)(pp + 80)
  807. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  808. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  809. movq 80(%rsi),%rax
  810. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  811. # asm 1: mulq 56(<pp=int64#2)
  812. # asm 2: mulq 56(<pp=%rsi)
  813. mulq 56(%rsi)
  814. # qhasm: y2 = mulrax
  815. # asm 1: mov <mulrax=int64#7,>y2=int64#9
  816. # asm 2: mov <mulrax=%rax,>y2=%r11
  817. mov %rax,%r11
  818. # qhasm: mulr21 = mulrdx
  819. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  820. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  821. mov %rdx,%r12
  822. # qhasm: mulrax = *(uint64 *)(pp + 80)
  823. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  824. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  825. movq 80(%rsi),%rax
  826. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  827. # asm 1: mulq 64(<pp=int64#2)
  828. # asm 2: mulq 64(<pp=%rsi)
  829. mulq 64(%rsi)
  830. # qhasm: y3 = mulrax
  831. # asm 1: mov <mulrax=int64#7,>y3=int64#11
  832. # asm 2: mov <mulrax=%rax,>y3=%r13
  833. mov %rax,%r13
  834. # qhasm: mulr31 = mulrdx
  835. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  836. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  837. mov %rdx,%r14
  838. # qhasm: mulrax = *(uint64 *)(pp + 80)
  839. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  840. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  841. movq 80(%rsi),%rax
  842. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  843. # asm 1: mulq 72(<pp=int64#2)
  844. # asm 2: mulq 72(<pp=%rsi)
  845. mulq 72(%rsi)
  846. # qhasm: y4 = mulrax
  847. # asm 1: mov <mulrax=int64#7,>y4=int64#13
  848. # asm 2: mov <mulrax=%rax,>y4=%r15
  849. mov %rax,%r15
  850. # qhasm: mulr41 = mulrdx
  851. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  852. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  853. mov %rdx,%rbx
  854. # qhasm: mulrax = *(uint64 *)(pp + 88)
  855. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  856. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  857. movq 88(%rsi),%rax
  858. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  859. # asm 1: mulq 40(<pp=int64#2)
  860. # asm 2: mulq 40(<pp=%rsi)
  861. mulq 40(%rsi)
  862. # qhasm: carry? y1 += mulrax
  863. # asm 1: add <mulrax=int64#7,<y1=int64#6
  864. # asm 2: add <mulrax=%rax,<y1=%r9
  865. add %rax,%r9
  866. # qhasm: mulr11 += mulrdx + carry
  867. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  868. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  869. adc %rdx,%r10
  870. # qhasm: mulrax = *(uint64 *)(pp + 88)
  871. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  872. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  873. movq 88(%rsi),%rax
  874. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  875. # asm 1: mulq 48(<pp=int64#2)
  876. # asm 2: mulq 48(<pp=%rsi)
  877. mulq 48(%rsi)
  878. # qhasm: carry? y2 += mulrax
  879. # asm 1: add <mulrax=int64#7,<y2=int64#9
  880. # asm 2: add <mulrax=%rax,<y2=%r11
  881. add %rax,%r11
  882. # qhasm: mulr21 += mulrdx + carry
  883. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  884. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  885. adc %rdx,%r12
  886. # qhasm: mulrax = *(uint64 *)(pp + 88)
  887. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  888. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  889. movq 88(%rsi),%rax
  890. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  891. # asm 1: mulq 56(<pp=int64#2)
  892. # asm 2: mulq 56(<pp=%rsi)
  893. mulq 56(%rsi)
  894. # qhasm: carry? y3 += mulrax
  895. # asm 1: add <mulrax=int64#7,<y3=int64#11
  896. # asm 2: add <mulrax=%rax,<y3=%r13
  897. add %rax,%r13
  898. # qhasm: mulr31 += mulrdx + carry
  899. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  900. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  901. adc %rdx,%r14
  902. # qhasm: mulrax = *(uint64 *)(pp + 88)
  903. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  904. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  905. movq 88(%rsi),%rax
  906. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  907. # asm 1: mulq 64(<pp=int64#2)
  908. # asm 2: mulq 64(<pp=%rsi)
  909. mulq 64(%rsi)
  910. # qhasm: carry? y4 += mulrax
  911. # asm 1: add <mulrax=int64#7,<y4=int64#13
  912. # asm 2: add <mulrax=%rax,<y4=%r15
  913. add %rax,%r15
  914. # qhasm: mulr41 += mulrdx + carry
  915. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  916. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  917. adc %rdx,%rbx
  918. # qhasm: mulrax = *(uint64 *)(pp + 88)
  919. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#3
  920. # asm 2: movq 88(<pp=%rsi),>mulrax=%rdx
  921. movq 88(%rsi),%rdx
  922. # qhasm: mulrax *= 19
  923. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  924. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  925. imulq $19,%rdx,%rax
  926. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  927. # asm 1: mulq 72(<pp=int64#2)
  928. # asm 2: mulq 72(<pp=%rsi)
  929. mulq 72(%rsi)
  930. # qhasm: carry? y0 += mulrax
  931. # asm 1: add <mulrax=int64#7,<y0=int64#4
  932. # asm 2: add <mulrax=%rax,<y0=%rcx
  933. add %rax,%rcx
  934. # qhasm: mulr01 += mulrdx + carry
  935. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  936. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  937. adc %rdx,%r8
  938. # qhasm: mulrax = *(uint64 *)(pp + 96)
  939. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  940. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  941. movq 96(%rsi),%rax
  942. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  943. # asm 1: mulq 40(<pp=int64#2)
  944. # asm 2: mulq 40(<pp=%rsi)
  945. mulq 40(%rsi)
  946. # qhasm: carry? y2 += mulrax
  947. # asm 1: add <mulrax=int64#7,<y2=int64#9
  948. # asm 2: add <mulrax=%rax,<y2=%r11
  949. add %rax,%r11
  950. # qhasm: mulr21 += mulrdx + carry
  951. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  952. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  953. adc %rdx,%r12
  954. # qhasm: mulrax = *(uint64 *)(pp + 96)
  955. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  956. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  957. movq 96(%rsi),%rax
  958. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  959. # asm 1: mulq 48(<pp=int64#2)
  960. # asm 2: mulq 48(<pp=%rsi)
  961. mulq 48(%rsi)
  962. # qhasm: carry? y3 += mulrax
  963. # asm 1: add <mulrax=int64#7,<y3=int64#11
  964. # asm 2: add <mulrax=%rax,<y3=%r13
  965. add %rax,%r13
  966. # qhasm: mulr31 += mulrdx + carry
  967. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  968. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  969. adc %rdx,%r14
  970. # qhasm: mulrax = *(uint64 *)(pp + 96)
  971. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  972. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  973. movq 96(%rsi),%rax
  974. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  975. # asm 1: mulq 56(<pp=int64#2)
  976. # asm 2: mulq 56(<pp=%rsi)
  977. mulq 56(%rsi)
  978. # qhasm: carry? y4 += mulrax
  979. # asm 1: add <mulrax=int64#7,<y4=int64#13
  980. # asm 2: add <mulrax=%rax,<y4=%r15
  981. add %rax,%r15
  982. # qhasm: mulr41 += mulrdx + carry
  983. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  984. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  985. adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
movq 96(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)
# qhasm: carry? y0 += mulrax
# asm 1: add <mulrax=int64#7,<y0=int64#4
# asm 2: add <mulrax=%rax,<y0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
movq 96(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)
# qhasm: carry? y1 += mulrax
# asm 1: add <mulrax=int64#7,<y1=int64#6
# asm 2: add <mulrax=%rax,<y1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)
# qhasm: carry? y3 += mulrax
# asm 1: add <mulrax=int64#7,<y3=int64#11
# asm 2: add <mulrax=%rax,<y3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)
# qhasm: carry? y4 += mulrax
# asm 1: add <mulrax=int64#7,<y4=int64#13
# asm 2: add <mulrax=%rax,<y4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)
# qhasm: carry? y1 += mulrax
# asm 1: add <mulrax=int64#7,<y1=int64#6
# asm 2: add <mulrax=%rax,<y1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)
# qhasm: carry? y2 += mulrax
# asm 1: add <mulrax=int64#7,<y2=int64#9
# asm 2: add <mulrax=%rax,<y2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
movq 112(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)
# qhasm: carry? y4 += mulrax
# asm 1: add <mulrax=int64#7,<y4=int64#13
# asm 2: add <mulrax=%rax,<y4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)
# qhasm: carry? y1 += mulrax
# asm 1: add <mulrax=int64#7,<y1=int64#6
# asm 2: add <mulrax=%rax,<y1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)
# qhasm: carry? y2 += mulrax
# asm 1: add <mulrax=int64#7,<y2=int64#9
# asm 2: add <mulrax=%rax,<y2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)
# qhasm: carry? y3 += mulrax
# asm 1: add <mulrax=int64#7,<y3=int64#11
# asm 2: add <mulrax=%rax,<y3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
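# note: all 25 limb products are accumulated; the usual amd64-51 reduction
# follows. Each pair y_i:mulr_i1 holds one 128-bit column; shld $13 turns
# mulr_i1 into floor(column / 2^51) (each column stays well below 2^115, so
# 13 extra bits suffice) and the mask keeps column mod 2^51. Each quotient
# is added into the next column, and the quotient out of the top column
# wraps to column 0 scaled by 19, since 2^255 = 19 mod 2^255-19.
# A rough C sketch of one column step (hypothetical names):
#   hi = (hi << 13) | (lo >> 51);   /* == shld $13,lo,hi */
#   lo &= REDMASK51;                /* REDMASK51 = 2^51 - 1 */
#   next_lo += hi;                  /* top hi wraps: lo0 += 19*hi4 */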
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.y0) << 13
# asm 1: shld $13,<y0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<y0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8
# qhasm: y0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y0=int64#4
# asm 2: and <mulredmask=%rdx,<y0=%rcx
and %rdx,%rcx
# qhasm: mulr11 = (mulr11.y1) << 13
# asm 1: shld $13,<y1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<y1=%r9,<mulr11=%r10
shld $13,%r9,%r10
# qhasm: y1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y1=int64#6
# asm 2: and <mulredmask=%rdx,<y1=%r9
and %rdx,%r9
# qhasm: y1 += mulr01
# asm 1: add <mulr01=int64#5,<y1=int64#6
# asm 2: add <mulr01=%r8,<y1=%r9
add %r8,%r9
# qhasm: mulr21 = (mulr21.y2) << 13
# asm 1: shld $13,<y2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<y2=%r11,<mulr21=%r12
shld $13,%r11,%r12
# qhasm: y2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y2=int64#9
# asm 2: and <mulredmask=%rdx,<y2=%r11
and %rdx,%r11
# qhasm: y2 += mulr11
# asm 1: add <mulr11=int64#8,<y2=int64#9
# asm 2: add <mulr11=%r10,<y2=%r11
add %r10,%r11
# qhasm: mulr31 = (mulr31.y3) << 13
# asm 1: shld $13,<y3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<y3=%r13,<mulr31=%r14
shld $13,%r13,%r14
# qhasm: y3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y3=int64#11
# asm 2: and <mulredmask=%rdx,<y3=%r13
and %rdx,%r13
# qhasm: y3 += mulr21
# asm 1: add <mulr21=int64#10,<y3=int64#11
# asm 2: add <mulr21=%r12,<y3=%r13
add %r12,%r13
# qhasm: mulr41 = (mulr41.y4) << 13
# asm 1: shld $13,<y4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<y4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx
# qhasm: y4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y4=int64#13
# asm 2: and <mulredmask=%rdx,<y4=%r15
and %rdx,%r15
# qhasm: y4 += mulr31
# asm 1: add <mulr31=int64#12,<y4=int64#13
# asm 2: add <mulr31=%r14,<y4=%r15
add %r14,%r15
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8
# qhasm: y0 += mulr41
# asm 1: add <mulr41=int64#5,<y0=int64#4
# asm 2: add <mulr41=%r8,<y0=%rcx
add %r8,%rcx
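# note: a final carry chain ripples y0 -> y1 -> ... -> y4, with the last
# 51-bit carry folded back into y0 times 19, leaving y1..y4 below 2^51 and
# y0 at most slightly above it.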
# qhasm: mult = y0
# asm 1: mov <y0=int64#4,>mult=int64#5
# asm 2: mov <y0=%rcx,>mult=%r8
mov %rcx,%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: mult += y1
# asm 1: add <y1=int64#6,<mult=int64#5
# asm 2: add <y1=%r9,<mult=%r8
add %r9,%r8
# qhasm: y1 = mult
# asm 1: mov <mult=int64#5,>y1=int64#6
# asm 2: mov <mult=%r8,>y1=%r9
mov %r8,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: y0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y0=int64#4
# asm 2: and <mulredmask=%rdx,<y0=%rcx
and %rdx,%rcx
# qhasm: mult += y2
# asm 1: add <y2=int64#9,<mult=int64#5
# asm 2: add <y2=%r11,<mult=%r8
add %r11,%r8
# qhasm: y2 = mult
# asm 1: mov <mult=int64#5,>y2=int64#7
# asm 2: mov <mult=%r8,>y2=%rax
mov %r8,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: y1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y1=int64#6
# asm 2: and <mulredmask=%rdx,<y1=%r9
and %rdx,%r9
# qhasm: mult += y3
# asm 1: add <y3=int64#11,<mult=int64#5
# asm 2: add <y3=%r13,<mult=%r8
add %r13,%r8
# qhasm: y3 = mult
# asm 1: mov <mult=int64#5,>y3=int64#8
# asm 2: mov <mult=%r8,>y3=%r10
mov %r8,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: y2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y2=int64#7
# asm 2: and <mulredmask=%rdx,<y2=%rax
and %rdx,%rax
# qhasm: mult += y4
# asm 1: add <y4=int64#13,<mult=int64#5
# asm 2: add <y4=%r15,<mult=%r8
add %r15,%r8
# qhasm: y4 = mult
# asm 1: mov <mult=int64#5,>y4=int64#9
# asm 2: mov <mult=%r8,>y4=%r11
mov %r8,%r11
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: y3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y3=int64#8
# asm 2: and <mulredmask=%rdx,<y3=%r10
and %rdx,%r10
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8
# qhasm: y0 += mult
# asm 1: add <mult=int64#5,<y0=int64#4
# asm 2: add <mult=%r8,<y0=%rcx
add %r8,%rcx
# qhasm: y4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<y4=int64#9
# asm 2: and <mulredmask=%rdx,<y4=%r11
and %rdx,%r11
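# note: with x (the first product, saved earlier as stackx0..4) and y (the
# product just reduced) in hand, build the first two pniels components:
# ysubx = y - x and xaddy = y + x. The copy of y gets 2*p added limb-wise
# (batch_2P0/batch_2P1234) before the subtraction so no limb goes negative.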
# qhasm: ysubx0 = y0
# asm 1: mov <y0=int64#4,>ysubx0=int64#3
# asm 2: mov <y0=%rcx,>ysubx0=%rdx
mov %rcx,%rdx
# qhasm: ysubx1 = y1
# asm 1: mov <y1=int64#6,>ysubx1=int64#5
# asm 2: mov <y1=%r9,>ysubx1=%r8
mov %r9,%r8
# qhasm: ysubx2 = y2
# asm 1: mov <y2=int64#7,>ysubx2=int64#10
# asm 2: mov <y2=%rax,>ysubx2=%r12
mov %rax,%r12
# qhasm: ysubx3 = y3
# asm 1: mov <y3=int64#8,>ysubx3=int64#11
# asm 2: mov <y3=%r10,>ysubx3=%r13
mov %r10,%r13
# qhasm: ysubx4 = y4
# asm 1: mov <y4=int64#9,>ysubx4=int64#12
# asm 2: mov <y4=%r11,>ysubx4=%r14
mov %r11,%r14
# qhasm: ysubx0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<ysubx0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<ysubx0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: ysubx1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx1=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx1=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: ysubx2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx2=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx2=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: ysubx3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx3=int64#11
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx3=%r13
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r13
# qhasm: ysubx4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx4=int64#12
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx4=%r14
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r14
# qhasm: x0 = stackx0
# asm 1: movq <stackx0=stack64#8,>x0=int64#13
# asm 2: movq <stackx0=56(%rsp),>x0=%r15
movq 56(%rsp),%r15
# qhasm: ysubx0 -= x0
# asm 1: sub <x0=int64#13,<ysubx0=int64#3
# asm 2: sub <x0=%r15,<ysubx0=%rdx
sub %r15,%rdx
# qhasm: y0 += x0
# asm 1: add <x0=int64#13,<y0=int64#4
# asm 2: add <x0=%r15,<y0=%rcx
add %r15,%rcx
# qhasm: x1 = stackx1
# asm 1: movq <stackx1=stack64#9,>x1=int64#13
# asm 2: movq <stackx1=64(%rsp),>x1=%r15
movq 64(%rsp),%r15
# qhasm: ysubx1 -= x1
# asm 1: sub <x1=int64#13,<ysubx1=int64#5
# asm 2: sub <x1=%r15,<ysubx1=%r8
sub %r15,%r8
# qhasm: y1 += x1
# asm 1: add <x1=int64#13,<y1=int64#6
# asm 2: add <x1=%r15,<y1=%r9
add %r15,%r9
# qhasm: x2 = stackx2
# asm 1: movq <stackx2=stack64#10,>x2=int64#13
# asm 2: movq <stackx2=72(%rsp),>x2=%r15
movq 72(%rsp),%r15
# qhasm: ysubx2 -= x2
# asm 1: sub <x2=int64#13,<ysubx2=int64#10
# asm 2: sub <x2=%r15,<ysubx2=%r12
sub %r15,%r12
# qhasm: y2 += x2
# asm 1: add <x2=int64#13,<y2=int64#7
# asm 2: add <x2=%r15,<y2=%rax
add %r15,%rax
# qhasm: x3 = stackx3
# asm 1: movq <stackx3=stack64#11,>x3=int64#13
# asm 2: movq <stackx3=80(%rsp),>x3=%r15
movq 80(%rsp),%r15
# qhasm: ysubx3 -= x3
# asm 1: sub <x3=int64#13,<ysubx3=int64#11
# asm 2: sub <x3=%r15,<ysubx3=%r13
sub %r15,%r13
# qhasm: y3 += x3
# asm 1: add <x3=int64#13,<y3=int64#8
# asm 2: add <x3=%r15,<y3=%r10
add %r15,%r10
# qhasm: x4 = stackx4
# asm 1: movq <stackx4=stack64#12,>x4=int64#13
# asm 2: movq <stackx4=88(%rsp),>x4=%r15
movq 88(%rsp),%r15
# qhasm: ysubx4 -= x4
# asm 1: sub <x4=int64#13,<ysubx4=int64#12
# asm 2: sub <x4=%r15,<ysubx4=%r14
sub %r15,%r14
# qhasm: y4 += x4
# asm 1: add <x4=int64#13,<y4=int64#9
# asm 2: add <x4=%r15,<y4=%r11
add %r15,%r11
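# note: write out ysubx to rp+0..32 and y+x to rp+40..72; the remaining
# pniels components are produced by the two multiplications below.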
# qhasm: *(uint64 *)(rp + 0) = ysubx0
# asm 1: movq <ysubx0=int64#3,0(<rp=int64#1)
# asm 2: movq <ysubx0=%rdx,0(<rp=%rdi)
movq %rdx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = ysubx1
# asm 1: movq <ysubx1=int64#5,8(<rp=int64#1)
# asm 2: movq <ysubx1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = ysubx2
# asm 1: movq <ysubx2=int64#10,16(<rp=int64#1)
# asm 2: movq <ysubx2=%r12,16(<rp=%rdi)
movq %r12,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = ysubx3
# asm 1: movq <ysubx3=int64#11,24(<rp=int64#1)
# asm 2: movq <ysubx3=%r13,24(<rp=%rdi)
movq %r13,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = ysubx4
# asm 1: movq <ysubx4=int64#12,32(<rp=int64#1)
# asm 2: movq <ysubx4=%r14,32(<rp=%rdi)
movq %r14,32(%rdi)
# qhasm: *(uint64 *)(rp + 40) = y0
# asm 1: movq <y0=int64#4,40(<rp=int64#1)
# asm 2: movq <y0=%rcx,40(<rp=%rdi)
movq %rcx,40(%rdi)
# qhasm: *(uint64 *)(rp + 48) = y1
# asm 1: movq <y1=int64#6,48(<rp=int64#1)
# asm 2: movq <y1=%r9,48(<rp=%rdi)
movq %r9,48(%rdi)
# qhasm: *(uint64 *)(rp + 56) = y2
# asm 1: movq <y2=int64#7,56(<rp=int64#1)
# asm 2: movq <y2=%rax,56(<rp=%rdi)
movq %rax,56(%rdi)
# qhasm: *(uint64 *)(rp + 64) = y3
# asm 1: movq <y3=int64#8,64(<rp=int64#1)
# asm 2: movq <y3=%r10,64(<rp=%rdi)
movq %r10,64(%rdi)
# qhasm: *(uint64 *)(rp + 72) = y4
# asm 1: movq <y4=int64#9,72(<rp=int64#1)
# asm 2: movq <y4=%r11,72(<rp=%rdi)
movq %r11,72(%rdi)
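# note: third product, same pattern as above: the limbs at pp+40..72 times
# the limbs at pp+120..152 (under the usual p1p1 field ordering this is the
# Z output), accumulated in rz0..rz4/mulr01..mulr41 with 19*limb3 and
# 19*limb4 of the pp+40..72 operand cached at 56(%rsp)/64(%rsp).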
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 64(<pp=%rsi),>mulrax=%rdx
movq 64(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
movq %rax,56(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: rz0 = mulrax
# asm 1: mov <mulrax=int64#7,>rz0=int64#4
# asm 2: mov <mulrax=%rax,>rz0=%rcx
mov %rax,%rcx
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
mov %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 72(<pp=%rsi),>mulrax=%rdx
movq 72(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
movq %rax,64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: rz1 = mulrax
# asm 1: mov <mulrax=int64#7,>rz1=int64#6
# asm 2: mov <mulrax=%rax,>rz1=%r9
mov %rax,%r9
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
mov %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: rz2 = mulrax
# asm 1: mov <mulrax=int64#7,>rz2=int64#9
# asm 2: mov <mulrax=%rax,>rz2=%r11
mov %rax,%r11
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
mov %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: rz3 = mulrax
# asm 1: mov <mulrax=int64#7,>rz3=int64#11
# asm 2: mov <mulrax=%rax,>rz3=%r13
mov %rax,%r13
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: rz4 = mulrax
# asm 1: mov <mulrax=int64#7,>rz4=int64#13
# asm 2: mov <mulrax=%rax,>rz4=%r15
mov %rax,%r15
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
mov %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 48(<pp=%rsi),>mulrax=%rdx
movq 48(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
movq 56(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
add %rax,%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
movq 56(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
movq 64(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
movq 64(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 72(<pp=%rsi),>mulrax=%rax
movq 72(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
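# note: reduce rz0..rz4 exactly as y above: squeeze each 128-bit column to
# 51 bits and wrap the overflow past 2^255 into the bottom limb scaled by 19.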
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<rz0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8
# qhasm: rz0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz0=int64#4
# asm 2: and <mulredmask=%rdx,<rz0=%rcx
and %rdx,%rcx
# qhasm: mulr11 = (mulr11.rz1) << 13
# asm 1: shld $13,<rz1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<rz1=%r9,<mulr11=%r10
shld $13,%r9,%r10
# qhasm: rz1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz1=int64#6
# asm 2: and <mulredmask=%rdx,<rz1=%r9
and %rdx,%r9
# qhasm: rz1 += mulr01
# asm 1: add <mulr01=int64#5,<rz1=int64#6
# asm 2: add <mulr01=%r8,<rz1=%r9
add %r8,%r9
# qhasm: mulr21 = (mulr21.rz2) << 13
# asm 1: shld $13,<rz2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<rz2=%r11,<mulr21=%r12
shld $13,%r11,%r12
# qhasm: rz2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz2=int64#9
# asm 2: and <mulredmask=%rdx,<rz2=%r11
and %rdx,%r11
# qhasm: rz2 += mulr11
# asm 1: add <mulr11=int64#8,<rz2=int64#9
# asm 2: add <mulr11=%r10,<rz2=%r11
add %r10,%r11
# qhasm: mulr31 = (mulr31.rz3) << 13
# asm 1: shld $13,<rz3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<rz3=%r13,<mulr31=%r14
shld $13,%r13,%r14
# qhasm: rz3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz3=int64#11
# asm 2: and <mulredmask=%rdx,<rz3=%r13
and %rdx,%r13
# qhasm: rz3 += mulr21
# asm 1: add <mulr21=int64#10,<rz3=int64#11
# asm 2: add <mulr21=%r12,<rz3=%r13
add %r12,%r13
# qhasm: mulr41 = (mulr41.rz4) << 13
# asm 1: shld $13,<rz4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<rz4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx
# qhasm: rz4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz4=int64#13
# asm 2: and <mulredmask=%rdx,<rz4=%r15
and %rdx,%r15
# qhasm: rz4 += mulr31
# asm 1: add <mulr31=int64#12,<rz4=int64#13
# asm 2: add <mulr31=%r14,<rz4=%r15
add %r14,%r15
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8
# qhasm: rz0 += mulr41
# asm 1: add <mulr41=int64#5,<rz0=int64#4
# asm 2: add <mulr41=%r8,<rz0=%rcx
add %r8,%rcx
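# note: carry chain for rz, mirroring the one for y, then the result is
# written straight to rp+80..112.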
# qhasm: mult = rz0
# asm 1: mov <rz0=int64#4,>mult=int64#5
# asm 2: mov <rz0=%rcx,>mult=%r8
mov %rcx,%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: mult += rz1
# asm 1: add <rz1=int64#6,<mult=int64#5
# asm 2: add <rz1=%r9,<mult=%r8
add %r9,%r8
# qhasm: rz1 = mult
# asm 1: mov <mult=int64#5,>rz1=int64#6
# asm 2: mov <mult=%r8,>rz1=%r9
mov %r8,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: rz0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz0=int64#4
# asm 2: and <mulredmask=%rdx,<rz0=%rcx
and %rdx,%rcx
# qhasm: mult += rz2
# asm 1: add <rz2=int64#9,<mult=int64#5
# asm 2: add <rz2=%r11,<mult=%r8
add %r11,%r8
# qhasm: rz2 = mult
# asm 1: mov <mult=int64#5,>rz2=int64#7
# asm 2: mov <mult=%r8,>rz2=%rax
mov %r8,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: rz1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz1=int64#6
# asm 2: and <mulredmask=%rdx,<rz1=%r9
and %rdx,%r9
# qhasm: mult += rz3
# asm 1: add <rz3=int64#11,<mult=int64#5
# asm 2: add <rz3=%r13,<mult=%r8
add %r13,%r8
# qhasm: rz3 = mult
# asm 1: mov <mult=int64#5,>rz3=int64#8
# asm 2: mov <mult=%r8,>rz3=%r10
mov %r8,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: rz2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz2=int64#7
# asm 2: and <mulredmask=%rdx,<rz2=%rax
and %rdx,%rax
# qhasm: mult += rz4
# asm 1: add <rz4=int64#13,<mult=int64#5
# asm 2: add <rz4=%r15,<mult=%r8
add %r15,%r8
# qhasm: rz4 = mult
# asm 1: mov <mult=int64#5,>rz4=int64#9
# asm 2: mov <mult=%r8,>rz4=%r11
mov %r8,%r11
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8
# qhasm: rz3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz3=int64#8
# asm 2: and <mulredmask=%rdx,<rz3=%r10
and %rdx,%r10
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8
# qhasm: rz0 += mult
# asm 1: add <mult=int64#5,<rz0=int64#4
# asm 2: add <mult=%r8,<rz0=%rcx
add %r8,%rcx
# qhasm: rz4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz4=int64#9
# asm 2: and <mulredmask=%rdx,<rz4=%r11
and %rdx,%r11
# qhasm: *(uint64 *)(rp + 80) = rz0
# asm 1: movq <rz0=int64#4,80(<rp=int64#1)
# asm 2: movq <rz0=%rcx,80(<rp=%rdi)
movq %rcx,80(%rdi)
# qhasm: *(uint64 *)(rp + 88) = rz1
# asm 1: movq <rz1=int64#6,88(<rp=int64#1)
# asm 2: movq <rz1=%r9,88(<rp=%rdi)
movq %r9,88(%rdi)
# qhasm: *(uint64 *)(rp + 96) = rz2
# asm 1: movq <rz2=int64#7,96(<rp=int64#1)
# asm 2: movq <rz2=%rax,96(<rp=%rdi)
movq %rax,96(%rdi)
# qhasm: *(uint64 *)(rp + 104) = rz3
# asm 1: movq <rz3=int64#8,104(<rp=int64#1)
# asm 2: movq <rz3=%r10,104(<rp=%rdi)
movq %r10,104(%rdi)
# qhasm: *(uint64 *)(rp + 112) = rz4
# asm 1: movq <rz4=int64#9,112(<rp=int64#1)
# asm 2: movq <rz4=%r11,112(<rp=%rdi)
movq %r11,112(%rdi)
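# note: last product of this section: the limbs at pp+0..32 times the limbs
# at pp+80..112 into t0..t4 (under the usual p1p1 ordering this is the T
# output, presumably scaled by 2*d further down for the pniels t2d entry);
# 19*limb3 and 19*limb4 of the pp+0..32 operand are cached on the stack.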
  2046. # qhasm: mulrax = *(uint64 *)(pp + 24)
  2047. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
  2048. # asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
  2049. movq 24(%rsi),%rdx
  2050. # qhasm: mulrax *= 19
  2051. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2052. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2053. imulq $19,%rdx,%rax
  2054. # qhasm: mulx319_stack = mulrax
  2055. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
  2056. # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
  2057. movq %rax,56(%rsp)
  2058. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2059. # asm 1: mulq 96(<pp=int64#2)
  2060. # asm 2: mulq 96(<pp=%rsi)
  2061. mulq 96(%rsi)
  2062. # qhasm: t0 = mulrax
  2063. # asm 1: mov <mulrax=int64#7,>t0=int64#4
  2064. # asm 2: mov <mulrax=%rax,>t0=%rcx
  2065. mov %rax,%rcx
  2066. # qhasm: mulr01 = mulrdx
  2067. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  2068. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  2069. mov %rdx,%r8
  2070. # qhasm: mulrax = *(uint64 *)(pp + 32)
  2071. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
  2072. # asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
  2073. movq 32(%rsi),%rdx
  2074. # qhasm: mulrax *= 19
  2075. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2076. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2077. imulq $19,%rdx,%rax
  2078. # qhasm: mulx419_stack = mulrax
  2079. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
  2080. # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
  2081. movq %rax,64(%rsp)
  2082. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2083. # asm 1: mulq 88(<pp=int64#2)
  2084. # asm 2: mulq 88(<pp=%rsi)
  2085. mulq 88(%rsi)
  2086. # qhasm: carry? t0 += mulrax
  2087. # asm 1: add <mulrax=int64#7,<t0=int64#4
  2088. # asm 2: add <mulrax=%rax,<t0=%rcx
  2089. add %rax,%rcx
  2090. # qhasm: mulr01 += mulrdx + carry
  2091. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2092. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2093. adc %rdx,%r8
  2094. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2095. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2096. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2097. movq 0(%rsi),%rax
  2098. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2099. # asm 1: mulq 80(<pp=int64#2)
  2100. # asm 2: mulq 80(<pp=%rsi)
  2101. mulq 80(%rsi)
  2102. # qhasm: carry? t0 += mulrax
  2103. # asm 1: add <mulrax=int64#7,<t0=int64#4
  2104. # asm 2: add <mulrax=%rax,<t0=%rcx
  2105. add %rax,%rcx
  2106. # qhasm: mulr01 += mulrdx + carry
  2107. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2108. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2109. adc %rdx,%r8
  2110. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2111. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2112. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2113. movq 0(%rsi),%rax
  2114. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2115. # asm 1: mulq 88(<pp=int64#2)
  2116. # asm 2: mulq 88(<pp=%rsi)
  2117. mulq 88(%rsi)
  2118. # qhasm: t1 = mulrax
  2119. # asm 1: mov <mulrax=int64#7,>t1=int64#6
  2120. # asm 2: mov <mulrax=%rax,>t1=%r9
  2121. mov %rax,%r9
  2122. # qhasm: mulr11 = mulrdx
  2123. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  2124. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  2125. mov %rdx,%r10
  2126. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2127. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2128. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2129. movq 0(%rsi),%rax
  2130. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2131. # asm 1: mulq 96(<pp=int64#2)
  2132. # asm 2: mulq 96(<pp=%rsi)
  2133. mulq 96(%rsi)
  2134. # qhasm: t2 = mulrax
  2135. # asm 1: mov <mulrax=int64#7,>t2=int64#9
  2136. # asm 2: mov <mulrax=%rax,>t2=%r11
  2137. mov %rax,%r11
  2138. # qhasm: mulr21 = mulrdx
  2139. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  2140. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  2141. mov %rdx,%r12
  2142. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2143. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2144. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2145. movq 0(%rsi),%rax
  2146. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2147. # asm 1: mulq 104(<pp=int64#2)
  2148. # asm 2: mulq 104(<pp=%rsi)
  2149. mulq 104(%rsi)
  2150. # qhasm: t3 = mulrax
  2151. # asm 1: mov <mulrax=int64#7,>t3=int64#11
  2152. # asm 2: mov <mulrax=%rax,>t3=%r13
  2153. mov %rax,%r13
  2154. # qhasm: mulr31 = mulrdx
  2155. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  2156. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  2157. mov %rdx,%r14
  2158. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2159. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2160. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2161. movq 0(%rsi),%rax
  2162. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2163. # asm 1: mulq 112(<pp=int64#2)
  2164. # asm 2: mulq 112(<pp=%rsi)
  2165. mulq 112(%rsi)
  2166. # qhasm: t4 = mulrax
  2167. # asm 1: mov <mulrax=int64#7,>t4=int64#13
  2168. # asm 2: mov <mulrax=%rax,>t4=%r15
  2169. mov %rax,%r15
  2170. # qhasm: mulr41 = mulrdx
  2171. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  2172. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  2173. mov %rdx,%rbx
  2174. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2175. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2176. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2177. movq 8(%rsi),%rax
  2178. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2179. # asm 1: mulq 80(<pp=int64#2)
  2180. # asm 2: mulq 80(<pp=%rsi)
  2181. mulq 80(%rsi)
  2182. # qhasm: carry? t1 += mulrax
  2183. # asm 1: add <mulrax=int64#7,<t1=int64#6
  2184. # asm 2: add <mulrax=%rax,<t1=%r9
  2185. add %rax,%r9
  2186. # qhasm: mulr11 += mulrdx + carry
  2187. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2188. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2189. adc %rdx,%r10
  2190. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2191. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2192. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2193. movq 8(%rsi),%rax
  2194. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2195. # asm 1: mulq 88(<pp=int64#2)
  2196. # asm 2: mulq 88(<pp=%rsi)
  2197. mulq 88(%rsi)
  2198. # qhasm: carry? t2 += mulrax
  2199. # asm 1: add <mulrax=int64#7,<t2=int64#9
  2200. # asm 2: add <mulrax=%rax,<t2=%r11
  2201. add %rax,%r11
  2202. # qhasm: mulr21 += mulrdx + carry
  2203. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2204. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2205. adc %rdx,%r12
  2206. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2207. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2208. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2209. movq 8(%rsi),%rax
  2210. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2211. # asm 1: mulq 96(<pp=int64#2)
  2212. # asm 2: mulq 96(<pp=%rsi)
  2213. mulq 96(%rsi)
  2214. # qhasm: carry? t3 += mulrax
  2215. # asm 1: add <mulrax=int64#7,<t3=int64#11
  2216. # asm 2: add <mulrax=%rax,<t3=%r13
  2217. add %rax,%r13
  2218. # qhasm: mulr31 += mulrdx + carry
  2219. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2220. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2221. adc %rdx,%r14
  2222. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2223. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2224. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2225. movq 8(%rsi),%rax
  2226. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2227. # asm 1: mulq 104(<pp=int64#2)
  2228. # asm 2: mulq 104(<pp=%rsi)
  2229. mulq 104(%rsi)
  2230. # qhasm: carry? t4 += mulrax
  2231. # asm 1: add <mulrax=int64#7,<t4=int64#13
  2232. # asm 2: add <mulrax=%rax,<t4=%r15
  2233. add %rax,%r15
  2234. # qhasm: mulr41 += mulrdx + carry
  2235. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2236. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2237. adc %rdx,%rbx
  2238. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2239. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
  2240. # asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
  2241. movq 8(%rsi),%rdx
  2242. # qhasm: mulrax *= 19
  2243. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2244. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2245. imulq $19,%rdx,%rax
  2246. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2247. # asm 1: mulq 112(<pp=int64#2)
  2248. # asm 2: mulq 112(<pp=%rsi)
  2249. mulq 112(%rsi)
  2250. # qhasm: carry? t0 += mulrax
  2251. # asm 1: add <mulrax=int64#7,<t0=int64#4
  2252. # asm 2: add <mulrax=%rax,<t0=%rcx
  2253. add %rax,%rcx
  2254. # qhasm: mulr01 += mulrdx + carry
  2255. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2256. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2257. adc %rdx,%r8
  2258. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2259. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2260. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2261. movq 16(%rsi),%rax
  2262. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2263. # asm 1: mulq 80(<pp=int64#2)
  2264. # asm 2: mulq 80(<pp=%rsi)
  2265. mulq 80(%rsi)
  2266. # qhasm: carry? t2 += mulrax
  2267. # asm 1: add <mulrax=int64#7,<t2=int64#9
  2268. # asm 2: add <mulrax=%rax,<t2=%r11
  2269. add %rax,%r11
  2270. # qhasm: mulr21 += mulrdx + carry
  2271. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2272. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2273. adc %rdx,%r12
  2274. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2275. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2276. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2277. movq 16(%rsi),%rax
  2278. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2279. # asm 1: mulq 88(<pp=int64#2)
  2280. # asm 2: mulq 88(<pp=%rsi)
  2281. mulq 88(%rsi)
  2282. # qhasm: carry? t3 += mulrax
  2283. # asm 1: add <mulrax=int64#7,<t3=int64#11
  2284. # asm 2: add <mulrax=%rax,<t3=%r13
  2285. add %rax,%r13
  2286. # qhasm: mulr31 += mulrdx + carry
  2287. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2288. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2289. adc %rdx,%r14
  2290. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2291. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2292. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2293. movq 16(%rsi),%rax
  2294. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2295. # asm 1: mulq 96(<pp=int64#2)
  2296. # asm 2: mulq 96(<pp=%rsi)
  2297. mulq 96(%rsi)
  2298. # qhasm: carry? t4 += mulrax
  2299. # asm 1: add <mulrax=int64#7,<t4=int64#13
  2300. # asm 2: add <mulrax=%rax,<t4=%r15
  2301. add %rax,%r15
  2302. # qhasm: mulr41 += mulrdx + carry
  2303. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2304. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2305. adc %rdx,%rbx
  2306. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2307. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  2308. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  2309. movq 16(%rsi),%rdx
  2310. # qhasm: mulrax *= 19
  2311. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2312. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2313. imulq $19,%rdx,%rax
  2314. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2315. # asm 1: mulq 104(<pp=int64#2)
  2316. # asm 2: mulq 104(<pp=%rsi)
  2317. mulq 104(%rsi)
  2318. # qhasm: carry? t0 += mulrax
  2319. # asm 1: add <mulrax=int64#7,<t0=int64#4
  2320. # asm 2: add <mulrax=%rax,<t0=%rcx
  2321. add %rax,%rcx
  2322. # qhasm: mulr01 += mulrdx + carry
  2323. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2324. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2325. adc %rdx,%r8
  2326. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2327. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  2328. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  2329. movq 16(%rsi),%rdx
  2330. # qhasm: mulrax *= 19
  2331. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2332. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2333. imulq $19,%rdx,%rax
  2334. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2335. # asm 1: mulq 112(<pp=int64#2)
  2336. # asm 2: mulq 112(<pp=%rsi)
  2337. mulq 112(%rsi)
  2338. # qhasm: carry? t1 += mulrax
  2339. # asm 1: add <mulrax=int64#7,<t1=int64#6
  2340. # asm 2: add <mulrax=%rax,<t1=%r9
  2341. add %rax,%r9
  2342. # qhasm: mulr11 += mulrdx + carry
  2343. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2344. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2345. adc %rdx,%r10
  2346. # qhasm: mulrax = *(uint64 *)(pp + 24)
  2347. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  2348. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  2349. movq 24(%rsi),%rax
  2350. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2351. # asm 1: mulq 80(<pp=int64#2)
  2352. # asm 2: mulq 80(<pp=%rsi)
  2353. mulq 80(%rsi)
  2354. # qhasm: carry? t3 += mulrax
  2355. # asm 1: add <mulrax=int64#7,<t3=int64#11
  2356. # asm 2: add <mulrax=%rax,<t3=%r13
  2357. add %rax,%r13
  2358. # qhasm: mulr31 += mulrdx + carry
  2359. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2360. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2361. adc %rdx,%r14
  2362. # qhasm: mulrax = *(uint64 *)(pp + 24)
  2363. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  2364. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  2365. movq 24(%rsi),%rax
  2366. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2367. # asm 1: mulq 88(<pp=int64#2)
  2368. # asm 2: mulq 88(<pp=%rsi)
  2369. mulq 88(%rsi)
  2370. # qhasm: carry? t4 += mulrax
  2371. # asm 1: add <mulrax=int64#7,<t4=int64#13
  2372. # asm 2: add <mulrax=%rax,<t4=%r15
  2373. add %rax,%r15
  2374. # qhasm: mulr41 += mulrdx + carry
  2375. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2376. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2377. adc %rdx,%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)
# qhasm: carry? t1 += mulrax
# asm 1: add <mulrax=int64#7,<t1=int64#6
# asm 2: add <mulrax=%rax,<t1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)
# qhasm: carry? t2 += mulrax
# asm 1: add <mulrax=int64#7,<t2=int64#9
# asm 2: add <mulrax=%rax,<t2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<pp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)
# qhasm: carry? t4 += mulrax
# asm 1: add <mulrax=int64#7,<t4=int64#13
# asm 2: add <mulrax=%rax,<t4=%r15
add %rax,%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)
# qhasm: carry? t1 += mulrax
# asm 1: add <mulrax=int64#7,<t1=int64#6
# asm 2: add <mulrax=%rax,<t1=%r9
add %rax,%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)
# qhasm: carry? t2 += mulrax
# asm 1: add <mulrax=int64#7,<t2=int64#9
# asm 2: add <mulrax=%rax,<t2=%r11
add %rax,%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)
# qhasm: carry? t3 += mulrax
# asm 1: add <mulrax=int64#7,<t3=int64#11
# asm 2: add <mulrax=%rax,<t3=%r13
add %rax,%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
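#
# The schoolbook multiplication is now complete: the unreduced product
# sits in five 128-bit accumulators t0:mulr01 .. t4:mulr41 (low:high).
# Cross terms that would have landed in limb positions 5..8 were folded
# back into limbs 0..3 via the precomputed 19*limb values
# (mulx319_stack, mulx419_stack), using 2^255 = 19 (mod 2^255-19).
#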
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.t0) << 13
# asm 1: shld $13,<t0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<t0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8
# qhasm: t0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t0=int64#4
# asm 2: and <mulredmask=%rsi,<t0=%rcx
and %rsi,%rcx
# qhasm: mulr11 = (mulr11.t1) << 13
# asm 1: shld $13,<t1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<t1=%r9,<mulr11=%r10
shld $13,%r9,%r10
# qhasm: t1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t1=int64#6
# asm 2: and <mulredmask=%rsi,<t1=%r9
and %rsi,%r9
# qhasm: t1 += mulr01
# asm 1: add <mulr01=int64#5,<t1=int64#6
# asm 2: add <mulr01=%r8,<t1=%r9
add %r8,%r9
# qhasm: mulr21 = (mulr21.t2) << 13
# asm 1: shld $13,<t2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<t2=%r11,<mulr21=%r12
shld $13,%r11,%r12
# qhasm: t2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t2=int64#9
# asm 2: and <mulredmask=%rsi,<t2=%r11
and %rsi,%r11
# qhasm: t2 += mulr11
# asm 1: add <mulr11=int64#8,<t2=int64#9
# asm 2: add <mulr11=%r10,<t2=%r11
add %r10,%r11
# qhasm: mulr31 = (mulr31.t3) << 13
# asm 1: shld $13,<t3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<t3=%r13,<mulr31=%r14
shld $13,%r13,%r14
# qhasm: t3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t3=int64#11
# asm 2: and <mulredmask=%rsi,<t3=%r13
and %rsi,%r13
# qhasm: t3 += mulr21
# asm 1: add <mulr21=int64#10,<t3=int64#11
# asm 2: add <mulr21=%r12,<t3=%r13
add %r12,%r13
# qhasm: mulr41 = (mulr41.t4) << 13
# asm 1: shld $13,<t4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<t4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx
# qhasm: t4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t4=int64#13
# asm 2: and <mulredmask=%rsi,<t4=%r15
and %rsi,%r15
# qhasm: t4 += mulr31
# asm 1: add <mulr31=int64#12,<t4=int64#13
# asm 2: add <mulr31=%r14,<t4=%r15
add %r14,%r15
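#
# First reduction step: each accumulator mulr_i1:t_i is split at bit 51.
# "shld $13" turns mulr_i1 into floor((mulr_i1:t_i) / 2^51) (the limbs
# live at radix 2^51, and 64 - 51 = 13), the AND with REDMASK51 = 2^51-1
# keeps the low 51 bits of t_i, and each high part is added into the
# next limb up; the final overflow mulr41 wraps around into t0 as
# 19*mulr41 just below.
#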
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#3
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%rdx
imulq $19,%rbx,%rdx
# qhasm: t0 += mulr41
# asm 1: add <mulr41=int64#3,<t0=int64#4
# asm 2: add <mulr41=%rdx,<t0=%rcx
add %rdx,%rcx
# qhasm: mult = t0
# asm 1: mov <t0=int64#4,>mult=int64#3
# asm 2: mov <t0=%rcx,>mult=%rdx
mov %rcx,%rdx
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: mult += t1
# asm 1: add <t1=int64#6,<mult=int64#3
# asm 2: add <t1=%r9,<mult=%rdx
add %r9,%rdx
# qhasm: t1 = mult
# asm 1: mov <mult=int64#3,>t1=int64#5
# asm 2: mov <mult=%rdx,>t1=%r8
mov %rdx,%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: t0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t0=int64#4
# asm 2: and <mulredmask=%rsi,<t0=%rcx
and %rsi,%rcx
# qhasm: mult += t2
# asm 1: add <t2=int64#9,<mult=int64#3
# asm 2: add <t2=%r11,<mult=%rdx
add %r11,%rdx
# qhasm: t2 = mult
# asm 1: mov <mult=int64#3,>t2=int64#6
# asm 2: mov <mult=%rdx,>t2=%r9
mov %rdx,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: t1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t1=int64#5
# asm 2: and <mulredmask=%rsi,<t1=%r8
and %rsi,%r8
# qhasm: mult += t3
# asm 1: add <t3=int64#11,<mult=int64#3
# asm 2: add <t3=%r13,<mult=%rdx
add %r13,%rdx
# qhasm: t3 = mult
# asm 1: mov <mult=int64#3,>t3=int64#7
# asm 2: mov <mult=%rdx,>t3=%rax
mov %rdx,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: t2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t2=int64#6
# asm 2: and <mulredmask=%rsi,<t2=%r9
and %rsi,%r9
# qhasm: mult += t4
# asm 1: add <t4=int64#13,<mult=int64#3
# asm 2: add <t4=%r15,<mult=%rdx
add %r15,%rdx
# qhasm: t4 = mult
# asm 1: mov <mult=int64#3,>t4=int64#8
# asm 2: mov <mult=%rdx,>t4=%r10
mov %rdx,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: t3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t3=int64#7
# asm 2: and <mulredmask=%rsi,<t3=%rax
and %rsi,%rax
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#3,>mult=int64#3
# asm 2: imulq $19,<mult=%rdx,>mult=%rdx
imulq $19,%rdx,%rdx
# qhasm: t0 += mult
# asm 1: add <mult=int64#3,<t0=int64#4
# asm 2: add <mult=%rdx,<t0=%rcx
add %rdx,%rcx
# qhasm: t4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<t4=int64#8
# asm 2: and <mulredmask=%rsi,<t4=%r10
and %rsi,%r10
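#
# Second reduction step: one sequential carry pass.  A rough C sketch of
# what the interleaved shifts and masks above compute (illustrative
# names, not part of this file):
#
#     uint64_t c = t[0] >> 51;  t[0] &= MASK51;
#     for (int i = 1; i < 5; i++) {
#         t[i] += c;  c = t[i] >> 51;  t[i] &= MASK51;
#     }
#     t[0] += 19 * c;            /* 2^255 = 19 (mod 2^255-19) */
#
# After this pass every limb fits below 2^52, the normal working range
# for the radix-2^51 representation.
#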
# qhasm: stackt0 = t0
# asm 1: movq <t0=int64#4,>stackt0=stack64#8
# asm 2: movq <t0=%rcx,>stackt0=56(%rsp)
movq %rcx,56(%rsp)
# qhasm: stackt1 = t1
# asm 1: movq <t1=int64#5,>stackt1=stack64#9
# asm 2: movq <t1=%r8,>stackt1=64(%rsp)
movq %r8,64(%rsp)
# qhasm: stackt2 = t2
# asm 1: movq <t2=int64#6,>stackt2=stack64#10
# asm 2: movq <t2=%r9,>stackt2=72(%rsp)
movq %r9,72(%rsp)
# qhasm: stackt3 = t3
# asm 1: movq <t3=int64#7,>stackt3=stack64#11
# asm 2: movq <t3=%rax,>stackt3=80(%rsp)
movq %rax,80(%rsp)
# qhasm: stackt4 = t4
# asm 1: movq <t4=int64#8,>stackt4=stack64#12
# asm 2: movq <t4=%r10,>stackt4=88(%rsp)
movq %r10,88(%rsp)
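#
# The reduced product is spilled to stackt0..stackt4 so the working
# registers can be reused: the next block multiplies it by the
# precomputed curve constant 2*d (batch_EC2D0..batch_EC2D4, one limb
# each) to form the t2d component of the pniels result.
#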
# qhasm: mulrax = stackt3
# asm 1: movq <stackt3=stack64#11,>mulrax=int64#2
# asm 2: movq <stackt3=80(%rsp),>mulrax=%rsi
movq 80(%rsp),%rsi
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
imulq $19,%rsi,%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#13
# asm 2: movq <mulrax=%rax,>mulx319_stack=96(%rsp)
movq %rax,96(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: t2d0 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d0=int64#2
# asm 2: mov <mulrax=%rax,>t2d0=%rsi
mov %rax,%rsi
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
# asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
mov %rdx,%rcx
# qhasm: mulrax = stackt4
# asm 1: movq <stackt4=stack64#12,>mulrax=int64#3
# asm 2: movq <stackt4=88(%rsp),>mulrax=%rdx
movq 88(%rsp),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#14
# asm 2: movq <mulrax=%rax,>mulx419_stack=104(%rsp)
movq %rax,104(%rsp)
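#
# As in the multiplication above, 19*stackt3 and 19*stackt4 are
# precomputed and parked in mulx319_stack/mulx419_stack so that cross
# terms which would spill past limb position 4 can be folded straight
# back into the low limbs.
#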
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
# asm 2: add <mulrax=%rax,<t2d0=%rsi
add %rax,%rsi
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx
# qhasm: mulrax = stackt0
# asm 1: movq <stackt0=stack64#8,>mulrax=int64#7
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
# asm 2: add <mulrax=%rax,<t2d0=%rsi
add %rax,%rsi
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx
# qhasm: mulrax = stackt0
# asm 1: movq <stackt0=stack64#8,>mulrax=int64#7
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: t2d1 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d1=int64#5
# asm 2: mov <mulrax=%rax,>t2d1=%r8
mov %rax,%r8
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr11=%r9
mov %rdx,%r9
# qhasm: mulrax = stackt0
# asm 1: movq <stackt0=stack64#8,>mulrax=int64#7
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: t2d2 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d2=int64#8
# asm 2: mov <mulrax=%rax,>t2d2=%r10
mov %rax,%r10
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
# asm 2: mov <mulrdx=%rdx,>mulr21=%r11
mov %rdx,%r11
# qhasm: mulrax = stackt0
# asm 1: movq <stackt0=stack64#8,>mulrax=int64#7
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: t2d3 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d3=int64#10
# asm 2: mov <mulrax=%rax,>t2d3=%r12
mov %rax,%r12
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
# asm 2: mov <mulrdx=%rdx,>mulr31=%r13
mov %rdx,%r13
# qhasm: mulrax = stackt0
# asm 1: movq <stackt0=stack64#8,>mulrax=int64#7
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: t2d4 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d4=int64#12
# asm 2: mov <mulrax=%rax,>t2d4=%r14
mov %rax,%r14
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
# asm 2: mov <mulrdx=%rdx,>mulr41=%r15
mov %rdx,%r15
# qhasm: mulrax = stackt1
# asm 1: movq <stackt1=stack64#9,>mulrax=int64#7
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
# asm 2: add <mulrax=%rax,<t2d1=%r8
add %rax,%r8
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9
# qhasm: mulrax = stackt1
# asm 1: movq <stackt1=stack64#9,>mulrax=int64#7
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
# asm 2: add <mulrax=%rax,<t2d2=%r10
add %rax,%r10
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11
# qhasm: mulrax = stackt1
# asm 1: movq <stackt1=stack64#9,>mulrax=int64#7
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
# asm 2: add <mulrax=%rax,<t2d3=%r12
add %rax,%r12
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulrax = stackt1
# asm 1: movq <stackt1=stack64#9,>mulrax=int64#7
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
# asm 2: add <mulrax=%rax,<t2d4=%r14
add %rax,%r14
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15
# qhasm: mulrax = stackt1
# asm 1: movq <stackt1=stack64#9,>mulrax=int64#3
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rdx
movq 64(%rsp),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
# asm 2: add <mulrax=%rax,<t2d0=%rsi
add %rax,%rsi
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx
# qhasm: mulrax = stackt2
# asm 1: movq <stackt2=stack64#10,>mulrax=int64#7
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
# asm 2: add <mulrax=%rax,<t2d2=%r10
add %rax,%r10
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11
# qhasm: mulrax = stackt2
# asm 1: movq <stackt2=stack64#10,>mulrax=int64#7
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
# asm 2: add <mulrax=%rax,<t2d3=%r12
add %rax,%r12
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulrax = stackt2
# asm 1: movq <stackt2=stack64#10,>mulrax=int64#7
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
# asm 2: add <mulrax=%rax,<t2d4=%r14
add %rax,%r14
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15
# qhasm: mulrax = stackt2
# asm 1: movq <stackt2=stack64#10,>mulrax=int64#3
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rdx
movq 72(%rsp),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
# asm 2: add <mulrax=%rax,<t2d0=%rsi
add %rax,%rsi
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx
# qhasm: mulrax = stackt2
# asm 1: movq <stackt2=stack64#10,>mulrax=int64#3
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rdx
movq 72(%rsp),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
# asm 2: add <mulrax=%rax,<t2d1=%r8
add %rax,%r8
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9
# qhasm: mulrax = stackt3
# asm 1: movq <stackt3=stack64#11,>mulrax=int64#7
# asm 2: movq <stackt3=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
# asm 2: add <mulrax=%rax,<t2d3=%r12
add %rax,%r12
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulrax = stackt3
# asm 1: movq <stackt3=stack64#11,>mulrax=int64#7
# asm 2: movq <stackt3=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
# asm 2: add <mulrax=%rax,<t2d4=%r14
add %rax,%r14
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
# asm 2: add <mulrax=%rax,<t2d1=%r8
add %rax,%r8
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
# asm 2: add <mulrax=%rax,<t2d2=%r10
add %rax,%r10
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11
# qhasm: mulrax = stackt4
# asm 1: movq <stackt4=stack64#12,>mulrax=int64#7
# asm 2: movq <stackt4=88(%rsp),>mulrax=%rax
movq 88(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
# asm 2: add <mulrax=%rax,<t2d4=%r14
add %rax,%r14
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
# asm 2: add <mulrax=%rax,<t2d1=%r8
add %rax,%r8
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
# asm 2: add <mulrax=%rax,<t2d2=%r10
add %rax,%r10
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
# asm 2: add <mulrax=%rax,<t2d3=%r12
add %rax,%r12
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
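#
# The t * 2d product is now complete in the accumulators
# t2d0:mulr01 .. t2d4:mulr41 and is reduced exactly as before.
#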
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.t2d0) << 13
# asm 1: shld $13,<t2d0=int64#2,<mulr01=int64#4
# asm 2: shld $13,<t2d0=%rsi,<mulr01=%rcx
shld $13,%rsi,%rcx
# qhasm: t2d0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d0=int64#2
# asm 2: and <mulredmask=%rdx,<t2d0=%rsi
and %rdx,%rsi
# qhasm: mulr11 = (mulr11.t2d1) << 13
# asm 1: shld $13,<t2d1=int64#5,<mulr11=int64#6
# asm 2: shld $13,<t2d1=%r8,<mulr11=%r9
shld $13,%r8,%r9
# qhasm: t2d1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d1=int64#5
# asm 2: and <mulredmask=%rdx,<t2d1=%r8
and %rdx,%r8
# qhasm: t2d1 += mulr01
# asm 1: add <mulr01=int64#4,<t2d1=int64#5
# asm 2: add <mulr01=%rcx,<t2d1=%r8
add %rcx,%r8
# qhasm: mulr21 = (mulr21.t2d2) << 13
# asm 1: shld $13,<t2d2=int64#8,<mulr21=int64#9
# asm 2: shld $13,<t2d2=%r10,<mulr21=%r11
shld $13,%r10,%r11
# qhasm: t2d2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d2=int64#8
# asm 2: and <mulredmask=%rdx,<t2d2=%r10
and %rdx,%r10
# qhasm: t2d2 += mulr11
# asm 1: add <mulr11=int64#6,<t2d2=int64#8
# asm 2: add <mulr11=%r9,<t2d2=%r10
add %r9,%r10
# qhasm: mulr31 = (mulr31.t2d3) << 13
# asm 1: shld $13,<t2d3=int64#10,<mulr31=int64#11
# asm 2: shld $13,<t2d3=%r12,<mulr31=%r13
shld $13,%r12,%r13
# qhasm: t2d3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d3=int64#10
# asm 2: and <mulredmask=%rdx,<t2d3=%r12
and %rdx,%r12
# qhasm: t2d3 += mulr21
# asm 1: add <mulr21=int64#9,<t2d3=int64#10
# asm 2: add <mulr21=%r11,<t2d3=%r12
add %r11,%r12
# qhasm: mulr41 = (mulr41.t2d4) << 13
# asm 1: shld $13,<t2d4=int64#12,<mulr41=int64#13
# asm 2: shld $13,<t2d4=%r14,<mulr41=%r15
shld $13,%r14,%r15
# qhasm: t2d4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d4=int64#12
# asm 2: and <mulredmask=%rdx,<t2d4=%r14
and %rdx,%r14
# qhasm: t2d4 += mulr31
# asm 1: add <mulr31=int64#11,<t2d4=int64#12
# asm 2: add <mulr31=%r13,<t2d4=%r14
add %r13,%r14
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
# asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
imulq $19,%r15,%rcx
# qhasm: t2d0 += mulr41
# asm 1: add <mulr41=int64#4,<t2d0=int64#2
# asm 2: add <mulr41=%rcx,<t2d0=%rsi
add %rcx,%rsi
# qhasm: mult = t2d0
# asm 1: mov <t2d0=int64#2,>mult=int64#4
# asm 2: mov <t2d0=%rsi,>mult=%rcx
mov %rsi,%rcx
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#4
# asm 2: shr $51,<mult=%rcx
shr $51,%rcx
# qhasm: mult += t2d1
# asm 1: add <t2d1=int64#5,<mult=int64#4
# asm 2: add <t2d1=%r8,<mult=%rcx
add %r8,%rcx
# qhasm: t2d1 = mult
# asm 1: mov <mult=int64#4,>t2d1=int64#5
# asm 2: mov <mult=%rcx,>t2d1=%r8
mov %rcx,%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#4
# asm 2: shr $51,<mult=%rcx
shr $51,%rcx
# qhasm: t2d0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d0=int64#2
# asm 2: and <mulredmask=%rdx,<t2d0=%rsi
and %rdx,%rsi
# qhasm: mult += t2d2
# asm 1: add <t2d2=int64#8,<mult=int64#4
# asm 2: add <t2d2=%r10,<mult=%rcx
add %r10,%rcx
# qhasm: t2d2 = mult
# asm 1: mov <mult=int64#4,>t2d2=int64#6
# asm 2: mov <mult=%rcx,>t2d2=%r9
mov %rcx,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#4
# asm 2: shr $51,<mult=%rcx
shr $51,%rcx
# qhasm: t2d1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d1=int64#5
# asm 2: and <mulredmask=%rdx,<t2d1=%r8
and %rdx,%r8
# qhasm: mult += t2d3
# asm 1: add <t2d3=int64#10,<mult=int64#4
# asm 2: add <t2d3=%r12,<mult=%rcx
add %r12,%rcx
# qhasm: t2d3 = mult
# asm 1: mov <mult=int64#4,>t2d3=int64#7
# asm 2: mov <mult=%rcx,>t2d3=%rax
mov %rcx,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#4
# asm 2: shr $51,<mult=%rcx
shr $51,%rcx
# qhasm: t2d2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d2=int64#6
# asm 2: and <mulredmask=%rdx,<t2d2=%r9
and %rdx,%r9
# qhasm: mult += t2d4
# asm 1: add <t2d4=int64#12,<mult=int64#4
# asm 2: add <t2d4=%r14,<mult=%rcx
add %r14,%rcx
# qhasm: t2d4 = mult
# asm 1: mov <mult=int64#4,>t2d4=int64#8
# asm 2: mov <mult=%rcx,>t2d4=%r10
mov %rcx,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#4
# asm 2: shr $51,<mult=%rcx
shr $51,%rcx
# qhasm: t2d3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d3=int64#7
# asm 2: and <mulredmask=%rdx,<t2d3=%rax
and %rdx,%rax
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#4,>mult=int64#4
# asm 2: imulq $19,<mult=%rcx,>mult=%rcx
imulq $19,%rcx,%rcx
# qhasm: t2d0 += mult
# asm 1: add <mult=int64#4,<t2d0=int64#2
# asm 2: add <mult=%rcx,<t2d0=%rsi
add %rcx,%rsi
# qhasm: t2d4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<t2d4=int64#8
# asm 2: and <mulredmask=%rdx,<t2d4=%r10
and %rdx,%r10
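#
# Same two reduction steps as above: split each accumulator at bit 51,
# run one sequential carry pass, and wrap the top carry back into limb 0
# via *19.  t2d0..t2d4 now hold the partially reduced result.
#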
# qhasm: *(uint64 *)(rp + 120) = t2d0
# asm 1: movq <t2d0=int64#2,120(<rp=int64#1)
# asm 2: movq <t2d0=%rsi,120(<rp=%rdi)
movq %rsi,120(%rdi)
# qhasm: *(uint64 *)(rp + 128) = t2d1
# asm 1: movq <t2d1=int64#5,128(<rp=int64#1)
# asm 2: movq <t2d1=%r8,128(<rp=%rdi)
movq %r8,128(%rdi)
# qhasm: *(uint64 *)(rp + 136) = t2d2
# asm 1: movq <t2d2=int64#6,136(<rp=int64#1)
# asm 2: movq <t2d2=%r9,136(<rp=%rdi)
movq %r9,136(%rdi)
# qhasm: *(uint64 *)(rp + 144) = t2d3
# asm 1: movq <t2d3=int64#7,144(<rp=int64#1)
# asm 2: movq <t2d3=%rax,144(<rp=%rdi)
movq %rax,144(%rdi)
# qhasm: *(uint64 *)(rp + 152) = t2d4
# asm 1: movq <t2d4=int64#8,152(<rp=int64#1)
# asm 2: movq <t2d4=%r10,152(<rp=%rdi)
movq %r10,152(%rdi)
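#
# t2d is written to rp+120..rp+152, i.e. the fourth 40-byte field
# element of the result (following ysubx, xaddy and z, assuming the
# usual ge25519_pniels layout of this code base).
#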
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
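#
# The callee-saved registers are restored above; %r11 (caller1) was
# reloaded with the stack adjustment recorded by the prologue, so the
# "add %r11,%rsp" below releases the frame, and the two movs follow the
# standard qhasm leave convention for the return registers.
#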
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret