- # qhasm: int64 rp
- # qhasm: int64 pp
- # qhasm: int64 qp
- # qhasm: input rp
- # qhasm: input pp
- # qhasm: input qp
- # qhasm: int64 caller1
- # qhasm: int64 caller2
- # qhasm: int64 caller3
- # qhasm: int64 caller4
- # qhasm: int64 caller5
- # qhasm: int64 caller6
- # qhasm: int64 caller7
- # qhasm: caller caller1
- # qhasm: caller caller2
- # qhasm: caller caller3
- # qhasm: caller caller4
- # qhasm: caller caller5
- # qhasm: caller caller6
- # qhasm: caller caller7
- # qhasm: stack64 caller1_stack
- # qhasm: stack64 caller2_stack
- # qhasm: stack64 caller3_stack
- # qhasm: stack64 caller4_stack
- # qhasm: stack64 caller5_stack
- # qhasm: stack64 caller6_stack
- # qhasm: stack64 caller7_stack
- # qhasm: int64 a0
- # qhasm: int64 a1
- # qhasm: int64 a2
- # qhasm: int64 a3
- # qhasm: stack64 a0_stack
- # qhasm: stack64 a1_stack
- # qhasm: stack64 a2_stack
- # qhasm: stack64 a3_stack
- # qhasm: int64 b0
- # qhasm: int64 b1
- # qhasm: int64 b2
- # qhasm: int64 b3
- # qhasm: stack64 b0_stack
- # qhasm: stack64 b1_stack
- # qhasm: stack64 b2_stack
- # qhasm: stack64 b3_stack
- # qhasm: int64 c0
- # qhasm: int64 c1
- # qhasm: int64 c2
- # qhasm: int64 c3
- # qhasm: stack64 c0_stack
- # qhasm: stack64 c1_stack
- # qhasm: stack64 c2_stack
- # qhasm: stack64 c3_stack
- # qhasm: int64 d0
- # qhasm: int64 d1
- # qhasm: int64 d2
- # qhasm: int64 d3
- # qhasm: stack64 d0_stack
- # qhasm: stack64 d1_stack
- # qhasm: stack64 d2_stack
- # qhasm: stack64 d3_stack
- # qhasm: int64 e0
- # qhasm: int64 e1
- # qhasm: int64 e2
- # qhasm: int64 e3
- # qhasm: stack64 e0_stack
- # qhasm: stack64 e1_stack
- # qhasm: stack64 e2_stack
- # qhasm: stack64 e3_stack
- # qhasm: int64 f0
- # qhasm: int64 f1
- # qhasm: int64 f2
- # qhasm: int64 f3
- # qhasm: stack64 f0_stack
- # qhasm: stack64 f1_stack
- # qhasm: stack64 f2_stack
- # qhasm: stack64 f3_stack
- # qhasm: int64 g0
- # qhasm: int64 g1
- # qhasm: int64 g2
- # qhasm: int64 g3
- # qhasm: stack64 g0_stack
- # qhasm: stack64 g1_stack
- # qhasm: stack64 g2_stack
- # qhasm: stack64 g3_stack
- # qhasm: int64 h0
- # qhasm: int64 h1
- # qhasm: int64 h2
- # qhasm: int64 h3
- # qhasm: stack64 h0_stack
- # qhasm: stack64 h1_stack
- # qhasm: stack64 h2_stack
- # qhasm: stack64 h3_stack
- # qhasm: int64 qt0
- # qhasm: int64 qt1
- # qhasm: int64 qt2
- # qhasm: int64 qt3
- # qhasm: stack64 qt0_stack
- # qhasm: stack64 qt1_stack
- # qhasm: stack64 qt2_stack
- # qhasm: stack64 qt3_stack
- # qhasm: int64 t10
- # qhasm: int64 t11
- # qhasm: int64 t12
- # qhasm: int64 t13
- # qhasm: stack64 t10_stack
- # qhasm: stack64 t11_stack
- # qhasm: stack64 t12_stack
- # qhasm: stack64 t13_stack
- # qhasm: int64 t20
- # qhasm: int64 t21
- # qhasm: int64 t22
- # qhasm: int64 t23
- # qhasm: stack64 t20_stack
- # qhasm: stack64 t21_stack
- # qhasm: stack64 t22_stack
- # qhasm: stack64 t23_stack
- # qhasm: int64 rx0
- # qhasm: int64 rx1
- # qhasm: int64 rx2
- # qhasm: int64 rx3
- # qhasm: int64 ry0
- # qhasm: int64 ry1
- # qhasm: int64 ry2
- # qhasm: int64 ry3
- # qhasm: int64 rz0
- # qhasm: int64 rz1
- # qhasm: int64 rz2
- # qhasm: int64 rz3
- # qhasm: int64 rt0
- # qhasm: int64 rt1
- # qhasm: int64 rt2
- # qhasm: int64 rt3
- # qhasm: int64 mulr4
- # qhasm: int64 mulr5
- # qhasm: int64 mulr6
- # qhasm: int64 mulr7
- # qhasm: int64 mulr8
- # qhasm: int64 mulrax
- # qhasm: int64 mulrdx
- # qhasm: int64 mulx0
- # qhasm: int64 mulx1
- # qhasm: int64 mulx2
- # qhasm: int64 mulx3
- # qhasm: int64 mulc
- # qhasm: int64 mulzero
- # qhasm: int64 muli38
- # qhasm: int64 addt0
- # qhasm: int64 addt1
- # qhasm: int64 subt0
- # qhasm: int64 subt1
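- # Overview: judging by the symbol name and the arithmetic below, this
- # qhasm-generated routine computes r = p + q on the Ed25519 curve, with p in
- # extended (p3) coordinates, q in precomputed Niels form (y-x, y+x, 2dxy),
- # and the result r in p1p1 form: A = (Y1-X1)*(Y2-X2), B = (Y1+X1)*(Y2+X2),
- # then X3 = B-A and Y3 = B+A, all over GF(2^255-19) in 4x64-bit limbs.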
- # qhasm: enter CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
- .text
- .p2align 5
- .globl _CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
- .globl CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
- _CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1):
- CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1):
- mov %rsp,%r11
- and $31,%r11
- add $128,%r11
- sub %r11,%rsp
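- # The three instructions above reserve at least 128 bytes of scratch space
- # and round %rsp down to a 32-byte boundary; %r11 keeps the adjustment so it
- # can be stashed below and used to restore %rsp on exit.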
- # qhasm: caller1_stack = caller1
- # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
- # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
- movq %r11,0(%rsp)
- # qhasm: caller2_stack = caller2
- # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
- # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
- movq %r12,8(%rsp)
- # qhasm: caller3_stack = caller3
- # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
- # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
- movq %r13,16(%rsp)
- # qhasm: caller4_stack = caller4
- # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
- # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
- movq %r14,24(%rsp)
- # qhasm: caller5_stack = caller5
- # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
- # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
- movq %r15,32(%rsp)
- # qhasm: caller6_stack = caller6
- # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
- # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
- movq %rbx,40(%rsp)
- # qhasm: caller7_stack = caller7
- # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
- # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
- movq %rbp,48(%rsp)
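- # %rbx, %rbp, and %r12-%r15 are callee-saved in the SysV AMD64 ABI, so they
- # are spilled here before being used as scratch registers; slot 0 holds the
- # stack adjustment saved in %r11 above.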
- # qhasm: qp = qp
- # asm 1: mov <qp=int64#3,>qp=int64#4
- # asm 2: mov <qp=%rdx,>qp=%rcx
- mov %rdx,%rcx
- # qhasm: a0 = *(uint64 *)(pp + 32)
- # asm 1: movq 32(<pp=int64#2),>a0=int64#3
- # asm 2: movq 32(<pp=%rsi),>a0=%rdx
- movq 32(%rsi),%rdx
- # qhasm: a1 = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>a1=int64#5
- # asm 2: movq 40(<pp=%rsi),>a1=%r8
- movq 40(%rsi),%r8
- # qhasm: a2 = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>a2=int64#6
- # asm 2: movq 48(<pp=%rsi),>a2=%r9
- movq 48(%rsi),%r9
- # qhasm: a3 = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>a3=int64#7
- # asm 2: movq 56(<pp=%rsi),>a3=%rax
- movq 56(%rsi),%rax
- # qhasm: b0 = a0
- # asm 1: mov <a0=int64#3,>b0=int64#8
- # asm 2: mov <a0=%rdx,>b0=%r10
- mov %rdx,%r10
- # qhasm: b1 = a1
- # asm 1: mov <a1=int64#5,>b1=int64#9
- # asm 2: mov <a1=%r8,>b1=%r11
- mov %r8,%r11
- # qhasm: b2 = a2
- # asm 1: mov <a2=int64#6,>b2=int64#10
- # asm 2: mov <a2=%r9,>b2=%r12
- mov %r9,%r12
- # qhasm: b3 = a3
- # asm 1: mov <a3=int64#7,>b3=int64#11
- # asm 2: mov <a3=%rax,>b3=%r13
- mov %rax,%r13
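- # a = Y1 (loaded from pp+32..pp+56); b starts as a copy of Y1 so that the
- # difference Y1 - X1 and the sum Y1 + X1 can be formed independently below.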
- # qhasm: carry? a0 -= *(uint64 *) (pp + 0)
- # asm 1: subq 0(<pp=int64#2),<a0=int64#3
- # asm 2: subq 0(<pp=%rsi),<a0=%rdx
- subq 0(%rsi),%rdx
- # qhasm: carry? a1 -= *(uint64 *) (pp + 8) - carry
- # asm 1: sbbq 8(<pp=int64#2),<a1=int64#5
- # asm 2: sbbq 8(<pp=%rsi),<a1=%r8
- sbbq 8(%rsi),%r8
- # qhasm: carry? a2 -= *(uint64 *) (pp + 16) - carry
- # asm 1: sbbq 16(<pp=int64#2),<a2=int64#6
- # asm 2: sbbq 16(<pp=%rsi),<a2=%r9
- sbbq 16(%rsi),%r9
- # qhasm: carry? a3 -= *(uint64 *) (pp + 24) - carry
- # asm 1: sbbq 24(<pp=int64#2),<a3=int64#7
- # asm 2: sbbq 24(<pp=%rsi),<a3=%rax
- sbbq 24(%rsi),%rax
- # qhasm: subt0 = 0
- # asm 1: mov $0,>subt0=int64#12
- # asm 2: mov $0,>subt0=%r14
- mov $0,%r14
- # qhasm: subt1 = 38
- # asm 1: mov $38,>subt1=int64#13
- # asm 2: mov $38,>subt1=%r15
- mov $38,%r15
- # qhasm: subt1 = subt0 if !carry
- # asm 1: cmovae <subt0=int64#12,<subt1=int64#13
- # asm 2: cmovae <subt0=%r14,<subt1=%r15
- cmovae %r14,%r15
- # qhasm: carry? a0 -= subt1
- # asm 1: sub <subt1=int64#13,<a0=int64#3
- # asm 2: sub <subt1=%r15,<a0=%rdx
- sub %r15,%rdx
- # qhasm: carry? a1 -= subt0 - carry
- # asm 1: sbb <subt0=int64#12,<a1=int64#5
- # asm 2: sbb <subt0=%r14,<a1=%r8
- sbb %r14,%r8
- # qhasm: carry? a2 -= subt0 - carry
- # asm 1: sbb <subt0=int64#12,<a2=int64#6
- # asm 2: sbb <subt0=%r14,<a2=%r9
- sbb %r14,%r9
- # qhasm: carry? a3 -= subt0 - carry
- # asm 1: sbb <subt0=int64#12,<a3=int64#7
- # asm 2: sbb <subt0=%r14,<a3=%rax
- sbb %r14,%rax
- # qhasm: subt0 = subt1 if carry
- # asm 1: cmovc <subt1=int64#13,<subt0=int64#12
- # asm 2: cmovc <subt1=%r15,<subt0=%r14
- cmovc %r15,%r14
- # qhasm: a0 -= subt0
- # asm 1: sub <subt0=int64#12,<a0=int64#3
- # asm 2: sub <subt0=%r14,<a0=%rdx
- sub %r14,%rdx
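- # a = Y1 - X1 mod 2^255-19. Since 2^256 mod (2^255-19) = 38, a borrow out of
- # the 256-bit subtraction is corrected by conditionally subtracting 38; the
- # cmovc/sub pair above absorbs the (rare) borrow from that correction itself.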
- # qhasm: carry? b0 += *(uint64 *) (pp + 0)
- # asm 1: addq 0(<pp=int64#2),<b0=int64#8
- # asm 2: addq 0(<pp=%rsi),<b0=%r10
- addq 0(%rsi),%r10
- # qhasm: carry? b1 += *(uint64 *) (pp + 8) + carry
- # asm 1: adcq 8(<pp=int64#2),<b1=int64#9
- # asm 2: adcq 8(<pp=%rsi),<b1=%r11
- adcq 8(%rsi),%r11
- # qhasm: carry? b2 += *(uint64 *) (pp + 16) + carry
- # asm 1: adcq 16(<pp=int64#2),<b2=int64#10
- # asm 2: adcq 16(<pp=%rsi),<b2=%r12
- adcq 16(%rsi),%r12
- # qhasm: carry? b3 += *(uint64 *) (pp + 24) + carry
- # asm 1: adcq 24(<pp=int64#2),<b3=int64#11
- # asm 2: adcq 24(<pp=%rsi),<b3=%r13
- adcq 24(%rsi),%r13
- # qhasm: addt0 = 0
- # asm 1: mov $0,>addt0=int64#12
- # asm 2: mov $0,>addt0=%r14
- mov $0,%r14
- # qhasm: addt1 = 38
- # asm 1: mov $38,>addt1=int64#13
- # asm 2: mov $38,>addt1=%r15
- mov $38,%r15
- # qhasm: addt1 = addt0 if !carry
- # asm 1: cmovae <addt0=int64#12,<addt1=int64#13
- # asm 2: cmovae <addt0=%r14,<addt1=%r15
- cmovae %r14,%r15
- # qhasm: carry? b0 += addt1
- # asm 1: add <addt1=int64#13,<b0=int64#8
- # asm 2: add <addt1=%r15,<b0=%r10
- add %r15,%r10
- # qhasm: carry? b1 += addt0 + carry
- # asm 1: adc <addt0=int64#12,<b1=int64#9
- # asm 2: adc <addt0=%r14,<b1=%r11
- adc %r14,%r11
- # qhasm: carry? b2 += addt0 + carry
- # asm 1: adc <addt0=int64#12,<b2=int64#10
- # asm 2: adc <addt0=%r14,<b2=%r12
- adc %r14,%r12
- # qhasm: carry? b3 += addt0 + carry
- # asm 1: adc <addt0=int64#12,<b3=int64#11
- # asm 2: adc <addt0=%r14,<b3=%r13
- adc %r14,%r13
- # qhasm: addt0 = addt1 if carry
- # asm 1: cmovc <addt1=int64#13,<addt0=int64#12
- # asm 2: cmovc <addt1=%r15,<addt0=%r14
- cmovc %r15,%r14
- # qhasm: b0 += addt0
- # asm 1: add <addt0=int64#12,<b0=int64#8
- # asm 2: add <addt0=%r14,<b0=%r10
- add %r14,%r10
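- # b = Y1 + X1 mod 2^255-19: a carry out of the 256-bit addition contributes
- # 2^256, which is folded back in as +38; the conditional second add above
- # absorbs any carry produced by the correction itself.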
- # qhasm: a0_stack = a0
- # asm 1: movq <a0=int64#3,>a0_stack=stack64#8
- # asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
- movq %rdx,56(%rsp)
- # qhasm: a1_stack = a1
- # asm 1: movq <a1=int64#5,>a1_stack=stack64#9
- # asm 2: movq <a1=%r8,>a1_stack=64(%rsp)
- movq %r8,64(%rsp)
- # qhasm: a2_stack = a2
- # asm 1: movq <a2=int64#6,>a2_stack=stack64#10
- # asm 2: movq <a2=%r9,>a2_stack=72(%rsp)
- movq %r9,72(%rsp)
- # qhasm: a3_stack = a3
- # asm 1: movq <a3=int64#7,>a3_stack=stack64#11
- # asm 2: movq <a3=%rax,>a3_stack=80(%rsp)
- movq %rax,80(%rsp)
- # qhasm: b0_stack = b0
- # asm 1: movq <b0=int64#8,>b0_stack=stack64#12
- # asm 2: movq <b0=%r10,>b0_stack=88(%rsp)
- movq %r10,88(%rsp)
- # qhasm: b1_stack = b1
- # asm 1: movq <b1=int64#9,>b1_stack=stack64#13
- # asm 2: movq <b1=%r11,>b1_stack=96(%rsp)
- movq %r11,96(%rsp)
- # qhasm: b2_stack = b2
- # asm 1: movq <b2=int64#10,>b2_stack=stack64#14
- # asm 2: movq <b2=%r12,>b2_stack=104(%rsp)
- movq %r12,104(%rsp)
- # qhasm: b3_stack = b3
- # asm 1: movq <b3=int64#11,>b3_stack=stack64#15
- # asm 2: movq <b3=%r13,>b3_stack=112(%rsp)
- movq %r13,112(%rsp)
- # qhasm: mulr4 = 0
- # asm 1: mov $0,>mulr4=int64#5
- # asm 2: mov $0,>mulr4=%r8
- mov $0,%r8
- # qhasm: mulr5 = 0
- # asm 1: mov $0,>mulr5=int64#6
- # asm 2: mov $0,>mulr5=%r9
- mov $0,%r9
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#8
- # asm 2: mov $0,>mulr6=%r10
- mov $0,%r10
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#9
- # asm 2: mov $0,>mulr7=%r11
- mov $0,%r11
- # qhasm: mulx0 = a0_stack
- # asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10
- # asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12
- movq 56(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 0)
- # asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 0(<qp=%rcx),>mulrax=%rax
- movq 0(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: a0 = mulrax
- # asm 1: mov <mulrax=int64#7,>a0=int64#11
- # asm 2: mov <mulrax=%rax,>a0=%r13
- mov %rax,%r13
- # qhasm: a1 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>a1=int64#12
- # asm 2: mov <mulrdx=%rdx,>a1=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(qp + 8)
- # asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 8(<qp=%rcx),>mulrax=%rax
- movq 8(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? a1 += mulrax
- # asm 1: add <mulrax=int64#7,<a1=int64#12
- # asm 2: add <mulrax=%rax,<a1=%r14
- add %rax,%r14
- # qhasm: a2 = 0
- # asm 1: mov $0,>a2=int64#13
- # asm 2: mov $0,>a2=%r15
- mov $0,%r15
- # qhasm: a2 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<a2=int64#13
- # asm 2: adc <mulrdx=%rdx,<a2=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(qp + 16)
- # asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 16(<qp=%rcx),>mulrax=%rax
- movq 16(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? a2 += mulrax
- # asm 1: add <mulrax=int64#7,<a2=int64#13
- # asm 2: add <mulrax=%rax,<a2=%r15
- add %rax,%r15
- # qhasm: a3 = 0
- # asm 1: mov $0,>a3=int64#14
- # asm 2: mov $0,>a3=%rbx
- mov $0,%rbx
- # qhasm: a3 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<a3=int64#14
- # asm 2: adc <mulrdx=%rdx,<a3=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(qp + 24)
- # asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 24(<qp=%rcx),>mulrax=%rax
- movq 24(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? a3 += mulrax
- # asm 1: add <mulrax=int64#7,<a3=int64#14
- # asm 2: add <mulrax=%rax,<a3=%rbx
- add %rax,%rbx
- # qhasm: mulr4 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr4=%r8
- adc %rdx,%r8
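- # The block above is the first row of a 4x4 schoolbook multiplication:
- # mulx0 (limb 0 of Y1-X1) times the four limbs of qp[0..31], accumulated in
- # a0..a3 with the top carry landing in mulr4. Three more rows follow, each
- # shifted up one limb, threading carries through mulc.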
- # qhasm: mulx1 = a1_stack
- # asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10
- # asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12
- movq 64(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 0)
- # asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 0(<qp=%rcx),>mulrax=%rax
- movq 0(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? a1 += mulrax
- # asm 1: add <mulrax=int64#7,<a1=int64#12
- # asm 2: add <mulrax=%rax,<a1=%r14
- add %rax,%r14
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 8)
- # asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 8(<qp=%rcx),>mulrax=%rax
- movq 8(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? a2 += mulrax
- # asm 1: add <mulrax=int64#7,<a2=int64#13
- # asm 2: add <mulrax=%rax,<a2=%r15
- add %rax,%r15
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? a2 += mulc
- # asm 1: add <mulc=int64#15,<a2=int64#13
- # asm 2: add <mulc=%rbp,<a2=%r15
- add %rbp,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 16)
- # asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 16(<qp=%rcx),>mulrax=%rax
- movq 16(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? a3 += mulrax
- # asm 1: add <mulrax=int64#7,<a3=int64#14
- # asm 2: add <mulrax=%rax,<a3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? a3 += mulc
- # asm 1: add <mulc=int64#15,<a3=int64#14
- # asm 2: add <mulc=%rbp,<a3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 24)
- # asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 24(<qp=%rcx),>mulrax=%rax
- movq 24(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulr5 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr5=%r9
- adc %rdx,%r9
- # qhasm: mulx2 = a2_stack
- # asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10
- # asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12
- movq 72(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 0)
- # asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 0(<qp=%rcx),>mulrax=%rax
- movq 0(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? a2 += mulrax
- # asm 1: add <mulrax=int64#7,<a2=int64#13
- # asm 2: add <mulrax=%rax,<a2=%r15
- add %rax,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 8)
- # asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 8(<qp=%rcx),>mulrax=%rax
- movq 8(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? a3 += mulrax
- # asm 1: add <mulrax=int64#7,<a3=int64#14
- # asm 2: add <mulrax=%rax,<a3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? a3 += mulc
- # asm 1: add <mulc=int64#15,<a3=int64#14
- # asm 2: add <mulc=%rbp,<a3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 16)
- # asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 16(<qp=%rcx),>mulrax=%rax
- movq 16(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 24)
- # asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 24(<qp=%rcx),>mulrax=%rax
- movq 24(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r10
- adc %rdx,%r10
- # qhasm: mulx3 = a3_stack
- # asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10
- # asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12
- movq 80(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 0)
- # asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 0(<qp=%rcx),>mulrax=%rax
- movq 0(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? a3 += mulrax
- # asm 1: add <mulrax=int64#7,<a3=int64#14
- # asm 2: add <mulrax=%rax,<a3=%rbx
- add %rax,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 8)
- # asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 8(<qp=%rcx),>mulrax=%rax
- movq 8(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 16)
- # asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 16(<qp=%rcx),>mulrax=%rax
- movq 16(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 24)
- # asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 24(<qp=%rcx),>mulrax=%rax
- movq 24(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#8
- # asm 2: add <mulrax=%rax,<mulr6=%r10
- add %rax,%r10
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr6 += mulc
- # asm 1: add <mulc=int64#15,<mulr6=int64#8
- # asm 2: add <mulc=%rbp,<mulr6=%r10
- add %rbp,%r10
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
- adc %rdx,%r11
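- # At this point a0..a3 and mulr4..mulr7 hold the full 512-bit product
- # (Y1 - X1) * qp[0..31]; the next block folds the high 256 bits back down.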
- # qhasm: mulrax = mulr4
- # asm 1: mov <mulr4=int64#5,>mulrax=int64#7
- # asm 2: mov <mulr4=%r8,>mulrax=%rax
- mov %r8,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: mulr4 = mulrax
- # asm 1: mov <mulrax=int64#7,>mulr4=int64#5
- # asm 2: mov <mulrax=%rax,>mulr4=%r8
- mov %rax,%r8
- # qhasm: mulrax = mulr5
- # asm 1: mov <mulr5=int64#6,>mulrax=int64#7
- # asm 2: mov <mulr5=%r9,>mulrax=%rax
- mov %r9,%rax
- # qhasm: mulr5 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr5=%r9
- mov %rdx,%r9
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrax = mulr6
- # asm 1: mov <mulr6=int64#8,>mulrax=int64#7
- # asm 2: mov <mulr6=%r10,>mulrax=%rax
- mov %r10,%rax
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#8
- # asm 2: mov $0,>mulr6=%r10
- mov $0,%r10
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r10
- adc %rdx,%r10
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#8
- # asm 2: add <mulrax=%rax,<mulr6=%r10
- add %rax,%r10
- # qhasm: mulrax = mulr7
- # asm 1: mov <mulr7=int64#9,>mulrax=int64#7
- # asm 2: mov <mulr7=%r11,>mulrax=%rax
- mov %r11,%rax
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#9
- # asm 2: mov $0,>mulr7=%r11
- mov $0,%r11
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
- adc %rdx,%r11
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr7 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr7=int64#9
- # asm 2: add <mulrax=%rax,<mulr7=%r11
- add %rax,%r11
- # qhasm: mulr8 = 0
- # asm 1: mov $0,>mulr8=int64#7
- # asm 2: mov $0,>mulr8=%rax
- mov $0,%rax
- # qhasm: mulr8 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
- # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: carry? a0 += mulr4
- # asm 1: add <mulr4=int64#5,<a0=int64#11
- # asm 2: add <mulr4=%r8,<a0=%r13
- add %r8,%r13
- # qhasm: carry? a1 += mulr5 + carry
- # asm 1: adc <mulr5=int64#6,<a1=int64#12
- # asm 2: adc <mulr5=%r9,<a1=%r14
- adc %r9,%r14
- # qhasm: carry? a2 += mulr6 + carry
- # asm 1: adc <mulr6=int64#8,<a2=int64#13
- # asm 2: adc <mulr6=%r10,<a2=%r15
- adc %r10,%r15
- # qhasm: carry? a3 += mulr7 + carry
- # asm 1: adc <mulr7=int64#9,<a3=int64#14
- # asm 2: adc <mulr7=%r11,<a3=%rbx
- adc %r11,%rbx
- # qhasm: mulzero = 0
- # asm 1: mov $0,>mulzero=int64#3
- # asm 2: mov $0,>mulzero=%rdx
- mov $0,%rdx
- # qhasm: mulr8 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
- # asm 2: adc <mulzero=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: mulr8 *= 38
- # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
- # asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
- imulq $38,%rax,%r8
- # qhasm: carry? a0 += mulr8
- # asm 1: add <mulr8=int64#5,<a0=int64#11
- # asm 2: add <mulr8=%r8,<a0=%r13
- add %r8,%r13
- # qhasm: carry? a1 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<a1=int64#12
- # asm 2: adc <mulzero=%rdx,<a1=%r14
- adc %rdx,%r14
- # qhasm: carry? a2 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<a2=int64#13
- # asm 2: adc <mulzero=%rdx,<a2=%r15
- adc %rdx,%r15
- # qhasm: carry? a3 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<a3=int64#14
- # asm 2: adc <mulzero=%rdx,<a3=%rbx
- adc %rdx,%rbx
- # qhasm: mulzero += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
- # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
- adc %rdx,%rdx
- # qhasm: mulzero *= 38
- # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
- # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
- imulq $38,%rdx,%rdx
- # qhasm: a0 += mulzero
- # asm 1: add <mulzero=int64#3,<a0=int64#11
- # asm 2: add <mulzero=%rdx,<a0=%r13
- add %rdx,%r13
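- # Reduction: because 2^256 mod (2^255-19) = 38, the high limbs mulr4..mulr7
- # are multiplied by 38 and folded into a0..a3; the leftover carry word mulr8
- # gets the same *38 treatment, and a final *38 carry fold brings the value
- # back into four limbs (full canonical reduction appears to be deferred to a
- # later freeze step, as is usual for this representation).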
- # qhasm: a0_stack = a0
- # asm 1: movq <a0=int64#11,>a0_stack=stack64#8
- # asm 2: movq <a0=%r13,>a0_stack=56(%rsp)
- movq %r13,56(%rsp)
- # qhasm: a1_stack = a1
- # asm 1: movq <a1=int64#12,>a1_stack=stack64#9
- # asm 2: movq <a1=%r14,>a1_stack=64(%rsp)
- movq %r14,64(%rsp)
- # qhasm: a2_stack = a2
- # asm 1: movq <a2=int64#13,>a2_stack=stack64#10
- # asm 2: movq <a2=%r15,>a2_stack=72(%rsp)
- movq %r15,72(%rsp)
- # qhasm: a3_stack = a3
- # asm 1: movq <a3=int64#14,>a3_stack=stack64#11
- # asm 2: movq <a3=%rbx,>a3_stack=80(%rsp)
- movq %rbx,80(%rsp)
- # qhasm: mulr4 = 0
- # asm 1: mov $0,>mulr4=int64#5
- # asm 2: mov $0,>mulr4=%r8
- mov $0,%r8
- # qhasm: mulr5 = 0
- # asm 1: mov $0,>mulr5=int64#6
- # asm 2: mov $0,>mulr5=%r9
- mov $0,%r9
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#8
- # asm 2: mov $0,>mulr6=%r10
- mov $0,%r10
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#9
- # asm 2: mov $0,>mulr7=%r11
- mov $0,%r11
- # qhasm: mulx0 = b0_stack
- # asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10
- # asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12
- movq 88(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 32)
- # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
- movq 32(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: e0 = mulrax
- # asm 1: mov <mulrax=int64#7,>e0=int64#11
- # asm 2: mov <mulrax=%rax,>e0=%r13
- mov %rax,%r13
- # qhasm: e1 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>e1=int64#12
- # asm 2: mov <mulrdx=%rdx,>e1=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(qp + 40)
- # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
- movq 40(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? e1 += mulrax
- # asm 1: add <mulrax=int64#7,<e1=int64#12
- # asm 2: add <mulrax=%rax,<e1=%r14
- add %rax,%r14
- # qhasm: e2 = 0
- # asm 1: mov $0,>e2=int64#13
- # asm 2: mov $0,>e2=%r15
- mov $0,%r15
- # qhasm: e2 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<e2=int64#13
- # asm 2: adc <mulrdx=%rdx,<e2=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(qp + 48)
- # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
- movq 48(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? e2 += mulrax
- # asm 1: add <mulrax=int64#7,<e2=int64#13
- # asm 2: add <mulrax=%rax,<e2=%r15
- add %rax,%r15
- # qhasm: e3 = 0
- # asm 1: mov $0,>e3=int64#14
- # asm 2: mov $0,>e3=%rbx
- mov $0,%rbx
- # qhasm: e3 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<e3=int64#14
- # asm 2: adc <mulrdx=%rdx,<e3=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(qp + 56)
- # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
- movq 56(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? e3 += mulrax
- # asm 1: add <mulrax=int64#7,<e3=int64#14
- # asm 2: add <mulrax=%rax,<e3=%rbx
- add %rax,%rbx
- # qhasm: mulr4 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr4=%r8
- adc %rdx,%r8
- # qhasm: mulx1 = b1_stack
- # asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10
- # asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12
- movq 96(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 32)
- # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
- movq 32(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? e1 += mulrax
- # asm 1: add <mulrax=int64#7,<e1=int64#12
- # asm 2: add <mulrax=%rax,<e1=%r14
- add %rax,%r14
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 40)
- # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
- movq 40(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? e2 += mulrax
- # asm 1: add <mulrax=int64#7,<e2=int64#13
- # asm 2: add <mulrax=%rax,<e2=%r15
- add %rax,%r15
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? e2 += mulc
- # asm 1: add <mulc=int64#15,<e2=int64#13
- # asm 2: add <mulc=%rbp,<e2=%r15
- add %rbp,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 48)
- # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
- movq 48(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? e3 += mulrax
- # asm 1: add <mulrax=int64#7,<e3=int64#14
- # asm 2: add <mulrax=%rax,<e3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? e3 += mulc
- # asm 1: add <mulc=int64#15,<e3=int64#14
- # asm 2: add <mulc=%rbp,<e3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 56)
- # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
- movq 56(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulr5 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr5=%r9
- adc %rdx,%r9
- # qhasm: mulx2 = b2_stack
- # asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10
- # asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12
- movq 104(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 32)
- # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
- movq 32(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? e2 += mulrax
- # asm 1: add <mulrax=int64#7,<e2=int64#13
- # asm 2: add <mulrax=%rax,<e2=%r15
- add %rax,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 40)
- # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
- movq 40(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? e3 += mulrax
- # asm 1: add <mulrax=int64#7,<e3=int64#14
- # asm 2: add <mulrax=%rax,<e3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? e3 += mulc
- # asm 1: add <mulc=int64#15,<e3=int64#14
- # asm 2: add <mulc=%rbp,<e3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 48)
- # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
- movq 48(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 56)
- # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
- movq 56(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r10
- adc %rdx,%r10
- # qhasm: mulx3 = b3_stack
- # asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10
- # asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12
- movq 112(%rsp),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 32)
- # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
- movq 32(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? e3 += mulrax
- # asm 1: add <mulrax=int64#7,<e3=int64#14
- # asm 2: add <mulrax=%rax,<e3=%rbx
- add %rax,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 40)
- # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
- movq 40(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 48)
- # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
- movq 48(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 56)
- # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
- movq 56(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#8
- # asm 2: add <mulrax=%rax,<mulr6=%r10
- add %rax,%r10
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr6 += mulc
- # asm 1: add <mulc=int64#15,<mulr6=int64#8
- # asm 2: add <mulc=%rbp,<mulr6=%r10
- add %rbp,%r10
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
- adc %rdx,%r11
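- # Same 4x4 schoolbook multiplication as above, this time forming the 512-bit
- # product (Y1 + X1) * qp[32..63] in e0..e3 / mulr4..mulr7; it is reduced
- # below with the identical multiply-by-38 folding.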
- # qhasm: mulrax = mulr4
- # asm 1: mov <mulr4=int64#5,>mulrax=int64#7
- # asm 2: mov <mulr4=%r8,>mulrax=%rax
- mov %r8,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: mulr4 = mulrax
- # asm 1: mov <mulrax=int64#7,>mulr4=int64#5
- # asm 2: mov <mulrax=%rax,>mulr4=%r8
- mov %rax,%r8
- # qhasm: mulrax = mulr5
- # asm 1: mov <mulr5=int64#6,>mulrax=int64#7
- # asm 2: mov <mulr5=%r9,>mulrax=%rax
- mov %r9,%rax
- # qhasm: mulr5 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr5=%r9
- mov %rdx,%r9
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrax = mulr6
- # asm 1: mov <mulr6=int64#8,>mulrax=int64#7
- # asm 2: mov <mulr6=%r10,>mulrax=%rax
- mov %r10,%rax
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#8
- # asm 2: mov $0,>mulr6=%r10
- mov $0,%r10
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r10
- adc %rdx,%r10
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#8
- # asm 2: add <mulrax=%rax,<mulr6=%r10
- add %rax,%r10
- # qhasm: mulrax = mulr7
- # asm 1: mov <mulr7=int64#9,>mulrax=int64#7
- # asm 2: mov <mulr7=%r11,>mulrax=%rax
- mov %r11,%rax
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#9
- # asm 2: mov $0,>mulr7=%r11
- mov $0,%r11
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
- adc %rdx,%r11
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr7 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr7=int64#9
- # asm 2: add <mulrax=%rax,<mulr7=%r11
- add %rax,%r11
- # qhasm: mulr8 = 0
- # asm 1: mov $0,>mulr8=int64#7
- # asm 2: mov $0,>mulr8=%rax
- mov $0,%rax
- # qhasm: mulr8 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
- # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: carry? e0 += mulr4
- # asm 1: add <mulr4=int64#5,<e0=int64#11
- # asm 2: add <mulr4=%r8,<e0=%r13
- add %r8,%r13
- # qhasm: carry? e1 += mulr5 + carry
- # asm 1: adc <mulr5=int64#6,<e1=int64#12
- # asm 2: adc <mulr5=%r9,<e1=%r14
- adc %r9,%r14
- # qhasm: carry? e2 += mulr6 + carry
- # asm 1: adc <mulr6=int64#8,<e2=int64#13
- # asm 2: adc <mulr6=%r10,<e2=%r15
- adc %r10,%r15
- # qhasm: carry? e3 += mulr7 + carry
- # asm 1: adc <mulr7=int64#9,<e3=int64#14
- # asm 2: adc <mulr7=%r11,<e3=%rbx
- adc %r11,%rbx
- # qhasm: mulzero = 0
- # asm 1: mov $0,>mulzero=int64#3
- # asm 2: mov $0,>mulzero=%rdx
- mov $0,%rdx
- # qhasm: mulr8 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
- # asm 2: adc <mulzero=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: mulr8 *= 38
- # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
- # asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
- imulq $38,%rax,%r8
- # qhasm: carry? e0 += mulr8
- # asm 1: add <mulr8=int64#5,<e0=int64#11
- # asm 2: add <mulr8=%r8,<e0=%r13
- add %r8,%r13
- # qhasm: carry? e1 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<e1=int64#12
- # asm 2: adc <mulzero=%rdx,<e1=%r14
- adc %rdx,%r14
- # qhasm: carry? e2 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<e2=int64#13
- # asm 2: adc <mulzero=%rdx,<e2=%r15
- adc %rdx,%r15
- # qhasm: carry? e3 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<e3=int64#14
- # asm 2: adc <mulzero=%rdx,<e3=%rbx
- adc %rdx,%rbx
- # qhasm: mulzero += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
- # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
- adc %rdx,%rdx
- # qhasm: mulzero *= 38
- # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
- # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
- imulq $38,%rdx,%rdx
- # qhasm: e0 += mulzero
- # asm 1: add <mulzero=int64#3,<e0=int64#11
- # asm 2: add <mulzero=%rdx,<e0=%r13
- add %rdx,%r13
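- # e0..e3 now hold B = (Y1 + X1) * qp[32..63] reduced mod 2^255-19, while
- # A = (Y1 - X1) * qp[0..31] is parked in a0_stack..a3_stack.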
- # qhasm: h0 = e0
- # asm 1: mov <e0=int64#11,>h0=int64#3
- # asm 2: mov <e0=%r13,>h0=%rdx
- mov %r13,%rdx
- # qhasm: h1 = e1
- # asm 1: mov <e1=int64#12,>h1=int64#5
- # asm 2: mov <e1=%r14,>h1=%r8
- mov %r14,%r8
- # qhasm: h2 = e2
- # asm 1: mov <e2=int64#13,>h2=int64#6
- # asm 2: mov <e2=%r15,>h2=%r9
- mov %r15,%r9
- # qhasm: h3 = e3
- # asm 1: mov <e3=int64#14,>h3=int64#7
- # asm 2: mov <e3=%rbx,>h3=%rax
- mov %rbx,%rax
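- # h is a working copy of B, so that B - A (in e) and B + A (in h) can both
- # be computed from the same value.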
- # qhasm: carry? e0 -= a0_stack
- # asm 1: subq <a0_stack=stack64#8,<e0=int64#11
- # asm 2: subq <a0_stack=56(%rsp),<e0=%r13
- subq 56(%rsp),%r13
- # qhasm: carry? e1 -= a1_stack - carry
- # asm 1: sbbq <a1_stack=stack64#9,<e1=int64#12
- # asm 2: sbbq <a1_stack=64(%rsp),<e1=%r14
- sbbq 64(%rsp),%r14
- # qhasm: carry? e2 -= a2_stack - carry
- # asm 1: sbbq <a2_stack=stack64#10,<e2=int64#13
- # asm 2: sbbq <a2_stack=72(%rsp),<e2=%r15
- sbbq 72(%rsp),%r15
- # qhasm: carry? e3 -= a3_stack - carry
- # asm 1: sbbq <a3_stack=stack64#11,<e3=int64#14
- # asm 2: sbbq <a3_stack=80(%rsp),<e3=%rbx
- sbbq 80(%rsp),%rbx
- # qhasm: subt0 = 0
- # asm 1: mov $0,>subt0=int64#8
- # asm 2: mov $0,>subt0=%r10
- mov $0,%r10
- # qhasm: subt1 = 38
- # asm 1: mov $38,>subt1=int64#9
- # asm 2: mov $38,>subt1=%r11
- mov $38,%r11
- # qhasm: subt1 = subt0 if !carry
- # asm 1: cmovae <subt0=int64#8,<subt1=int64#9
- # asm 2: cmovae <subt0=%r10,<subt1=%r11
- cmovae %r10,%r11
- # qhasm: carry? e0 -= subt1
- # asm 1: sub <subt1=int64#9,<e0=int64#11
- # asm 2: sub <subt1=%r11,<e0=%r13
- sub %r11,%r13
- # qhasm: carry? e1 -= subt0 - carry
- # asm 1: sbb <subt0=int64#8,<e1=int64#12
- # asm 2: sbb <subt0=%r10,<e1=%r14
- sbb %r10,%r14
- # qhasm: carry? e2 -= subt0 - carry
- # asm 1: sbb <subt0=int64#8,<e2=int64#13
- # asm 2: sbb <subt0=%r10,<e2=%r15
- sbb %r10,%r15
- # qhasm: carry? e3 -= subt0 - carry
- # asm 1: sbb <subt0=int64#8,<e3=int64#14
- # asm 2: sbb <subt0=%r10,<e3=%rbx
- sbb %r10,%rbx
- # qhasm: subt0 = subt1 if carry
- # asm 1: cmovc <subt1=int64#9,<subt0=int64#8
- # asm 2: cmovc <subt1=%r11,<subt0=%r10
- cmovc %r11,%r10
- # qhasm: e0 -= subt0
- # asm 1: sub <subt0=int64#8,<e0=int64#11
- # asm 2: sub <subt0=%r10,<e0=%r13
- sub %r10,%r13
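- # note (editorial annotation): constant-time borrow handling for the
- # subtraction: a borrow out of the top limb represents -2^256 = -38 (mod p),
- # so 38 is subtracted once more, selected via cmovae/cmovc rather than a
- # data-dependent branch; the final cmovc/sub pair covers the rare case where
- # subtracting 38 itself borrows again.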
- # qhasm: carry? h0 += a0_stack
- # asm 1: addq <a0_stack=stack64#8,<h0=int64#3
- # asm 2: addq <a0_stack=56(%rsp),<h0=%rdx
- addq 56(%rsp),%rdx
- # qhasm: carry? h1 += a1_stack + carry
- # asm 1: adcq <a1_stack=stack64#9,<h1=int64#5
- # asm 2: adcq <a1_stack=64(%rsp),<h1=%r8
- adcq 64(%rsp),%r8
- # qhasm: carry? h2 += a2_stack + carry
- # asm 1: adcq <a2_stack=stack64#10,<h2=int64#6
- # asm 2: adcq <a2_stack=72(%rsp),<h2=%r9
- adcq 72(%rsp),%r9
- # qhasm: carry? h3 += a3_stack + carry
- # asm 1: adcq <a3_stack=stack64#11,<h3=int64#7
- # asm 2: adcq <a3_stack=80(%rsp),<h3=%rax
- adcq 80(%rsp),%rax
- # qhasm: addt0 = 0
- # asm 1: mov $0,>addt0=int64#8
- # asm 2: mov $0,>addt0=%r10
- mov $0,%r10
- # qhasm: addt1 = 38
- # asm 1: mov $38,>addt1=int64#9
- # asm 2: mov $38,>addt1=%r11
- mov $38,%r11
- # qhasm: addt1 = addt0 if !carry
- # asm 1: cmovae <addt0=int64#8,<addt1=int64#9
- # asm 2: cmovae <addt0=%r10,<addt1=%r11
- cmovae %r10,%r11
- # qhasm: carry? h0 += addt1
- # asm 1: add <addt1=int64#9,<h0=int64#3
- # asm 2: add <addt1=%r11,<h0=%rdx
- add %r11,%rdx
- # qhasm: carry? h1 += addt0 + carry
- # asm 1: adc <addt0=int64#8,<h1=int64#5
- # asm 2: adc <addt0=%r10,<h1=%r8
- adc %r10,%r8
- # qhasm: carry? h2 += addt0 + carry
- # asm 1: adc <addt0=int64#8,<h2=int64#6
- # asm 2: adc <addt0=%r10,<h2=%r9
- adc %r10,%r9
- # qhasm: carry? h3 += addt0 + carry
- # asm 1: adc <addt0=int64#8,<h3=int64#7
- # asm 2: adc <addt0=%r10,<h3=%rax
- adc %r10,%rax
- # qhasm: addt0 = addt1 if carry
- # asm 1: cmovc <addt1=int64#9,<addt0=int64#8
- # asm 2: cmovc <addt1=%r11,<addt0=%r10
- cmovc %r11,%r10
- # qhasm: h0 += addt0
- # asm 1: add <addt0=int64#8,<h0=int64#3
- # asm 2: add <addt0=%r10,<h0=%rdx
- add %r10,%rdx
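- # note (editorial annotation): the mirror image of the subtraction fix-up:
- # a carry out of the top limb represents +2^256 = +38 (mod p), so 38 is
- # added back in, again branch-free via cmovae/cmovc.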
- # qhasm: *(uint64 *)(rp + 64) = h0
- # asm 1: movq <h0=int64#3,64(<rp=int64#1)
- # asm 2: movq <h0=%rdx,64(<rp=%rdi)
- movq %rdx,64(%rdi)
- # qhasm: *(uint64 *)(rp + 72) = h1
- # asm 1: movq <h1=int64#5,72(<rp=int64#1)
- # asm 2: movq <h1=%r8,72(<rp=%rdi)
- movq %r8,72(%rdi)
- # qhasm: *(uint64 *)(rp + 80) = h2
- # asm 1: movq <h2=int64#6,80(<rp=int64#1)
- # asm 2: movq <h2=%r9,80(<rp=%rdi)
- movq %r9,80(%rdi)
- # qhasm: *(uint64 *)(rp + 88) = h3
- # asm 1: movq <h3=int64#7,88(<rp=int64#1)
- # asm 2: movq <h3=%rax,88(<rp=%rdi)
- movq %rax,88(%rdi)
- # qhasm: *(uint64 *)(rp + 0) = e0
- # asm 1: movq <e0=int64#11,0(<rp=int64#1)
- # asm 2: movq <e0=%r13,0(<rp=%rdi)
- movq %r13,0(%rdi)
- # qhasm: *(uint64 *)(rp + 8) = e1
- # asm 1: movq <e1=int64#12,8(<rp=int64#1)
- # asm 2: movq <e1=%r14,8(<rp=%rdi)
- movq %r14,8(%rdi)
- # qhasm: *(uint64 *)(rp + 16) = e2
- # asm 1: movq <e2=int64#13,16(<rp=int64#1)
- # asm 2: movq <e2=%r15,16(<rp=%rdi)
- movq %r15,16(%rdi)
- # qhasm: *(uint64 *)(rp + 24) = e3
- # asm 1: movq <e3=int64#14,24(<rp=int64#1)
- # asm 2: movq <e3=%rbx,24(<rp=%rdi)
- movq %rbx,24(%rdi)
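- # note (editorial annotation): the sum h0..h3 has been stored at rp+64..88
- # and the difference e0..e3 at rp+0..24. The next block computes another
- # 4x4-limb schoolbook product, this time of the words at pp+96..120 with
- # those at qp+64..88.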
- # qhasm: mulr4 = 0
- # asm 1: mov $0,>mulr4=int64#5
- # asm 2: mov $0,>mulr4=%r8
- mov $0,%r8
- # qhasm: mulr5 = 0
- # asm 1: mov $0,>mulr5=int64#6
- # asm 2: mov $0,>mulr5=%r9
- mov $0,%r9
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#8
- # asm 2: mov $0,>mulr6=%r10
- mov $0,%r10
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#9
- # asm 2: mov $0,>mulr7=%r11
- mov $0,%r11
- # qhasm: mulx0 = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulx0=int64#10
- # asm 2: movq 96(<pp=%rsi),>mulx0=%r12
- movq 96(%rsi),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 64)
- # asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 64(<qp=%rcx),>mulrax=%rax
- movq 64(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: c0 = mulrax
- # asm 1: mov <mulrax=int64#7,>c0=int64#11
- # asm 2: mov <mulrax=%rax,>c0=%r13
- mov %rax,%r13
- # qhasm: c1 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>c1=int64#12
- # asm 2: mov <mulrdx=%rdx,>c1=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(qp + 72)
- # asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 72(<qp=%rcx),>mulrax=%rax
- movq 72(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? c1 += mulrax
- # asm 1: add <mulrax=int64#7,<c1=int64#12
- # asm 2: add <mulrax=%rax,<c1=%r14
- add %rax,%r14
- # qhasm: c2 = 0
- # asm 1: mov $0,>c2=int64#13
- # asm 2: mov $0,>c2=%r15
- mov $0,%r15
- # qhasm: c2 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<c2=int64#13
- # asm 2: adc <mulrdx=%rdx,<c2=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(qp + 80)
- # asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 80(<qp=%rcx),>mulrax=%rax
- movq 80(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? c2 += mulrax
- # asm 1: add <mulrax=int64#7,<c2=int64#13
- # asm 2: add <mulrax=%rax,<c2=%r15
- add %rax,%r15
- # qhasm: c3 = 0
- # asm 1: mov $0,>c3=int64#14
- # asm 2: mov $0,>c3=%rbx
- mov $0,%rbx
- # qhasm: c3 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<c3=int64#14
- # asm 2: adc <mulrdx=%rdx,<c3=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(qp + 88)
- # asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 88(<qp=%rcx),>mulrax=%rax
- movq 88(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
- # asm 1: mul <mulx0=int64#10
- # asm 2: mul <mulx0=%r12
- mul %r12
- # qhasm: carry? c3 += mulrax
- # asm 1: add <mulrax=int64#7,<c3=int64#14
- # asm 2: add <mulrax=%rax,<c3=%rbx
- add %rax,%rbx
- # qhasm: mulr4 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr4=%r8
- adc %rdx,%r8
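- # note (editorial annotation): the first row of the schoolbook
- # multiplication is done: mulx0 * qp[64..88] has been accumulated into
- # c0..c3 and mulr4. Each following row (mulx1, mulx2, mulx3) is shifted up
- # one limb and uses mulc to carry between its add/adc pairs.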
- # qhasm: mulx1 = *(uint64 *)(pp + 104)
- # asm 1: movq 104(<pp=int64#2),>mulx1=int64#10
- # asm 2: movq 104(<pp=%rsi),>mulx1=%r12
- movq 104(%rsi),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 64)
- # asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 64(<qp=%rcx),>mulrax=%rax
- movq 64(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? c1 += mulrax
- # asm 1: add <mulrax=int64#7,<c1=int64#12
- # asm 2: add <mulrax=%rax,<c1=%r14
- add %rax,%r14
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 72)
- # asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 72(<qp=%rcx),>mulrax=%rax
- movq 72(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? c2 += mulrax
- # asm 1: add <mulrax=int64#7,<c2=int64#13
- # asm 2: add <mulrax=%rax,<c2=%r15
- add %rax,%r15
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? c2 += mulc
- # asm 1: add <mulc=int64#15,<c2=int64#13
- # asm 2: add <mulc=%rbp,<c2=%r15
- add %rbp,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 80)
- # asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 80(<qp=%rcx),>mulrax=%rax
- movq 80(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? c3 += mulrax
- # asm 1: add <mulrax=int64#7,<c3=int64#14
- # asm 2: add <mulrax=%rax,<c3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? c3 += mulc
- # asm 1: add <mulc=int64#15,<c3=int64#14
- # asm 2: add <mulc=%rbp,<c3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 88)
- # asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 88(<qp=%rcx),>mulrax=%rax
- movq 88(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
- # asm 1: mul <mulx1=int64#10
- # asm 2: mul <mulx1=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulr5 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr5=%r9
- adc %rdx,%r9
- # qhasm: mulx2 = *(uint64 *)(pp + 112)
- # asm 1: movq 112(<pp=int64#2),>mulx2=int64#10
- # asm 2: movq 112(<pp=%rsi),>mulx2=%r12
- movq 112(%rsi),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 64)
- # asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 64(<qp=%rcx),>mulrax=%rax
- movq 64(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? c2 += mulrax
- # asm 1: add <mulrax=int64#7,<c2=int64#13
- # asm 2: add <mulrax=%rax,<c2=%r15
- add %rax,%r15
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 72)
- # asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 72(<qp=%rcx),>mulrax=%rax
- movq 72(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? c3 += mulrax
- # asm 1: add <mulrax=int64#7,<c3=int64#14
- # asm 2: add <mulrax=%rax,<c3=%rbx
- add %rax,%rbx
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? c3 += mulc
- # asm 1: add <mulc=int64#15,<c3=int64#14
- # asm 2: add <mulc=%rbp,<c3=%rbx
- add %rbp,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 80)
- # asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 80(<qp=%rcx),>mulrax=%rax
- movq 80(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 88)
- # asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 88(<qp=%rcx),>mulrax=%rax
- movq 88(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
- # asm 1: mul <mulx2=int64#10
- # asm 2: mul <mulx2=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r10
- adc %rdx,%r10
- # qhasm: mulx3 = *(uint64 *)(pp + 120)
- # asm 1: movq 120(<pp=int64#2),>mulx3=int64#10
- # asm 2: movq 120(<pp=%rsi),>mulx3=%r12
- movq 120(%rsi),%r12
- # qhasm: mulrax = *(uint64 *)(qp + 64)
- # asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 64(<qp=%rcx),>mulrax=%rax
- movq 64(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? c3 += mulrax
- # asm 1: add <mulrax=int64#7,<c3=int64#14
- # asm 2: add <mulrax=%rax,<c3=%rbx
- add %rax,%rbx
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 72)
- # asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 72(<qp=%rcx),>mulrax=%rax
- movq 72(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr4 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr4=int64#5
- # asm 2: add <mulrax=%rax,<mulr4=%r8
- add %rax,%r8
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr4 += mulc
- # asm 1: add <mulc=int64#15,<mulr4=int64#5
- # asm 2: add <mulc=%rbp,<mulr4=%r8
- add %rbp,%r8
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 80)
- # asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 80(<qp=%rcx),>mulrax=%rax
- movq 80(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#6
- # asm 2: add <mulrax=%rax,<mulr5=%r9
- add %rax,%r9
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr5 += mulc
- # asm 1: add <mulc=int64#15,<mulr5=int64#6
- # asm 2: add <mulc=%rbp,<mulr5=%r9
- add %rbp,%r9
- # qhasm: mulc = 0
- # asm 1: mov $0,>mulc=int64#15
- # asm 2: mov $0,>mulc=%rbp
- mov $0,%rbp
- # qhasm: mulc += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
- # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
- adc %rdx,%rbp
- # qhasm: mulrax = *(uint64 *)(qp + 88)
- # asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
- # asm 2: movq 88(<qp=%rcx),>mulrax=%rax
- movq 88(%rcx),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
- # asm 1: mul <mulx3=int64#10
- # asm 2: mul <mulx3=%r12
- mul %r12
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#8
- # asm 2: add <mulrax=%rax,<mulr6=%r10
- add %rax,%r10
- # qhasm: mulrdx += 0 + carry
- # asm 1: adc $0,<mulrdx=int64#3
- # asm 2: adc $0,<mulrdx=%rdx
- adc $0,%rdx
- # qhasm: carry? mulr6 += mulc
- # asm 1: add <mulc=int64#15,<mulr6=int64#8
- # asm 2: add <mulc=%rbp,<mulr6=%r10
- add %rbp,%r10
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r11
- adc %rdx,%r11
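- # note (editorial annotation): the full 512-bit product now sits in c0..c3
- # (low half) and mulr4..mulr7 (high half); the same multiply-by-38 folding
- # used earlier follows, reducing the high limbs into the low ones.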
- # qhasm: mulrax = mulr4
- # asm 1: mov <mulr4=int64#5,>mulrax=int64#7
- # asm 2: mov <mulr4=%r8,>mulrax=%rax
- mov %r8,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: mulr4 = mulrax
- # asm 1: mov <mulrax=int64#7,>mulr4=int64#4
- # asm 2: mov <mulrax=%rax,>mulr4=%rcx
- mov %rax,%rcx
- # qhasm: mulrax = mulr5
- # asm 1: mov <mulr5=int64#6,>mulrax=int64#7
- # asm 2: mov <mulr5=%r9,>mulrax=%rax
- mov %r9,%rax
- # qhasm: mulr5 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
- # asm 2: mov <mulrdx=%rdx,>mulr5=%r8
- mov %rdx,%r8
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr5 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr5=int64#5
- # asm 2: add <mulrax=%rax,<mulr5=%r8
- add %rax,%r8
- # qhasm: mulrax = mulr6
- # asm 1: mov <mulr6=int64#8,>mulrax=int64#7
- # asm 2: mov <mulr6=%r10,>mulrax=%rax
- mov %r10,%rax
- # qhasm: mulr6 = 0
- # asm 1: mov $0,>mulr6=int64#6
- # asm 2: mov $0,>mulr6=%r9
- mov $0,%r9
- # qhasm: mulr6 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
- adc %rdx,%r9
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr6 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr6=int64#6
- # asm 2: add <mulrax=%rax,<mulr6=%r9
- add %rax,%r9
- # qhasm: mulrax = mulr7
- # asm 1: mov <mulr7=int64#9,>mulrax=int64#7
- # asm 2: mov <mulr7=%r11,>mulrax=%rax
- mov %r11,%rax
- # qhasm: mulr7 = 0
- # asm 1: mov $0,>mulr7=int64#8
- # asm 2: mov $0,>mulr7=%r10
- mov $0,%r10
- # qhasm: mulr7 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
- adc %rdx,%r10
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
- mulq CRYPTO_NAMESPACE(38)(%rip)
- # qhasm: carry? mulr7 += mulrax
- # asm 1: add <mulrax=int64#7,<mulr7=int64#8
- # asm 2: add <mulrax=%rax,<mulr7=%r10
- add %rax,%r10
- # qhasm: mulr8 = 0
- # asm 1: mov $0,>mulr8=int64#7
- # asm 2: mov $0,>mulr8=%rax
- mov $0,%rax
- # qhasm: mulr8 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
- # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: carry? c0 += mulr4
- # asm 1: add <mulr4=int64#4,<c0=int64#11
- # asm 2: add <mulr4=%rcx,<c0=%r13
- add %rcx,%r13
- # qhasm: carry? c1 += mulr5 + carry
- # asm 1: adc <mulr5=int64#5,<c1=int64#12
- # asm 2: adc <mulr5=%r8,<c1=%r14
- adc %r8,%r14
- # qhasm: carry? c2 += mulr6 + carry
- # asm 1: adc <mulr6=int64#6,<c2=int64#13
- # asm 2: adc <mulr6=%r9,<c2=%r15
- adc %r9,%r15
- # qhasm: carry? c3 += mulr7 + carry
- # asm 1: adc <mulr7=int64#8,<c3=int64#14
- # asm 2: adc <mulr7=%r10,<c3=%rbx
- adc %r10,%rbx
- # qhasm: mulzero = 0
- # asm 1: mov $0,>mulzero=int64#3
- # asm 2: mov $0,>mulzero=%rdx
- mov $0,%rdx
- # qhasm: mulr8 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
- # asm 2: adc <mulzero=%rdx,<mulr8=%rax
- adc %rdx,%rax
- # qhasm: mulr8 *= 38
- # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
- # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
- imulq $38,%rax,%rcx
- # qhasm: carry? c0 += mulr8
- # asm 1: add <mulr8=int64#4,<c0=int64#11
- # asm 2: add <mulr8=%rcx,<c0=%r13
- add %rcx,%r13
- # qhasm: carry? c1 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<c1=int64#12
- # asm 2: adc <mulzero=%rdx,<c1=%r14
- adc %rdx,%r14
- # qhasm: carry? c2 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<c2=int64#13
- # asm 2: adc <mulzero=%rdx,<c2=%r15
- adc %rdx,%r15
- # qhasm: carry? c3 += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<c3=int64#14
- # asm 2: adc <mulzero=%rdx,<c3=%rbx
- adc %rdx,%rbx
- # qhasm: mulzero += mulzero + carry
- # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
- # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
- adc %rdx,%rdx
- # qhasm: mulzero *= 38
- # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
- # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
- imulq $38,%rdx,%rdx
- # qhasm: c0 += mulzero
- # asm 1: add <mulzero=int64#3,<c0=int64#11
- # asm 2: add <mulzero=%rdx,<c0=%r13
- add %rdx,%r13
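- # note (editorial annotation): c0..c3 now hold this product reduced, again
- # as a partially reduced 4-limb field element.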
- # qhasm: f0 = *(uint64 *)(pp + 64)
- # asm 1: movq 64(<pp=int64#2),>f0=int64#3
- # asm 2: movq 64(<pp=%rsi),>f0=%rdx
- movq 64(%rsi),%rdx
- # qhasm: f1 = *(uint64 *)(pp + 72)
- # asm 1: movq 72(<pp=int64#2),>f1=int64#4
- # asm 2: movq 72(<pp=%rsi),>f1=%rcx
- movq 72(%rsi),%rcx
- # qhasm: f2 = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>f2=int64#5
- # asm 2: movq 80(<pp=%rsi),>f2=%r8
- movq 80(%rsi),%r8
- # qhasm: f3 = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>f3=int64#2
- # asm 2: movq 88(<pp=%rsi),>f3=%rsi
- movq 88(%rsi),%rsi
- # qhasm: carry? f0 += f0
- # asm 1: add <f0=int64#3,<f0=int64#3
- # asm 2: add <f0=%rdx,<f0=%rdx
- add %rdx,%rdx
- # qhasm: carry? f1 += f1 + carry
- # asm 1: adc <f1=int64#4,<f1=int64#4
- # asm 2: adc <f1=%rcx,<f1=%rcx
- adc %rcx,%rcx
- # qhasm: carry? f2 += f2 + carry
- # asm 1: adc <f2=int64#5,<f2=int64#5
- # asm 2: adc <f2=%r8,<f2=%r8
- adc %r8,%r8
- # qhasm: carry? f3 += f3 + carry
- # asm 1: adc <f3=int64#2,<f3=int64#2
- # asm 2: adc <f3=%rsi,<f3=%rsi
- adc %rsi,%rsi
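- # note (editorial annotation): f0..f3 is doubled in place by adding each
- # limb to itself with carry propagation (a 256-bit left shift by 1); the
- # carry out of the top limb is folded back as +38 by the cmov sequence
- # below, exactly as in the earlier additions.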
- # qhasm: addt0 = 0
- # asm 1: mov $0,>addt0=int64#6
- # asm 2: mov $0,>addt0=%r9
- mov $0,%r9
- # qhasm: addt1 = 38
- # asm 1: mov $38,>addt1=int64#7
- # asm 2: mov $38,>addt1=%rax
- mov $38,%rax
- # qhasm: addt1 = addt0 if !carry
- # asm 1: cmovae <addt0=int64#6,<addt1=int64#7
- # asm 2: cmovae <addt0=%r9,<addt1=%rax
- cmovae %r9,%rax
- # qhasm: carry? f0 += addt1
- # asm 1: add <addt1=int64#7,<f0=int64#3
- # asm 2: add <addt1=%rax,<f0=%rdx
- add %rax,%rdx
- # qhasm: carry? f1 += addt0 + carry
- # asm 1: adc <addt0=int64#6,<f1=int64#4
- # asm 2: adc <addt0=%r9,<f1=%rcx
- adc %r9,%rcx
- # qhasm: carry? f2 += addt0 + carry
- # asm 1: adc <addt0=int64#6,<f2=int64#5
- # asm 2: adc <addt0=%r9,<f2=%r8
- adc %r9,%r8
- # qhasm: carry? f3 += addt0 + carry
- # asm 1: adc <addt0=int64#6,<f3=int64#2
- # asm 2: adc <addt0=%r9,<f3=%rsi
- adc %r9,%rsi
- # qhasm: addt0 = addt1 if carry
- # asm 1: cmovc <addt1=int64#7,<addt0=int64#6
- # asm 2: cmovc <addt1=%rax,<addt0=%r9
- cmovc %rax,%r9
- # qhasm: f0 += addt0
- # asm 1: add <addt0=int64#6,<f0=int64#3
- # asm 2: add <addt0=%r9,<f0=%rdx
- add %r9,%rdx
- # qhasm: g0 = f0
- # asm 1: mov <f0=int64#3,>g0=int64#6
- # asm 2: mov <f0=%rdx,>g0=%r9
- mov %rdx,%r9
- # qhasm: g1 = f1
- # asm 1: mov <f1=int64#4,>g1=int64#7
- # asm 2: mov <f1=%rcx,>g1=%rax
- mov %rcx,%rax
- # qhasm: g2 = f2
- # asm 1: mov <f2=int64#5,>g2=int64#8
- # asm 2: mov <f2=%r8,>g2=%r10
- mov %r8,%r10
- # qhasm: g3 = f3
- # asm 1: mov <f3=int64#2,>g3=int64#9
- # asm 2: mov <f3=%rsi,>g3=%r11
- mov %rsi,%r11
- # qhasm: carry? f0 -= c0
- # asm 1: sub <c0=int64#11,<f0=int64#3
- # asm 2: sub <c0=%r13,<f0=%rdx
- sub %r13,%rdx
- # qhasm: carry? f1 -= c1 - carry
- # asm 1: sbb <c1=int64#12,<f1=int64#4
- # asm 2: sbb <c1=%r14,<f1=%rcx
- sbb %r14,%rcx
- # qhasm: carry? f2 -= c2 - carry
- # asm 1: sbb <c2=int64#13,<f2=int64#5
- # asm 2: sbb <c2=%r15,<f2=%r8
- sbb %r15,%r8
- # qhasm: carry? f3 -= c3 - carry
- # asm 1: sbb <c3=int64#14,<f3=int64#2
- # asm 2: sbb <c3=%rbx,<f3=%rsi
- sbb %rbx,%rsi
- # qhasm: subt0 = 0
- # asm 1: mov $0,>subt0=int64#10
- # asm 2: mov $0,>subt0=%r12
- mov $0,%r12
- # qhasm: subt1 = 38
- # asm 1: mov $38,>subt1=int64#15
- # asm 2: mov $38,>subt1=%rbp
- mov $38,%rbp
- # qhasm: subt1 = subt0 if !carry
- # asm 1: cmovae <subt0=int64#10,<subt1=int64#15
- # asm 2: cmovae <subt0=%r12,<subt1=%rbp
- cmovae %r12,%rbp
- # qhasm: carry? f0 -= subt1
- # asm 1: sub <subt1=int64#15,<f0=int64#3
- # asm 2: sub <subt1=%rbp,<f0=%rdx
- sub %rbp,%rdx
- # qhasm: carry? f1 -= subt0 - carry
- # asm 1: sbb <subt0=int64#10,<f1=int64#4
- # asm 2: sbb <subt0=%r12,<f1=%rcx
- sbb %r12,%rcx
- # qhasm: carry? f2 -= subt0 - carry
- # asm 1: sbb <subt0=int64#10,<f2=int64#5
- # asm 2: sbb <subt0=%r12,<f2=%r8
- sbb %r12,%r8
- # qhasm: carry? f3 -= subt0 - carry
- # asm 1: sbb <subt0=int64#10,<f3=int64#2
- # asm 2: sbb <subt0=%r12,<f3=%rsi
- sbb %r12,%rsi
- # qhasm: subt0 = subt1 if carry
- # asm 1: cmovc <subt1=int64#15,<subt0=int64#10
- # asm 2: cmovc <subt1=%rbp,<subt0=%r12
- cmovc %rbp,%r12
- # qhasm: f0 -= subt0
- # asm 1: sub <subt0=int64#10,<f0=int64#3
- # asm 2: sub <subt0=%r12,<f0=%rdx
- sub %r12,%rdx
- # qhasm: carry? g0 += c0
- # asm 1: add <c0=int64#11,<g0=int64#6
- # asm 2: add <c0=%r13,<g0=%r9
- add %r13,%r9
- # qhasm: carry? g1 += c1 + carry
- # asm 1: adc <c1=int64#12,<g1=int64#7
- # asm 2: adc <c1=%r14,<g1=%rax
- adc %r14,%rax
- # qhasm: carry? g2 += c2 + carry
- # asm 1: adc <c2=int64#13,<g2=int64#8
- # asm 2: adc <c2=%r15,<g2=%r10
- adc %r15,%r10
- # qhasm: carry? g3 += c3 + carry
- # asm 1: adc <c3=int64#14,<g3=int64#9
- # asm 2: adc <c3=%rbx,<g3=%r11
- adc %rbx,%r11
- # qhasm: addt0 = 0
- # asm 1: mov $0,>addt0=int64#10
- # asm 2: mov $0,>addt0=%r12
- mov $0,%r12
- # qhasm: addt1 = 38
- # asm 1: mov $38,>addt1=int64#11
- # asm 2: mov $38,>addt1=%r13
- mov $38,%r13
- # qhasm: addt1 = addt0 if !carry
- # asm 1: cmovae <addt0=int64#10,<addt1=int64#11
- # asm 2: cmovae <addt0=%r12,<addt1=%r13
- cmovae %r12,%r13
- # qhasm: carry? g0 += addt1
- # asm 1: add <addt1=int64#11,<g0=int64#6
- # asm 2: add <addt1=%r13,<g0=%r9
- add %r13,%r9
- # qhasm: carry? g1 += addt0 + carry
- # asm 1: adc <addt0=int64#10,<g1=int64#7
- # asm 2: adc <addt0=%r12,<g1=%rax
- adc %r12,%rax
- # qhasm: carry? g2 += addt0 + carry
- # asm 1: adc <addt0=int64#10,<g2=int64#8
- # asm 2: adc <addt0=%r12,<g2=%r10
- adc %r12,%r10
- # qhasm: carry? g3 += addt0 + carry
- # asm 1: adc <addt0=int64#10,<g3=int64#9
- # asm 2: adc <addt0=%r12,<g3=%r11
- adc %r12,%r11
- # qhasm: addt0 = addt1 if carry
- # asm 1: cmovc <addt1=int64#11,<addt0=int64#10
- # asm 2: cmovc <addt1=%r13,<addt0=%r12
- cmovc %r13,%r12
- # qhasm: g0 += addt0
- # asm 1: add <addt0=int64#10,<g0=int64#6
- # asm 2: add <addt0=%r12,<g0=%r9
- add %r12,%r9
- # qhasm: *(uint64 *)(rp + 32) = g0
- # asm 1: movq <g0=int64#6,32(<rp=int64#1)
- # asm 2: movq <g0=%r9,32(<rp=%rdi)
- movq %r9,32(%rdi)
- # qhasm: *(uint64 *)(rp + 40) = g1
- # asm 1: movq <g1=int64#7,40(<rp=int64#1)
- # asm 2: movq <g1=%rax,40(<rp=%rdi)
- movq %rax,40(%rdi)
- # qhasm: *(uint64 *)(rp + 48) = g2
- # asm 1: movq <g2=int64#8,48(<rp=int64#1)
- # asm 2: movq <g2=%r10,48(<rp=%rdi)
- movq %r10,48(%rdi)
- # qhasm: *(uint64 *)(rp + 56) = g3
- # asm 1: movq <g3=int64#9,56(<rp=int64#1)
- # asm 2: movq <g3=%r11,56(<rp=%rdi)
- movq %r11,56(%rdi)
- # qhasm: *(uint64 *)(rp + 96) = f0
- # asm 1: movq <f0=int64#3,96(<rp=int64#1)
- # asm 2: movq <f0=%rdx,96(<rp=%rdi)
- movq %rdx,96(%rdi)
- # qhasm: *(uint64 *)(rp + 104) = f1
- # asm 1: movq <f1=int64#4,104(<rp=int64#1)
- # asm 2: movq <f1=%rcx,104(<rp=%rdi)
- movq %rcx,104(%rdi)
- # qhasm: *(uint64 *)(rp + 112) = f2
- # asm 1: movq <f2=int64#5,112(<rp=int64#1)
- # asm 2: movq <f2=%r8,112(<rp=%rdi)
- movq %r8,112(%rdi)
- # qhasm: *(uint64 *)(rp + 120) = f3
- # asm 1: movq <f3=int64#2,120(<rp=int64#1)
- # asm 2: movq <f3=%rsi,120(<rp=%rdi)
- movq %rsi,120(%rdi)
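- # note (editorial annotation): all four 256-bit outputs are now stored:
- # e at rp+0, g at rp+32, h at rp+64 and f at rp+96. All that remains is
- # restoring the saved registers and the qhasm epilogue.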
- # qhasm: caller1 = caller1_stack
- # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
- # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
- movq 0(%rsp),%r11
- # qhasm: caller2 = caller2_stack
- # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
- # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
- movq 8(%rsp),%r12
- # qhasm: caller3 = caller3_stack
- # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
- # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
- movq 16(%rsp),%r13
- # qhasm: caller4 = caller4_stack
- # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
- # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
- movq 24(%rsp),%r14
- # qhasm: caller5 = caller5_stack
- # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
- # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
- movq 32(%rsp),%r15
- # qhasm: caller6 = caller6_stack
- # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
- # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
- movq 40(%rsp),%rbx
- # qhasm: caller7 = caller7_stack
- # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
- # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
- movq 48(%rsp),%rbp
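- # note (editorial annotation): %r11 was reloaded above from 0(%rsp), where
- # qhasm prologues typically stash the stack-alignment adjustment (the
- # prologue is not shown in this hunk), so "add %r11,%rsp" undoes the frame;
- # the moves into %rax/%rdx are part of qhasm's standard leave sequence.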
- # qhasm: leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret