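- # Converts a ge25519 point from P1xP1 representation to projective P2
- # coordinates via three field multiplications mod 2^255 - 19 (the standard
- # conversion X3 = X*T, Y3 = Y*Z, Z3 = Z*T). Field elements are stored as
- # 5 limbs in radix 2^51 (40 bytes each); the "# qhasm:" / "# asm" comments
- # preserve each original qhasm statement and its register assignment.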
- # qhasm: int64 rp
- # qhasm: int64 pp
- # qhasm: input rp
- # qhasm: input pp
- # qhasm: int64 caller1
- # qhasm: int64 caller2
- # qhasm: int64 caller3
- # qhasm: int64 caller4
- # qhasm: int64 caller5
- # qhasm: int64 caller6
- # qhasm: int64 caller7
- # qhasm: caller caller1
- # qhasm: caller caller2
- # qhasm: caller caller3
- # qhasm: caller caller4
- # qhasm: caller caller5
- # qhasm: caller caller6
- # qhasm: caller caller7
- # qhasm: stack64 caller1_stack
- # qhasm: stack64 caller2_stack
- # qhasm: stack64 caller3_stack
- # qhasm: stack64 caller4_stack
- # qhasm: stack64 caller5_stack
- # qhasm: stack64 caller6_stack
- # qhasm: stack64 caller7_stack
- # qhasm: int64 rx0
- # qhasm: int64 rx1
- # qhasm: int64 rx2
- # qhasm: int64 rx3
- # qhasm: int64 rx4
- # qhasm: int64 ry0
- # qhasm: int64 ry1
- # qhasm: int64 ry2
- # qhasm: int64 ry3
- # qhasm: int64 ry4
- # qhasm: int64 rz0
- # qhasm: int64 rz1
- # qhasm: int64 rz2
- # qhasm: int64 rz3
- # qhasm: int64 rz4
- # qhasm: int64 mulr01
- # qhasm: int64 mulr11
- # qhasm: int64 mulr21
- # qhasm: int64 mulr31
- # qhasm: int64 mulr41
- # qhasm: int64 mulrax
- # qhasm: int64 mulrdx
- # qhasm: int64 mult
- # qhasm: int64 mulredmask
- # qhasm: stack64 mulx219_stack
- # qhasm: stack64 mulx319_stack
- # qhasm: stack64 mulx419_stack
- # qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
- .text
- .p2align 5
- .globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
- .globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
- _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2):
- CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2):
- mov %rsp,%r11
- and $31,%r11
- add $96,%r11
- sub %r11,%rsp
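- # qhasm prologue: align the stack to 32 bytes and reserve at least 96
- # bytes of scratch space; the callee-saved registers are spilled below.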
- # qhasm: caller1_stack = caller1
- # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
- # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
- movq %r11,0(%rsp)
- # qhasm: caller2_stack = caller2
- # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
- # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
- movq %r12,8(%rsp)
- # qhasm: caller3_stack = caller3
- # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
- # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
- movq %r13,16(%rsp)
- # qhasm: caller4_stack = caller4
- # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
- # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
- movq %r14,24(%rsp)
- # qhasm: caller5_stack = caller5
- # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
- # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
- movq %r15,32(%rsp)
- # qhasm: caller6_stack = caller6
- # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
- # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
- movq %rbx,40(%rsp)
- # qhasm: caller7_stack = caller7
- # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
- # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
- movq %rbp,48(%rsp)
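- # First field multiplication: rx = (element at pp+0) * (element at pp+120),
- # stored to rp+0..32 (X3 = X*T). Schoolbook 5x5-limb product with the
- # 2^255 wraparound folded in as it goes (2^255 == 19 mod p):
- #   rx0 = x0*y0 + 19*(x1*y4 + x2*y3 + x3*y2 + x4*y1)
- #   rx1 = x0*y1 + x1*y0 + 19*(x2*y4 + x3*y3 + x4*y2)
- #   rx2 = x0*y2 + x1*y1 + x2*y0 + 19*(x3*y4 + x4*y3)
- #   rx3 = x0*y3 + x1*y2 + x2*y1 + x3*y0 + 19*x4*y4
- #   rx4 = x0*y4 + x1*y3 + x2*y2 + x3*y1 + x4*y0
- # x3*19 and x4*19 are precomputed into mulx319_stack/mulx419_stack; each
- # column sum is a 128-bit value held in a (mulrN1:rxN) register pair.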
- # qhasm: mulrax = *(uint64 *)(pp + 24)
- # asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
- movq 24(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: rx0 = mulrax
- # asm 1: mov <mulrax=int64#7,>rx0=int64#4
- # asm 2: mov <mulrax=%rax,>rx0=%rcx
- mov %rax,%rcx
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
- # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
- mov %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 32)
- # asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
- movq 32(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rx0 += mulrax
- # asm 1: add <mulrax=int64#7,<rx0=int64#4
- # asm 2: add <mulrax=%rax,<rx0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 0)
- # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
- movq 0(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rx0 += mulrax
- # asm 1: add <mulrax=int64#7,<rx0=int64#4
- # asm 2: add <mulrax=%rax,<rx0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 0)
- # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
- movq 0(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: rx1 = mulrax
- # asm 1: mov <mulrax=int64#7,>rx1=int64#6
- # asm 2: mov <mulrax=%rax,>rx1=%r9
- mov %rax,%r9
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
- mov %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 0)
- # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
- movq 0(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: rx2 = mulrax
- # asm 1: mov <mulrax=int64#7,>rx2=int64#9
- # asm 2: mov <mulrax=%rax,>rx2=%r11
- mov %rax,%r11
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
- mov %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 0)
- # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
- movq 0(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: rx3 = mulrax
- # asm 1: mov <mulrax=int64#7,>rx3=int64#11
- # asm 2: mov <mulrax=%rax,>rx3=%r13
- mov %rax,%r13
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 0)
- # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
- movq 0(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: rx4 = mulrax
- # asm 1: mov <mulrax=int64#7,>rx4=int64#13
- # asm 2: mov <mulrax=%rax,>rx4=%r15
- mov %rax,%r15
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
- # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
- mov %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 8)
- # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
- movq 8(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rx1 += mulrax
- # asm 1: add <mulrax=int64#7,<rx1=int64#6
- # asm 2: add <mulrax=%rax,<rx1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 8)
- # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
- movq 8(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rx2 += mulrax
- # asm 1: add <mulrax=int64#7,<rx2=int64#9
- # asm 2: add <mulrax=%rax,<rx2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 8)
- # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
- movq 8(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rx3 += mulrax
- # asm 1: add <mulrax=int64#7,<rx3=int64#11
- # asm 2: add <mulrax=%rax,<rx3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 8)
- # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
- movq 8(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rx4 += mulrax
- # asm 1: add <mulrax=int64#7,<rx4=int64#13
- # asm 2: add <mulrax=%rax,<rx4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 8)
- # asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
- movq 8(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rx0 += mulrax
- # asm 1: add <mulrax=int64#7,<rx0=int64#4
- # asm 2: add <mulrax=%rax,<rx0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 16)
- # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
- movq 16(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rx2 += mulrax
- # asm 1: add <mulrax=int64#7,<rx2=int64#9
- # asm 2: add <mulrax=%rax,<rx2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 16)
- # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
- movq 16(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rx3 += mulrax
- # asm 1: add <mulrax=int64#7,<rx3=int64#11
- # asm 2: add <mulrax=%rax,<rx3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 16)
- # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
- movq 16(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rx4 += mulrax
- # asm 1: add <mulrax=int64#7,<rx4=int64#13
- # asm 2: add <mulrax=%rax,<rx4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 16)
- # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
- movq 16(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rx0 += mulrax
- # asm 1: add <mulrax=int64#7,<rx0=int64#4
- # asm 2: add <mulrax=%rax,<rx0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 16)
- # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
- movq 16(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rx1 += mulrax
- # asm 1: add <mulrax=int64#7,<rx1=int64#6
- # asm 2: add <mulrax=%rax,<rx1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 24)
- # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
- movq 24(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rx3 += mulrax
- # asm 1: add <mulrax=int64#7,<rx3=int64#11
- # asm 2: add <mulrax=%rax,<rx3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 24)
- # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
- movq 24(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rx4 += mulrax
- # asm 1: add <mulrax=int64#7,<rx4=int64#13
- # asm 2: add <mulrax=%rax,<rx4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rx1 += mulrax
- # asm 1: add <mulrax=int64#7,<rx1=int64#6
- # asm 2: add <mulrax=%rax,<rx1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rx2 += mulrax
- # asm 1: add <mulrax=int64#7,<rx2=int64#9
- # asm 2: add <mulrax=%rax,<rx2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 32)
- # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
- movq 32(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rx4 += mulrax
- # asm 1: add <mulrax=int64#7,<rx4=int64#13
- # asm 2: add <mulrax=%rax,<rx4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rx1 += mulrax
- # asm 1: add <mulrax=int64#7,<rx1=int64#6
- # asm 2: add <mulrax=%rax,<rx1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rx2 += mulrax
- # asm 1: add <mulrax=int64#7,<rx2=int64#9
- # asm 2: add <mulrax=%rax,<rx2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rx3 += mulrax
- # asm 1: add <mulrax=int64#7,<rx3=int64#11
- # asm 2: add <mulrax=%rax,<rx3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
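- # Reduction of the 128-bit column sums: shld $13 computes the carry
- # floor(sum / 2^51) (the low word contributes 64 - 51 = 13 bits),
- # REDMASK51 = 2^51 - 1 keeps the low 51 bits of each limb, and each
- # carry is added into the next limb; the carry out of limb 4 has weight
- # 2^255, so it is multiplied by 19 (2^255 == 19 mod p) and folded back
- # into limb 0.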
- # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
- # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
- # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
- movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
- # qhasm: mulr01 = (mulr01.rx0) << 13
- # asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
- # asm 2: shld $13,<rx0=%rcx,<mulr01=%r8
- shld $13,%rcx,%r8
- # qhasm: rx0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx0=int64#4
- # asm 2: and <mulredmask=%rdx,<rx0=%rcx
- and %rdx,%rcx
- # qhasm: mulr11 = (mulr11.rx1) << 13
- # asm 1: shld $13,<rx1=int64#6,<mulr11=int64#8
- # asm 2: shld $13,<rx1=%r9,<mulr11=%r10
- shld $13,%r9,%r10
- # qhasm: rx1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx1=int64#6
- # asm 2: and <mulredmask=%rdx,<rx1=%r9
- and %rdx,%r9
- # qhasm: rx1 += mulr01
- # asm 1: add <mulr01=int64#5,<rx1=int64#6
- # asm 2: add <mulr01=%r8,<rx1=%r9
- add %r8,%r9
- # qhasm: mulr21 = (mulr21.rx2) << 13
- # asm 1: shld $13,<rx2=int64#9,<mulr21=int64#10
- # asm 2: shld $13,<rx2=%r11,<mulr21=%r12
- shld $13,%r11,%r12
- # qhasm: rx2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx2=int64#9
- # asm 2: and <mulredmask=%rdx,<rx2=%r11
- and %rdx,%r11
- # qhasm: rx2 += mulr11
- # asm 1: add <mulr11=int64#8,<rx2=int64#9
- # asm 2: add <mulr11=%r10,<rx2=%r11
- add %r10,%r11
- # qhasm: mulr31 = (mulr31.rx3) << 13
- # asm 1: shld $13,<rx3=int64#11,<mulr31=int64#12
- # asm 2: shld $13,<rx3=%r13,<mulr31=%r14
- shld $13,%r13,%r14
- # qhasm: rx3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx3=int64#11
- # asm 2: and <mulredmask=%rdx,<rx3=%r13
- and %rdx,%r13
- # qhasm: rx3 += mulr21
- # asm 1: add <mulr21=int64#10,<rx3=int64#11
- # asm 2: add <mulr21=%r12,<rx3=%r13
- add %r12,%r13
- # qhasm: mulr41 = (mulr41.rx4) << 13
- # asm 1: shld $13,<rx4=int64#13,<mulr41=int64#14
- # asm 2: shld $13,<rx4=%r15,<mulr41=%rbx
- shld $13,%r15,%rbx
- # qhasm: rx4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx4=int64#13
- # asm 2: and <mulredmask=%rdx,<rx4=%r15
- and %rdx,%r15
- # qhasm: rx4 += mulr31
- # asm 1: add <mulr31=int64#12,<rx4=int64#13
- # asm 2: add <mulr31=%r14,<rx4=%r15
- add %r14,%r15
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
- # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
- imulq $19,%rbx,%r8
- # qhasm: rx0 += mulr41
- # asm 1: add <mulr41=int64#5,<rx0=int64#4
- # asm 2: add <mulr41=%r8,<rx0=%rcx
- add %r8,%rcx
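- # Final carry propagation: shift each limb right by 51, add the carry
- # into the next limb, and mask; the carry out of the top limb is again
- # multiplied by 19 and added back into rx0.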
- # qhasm: mult = rx0
- # asm 1: mov <rx0=int64#4,>mult=int64#5
- # asm 2: mov <rx0=%rcx,>mult=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: mult += rx1
- # asm 1: add <rx1=int64#6,<mult=int64#5
- # asm 2: add <rx1=%r9,<mult=%r8
- add %r9,%r8
- # qhasm: rx1 = mult
- # asm 1: mov <mult=int64#5,>rx1=int64#6
- # asm 2: mov <mult=%r8,>rx1=%r9
- mov %r8,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: rx0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx0=int64#4
- # asm 2: and <mulredmask=%rdx,<rx0=%rcx
- and %rdx,%rcx
- # qhasm: mult += rx2
- # asm 1: add <rx2=int64#9,<mult=int64#5
- # asm 2: add <rx2=%r11,<mult=%r8
- add %r11,%r8
- # qhasm: rx2 = mult
- # asm 1: mov <mult=int64#5,>rx2=int64#7
- # asm 2: mov <mult=%r8,>rx2=%rax
- mov %r8,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: rx1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx1=int64#6
- # asm 2: and <mulredmask=%rdx,<rx1=%r9
- and %rdx,%r9
- # qhasm: mult += rx3
- # asm 1: add <rx3=int64#11,<mult=int64#5
- # asm 2: add <rx3=%r13,<mult=%r8
- add %r13,%r8
- # qhasm: rx3 = mult
- # asm 1: mov <mult=int64#5,>rx3=int64#8
- # asm 2: mov <mult=%r8,>rx3=%r10
- mov %r8,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: rx2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx2=int64#7
- # asm 2: and <mulredmask=%rdx,<rx2=%rax
- and %rdx,%rax
- # qhasm: mult += rx4
- # asm 1: add <rx4=int64#13,<mult=int64#5
- # asm 2: add <rx4=%r15,<mult=%r8
- add %r15,%r8
- # qhasm: rx4 = mult
- # asm 1: mov <mult=int64#5,>rx4=int64#9
- # asm 2: mov <mult=%r8,>rx4=%r11
- mov %r8,%r11
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: rx3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx3=int64#8
- # asm 2: and <mulredmask=%rdx,<rx3=%r10
- and %rdx,%r10
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
- # asm 2: imulq $19,<mult=%r8,>mult=%r8
- imulq $19,%r8,%r8
- # qhasm: rx0 += mult
- # asm 1: add <mult=int64#5,<rx0=int64#4
- # asm 2: add <mult=%r8,<rx0=%rcx
- add %r8,%rcx
- # qhasm: rx4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<rx4=int64#9
- # asm 2: and <mulredmask=%rdx,<rx4=%r11
- and %rdx,%r11
- # qhasm: *(uint64 *)(rp + 0) = rx0
- # asm 1: movq <rx0=int64#4,0(<rp=int64#1)
- # asm 2: movq <rx0=%rcx,0(<rp=%rdi)
- movq %rcx,0(%rdi)
- # qhasm: *(uint64 *)(rp + 8) = rx1
- # asm 1: movq <rx1=int64#6,8(<rp=int64#1)
- # asm 2: movq <rx1=%r9,8(<rp=%rdi)
- movq %r9,8(%rdi)
- # qhasm: *(uint64 *)(rp + 16) = rx2
- # asm 1: movq <rx2=int64#7,16(<rp=int64#1)
- # asm 2: movq <rx2=%rax,16(<rp=%rdi)
- movq %rax,16(%rdi)
- # qhasm: *(uint64 *)(rp + 24) = rx3
- # asm 1: movq <rx3=int64#8,24(<rp=int64#1)
- # asm 2: movq <rx3=%r10,24(<rp=%rdi)
- movq %r10,24(%rdi)
- # qhasm: *(uint64 *)(rp + 32) = rx4
- # asm 1: movq <rx4=int64#9,32(<rp=int64#1)
- # asm 2: movq <rx4=%r11,32(<rp=%rdi)
- movq %r11,32(%rdi)
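- # Second field multiplication: ry = (element at pp+80) * (element at pp+40),
- # stored to rp+40..72 (Y3 = Y*Z); same schoolbook pattern as rx above,
- # with limbs 3 and 4 of the first operand again premultiplied by 19 and
- # parked on the stack.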
- # qhasm: mulrax = *(uint64 *)(pp + 104)
- # asm 1: movq 104(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 104(<pp=%rsi),>mulrax=%rdx
- movq 104(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
- # asm 1: mulq 56(<pp=int64#2)
- # asm 2: mulq 56(<pp=%rsi)
- mulq 56(%rsi)
- # qhasm: ry0 = mulrax
- # asm 1: mov <mulrax=int64#7,>ry0=int64#4
- # asm 2: mov <mulrax=%rax,>ry0=%rcx
- mov %rax,%rcx
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
- # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
- mov %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 112)
- # asm 1: movq 112(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 112(<pp=%rsi),>mulrax=%rdx
- movq 112(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
- # asm 1: mulq 48(<pp=int64#2)
- # asm 2: mulq 48(<pp=%rsi)
- mulq 48(%rsi)
- # qhasm: carry? ry0 += mulrax
- # asm 1: add <mulrax=int64#7,<ry0=int64#4
- # asm 2: add <mulrax=%rax,<ry0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
- movq 80(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
- # asm 1: mulq 40(<pp=int64#2)
- # asm 2: mulq 40(<pp=%rsi)
- mulq 40(%rsi)
- # qhasm: carry? ry0 += mulrax
- # asm 1: add <mulrax=int64#7,<ry0=int64#4
- # asm 2: add <mulrax=%rax,<ry0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
- movq 80(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
- # asm 1: mulq 48(<pp=int64#2)
- # asm 2: mulq 48(<pp=%rsi)
- mulq 48(%rsi)
- # qhasm: ry1 = mulrax
- # asm 1: mov <mulrax=int64#7,>ry1=int64#6
- # asm 2: mov <mulrax=%rax,>ry1=%r9
- mov %rax,%r9
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
- mov %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
- movq 80(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
- # asm 1: mulq 56(<pp=int64#2)
- # asm 2: mulq 56(<pp=%rsi)
- mulq 56(%rsi)
- # qhasm: ry2 = mulrax
- # asm 1: mov <mulrax=int64#7,>ry2=int64#9
- # asm 2: mov <mulrax=%rax,>ry2=%r11
- mov %rax,%r11
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
- mov %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
- movq 80(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
- # asm 1: mulq 64(<pp=int64#2)
- # asm 2: mulq 64(<pp=%rsi)
- mulq 64(%rsi)
- # qhasm: ry3 = mulrax
- # asm 1: mov <mulrax=int64#7,>ry3=int64#11
- # asm 2: mov <mulrax=%rax,>ry3=%r13
- mov %rax,%r13
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 80)
- # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
- movq 80(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
- # asm 1: mulq 72(<pp=int64#2)
- # asm 2: mulq 72(<pp=%rsi)
- mulq 72(%rsi)
- # qhasm: ry4 = mulrax
- # asm 1: mov <mulrax=int64#7,>ry4=int64#13
- # asm 2: mov <mulrax=%rax,>ry4=%r15
- mov %rax,%r15
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
- # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
- mov %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
- movq 88(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
- # asm 1: mulq 40(<pp=int64#2)
- # asm 2: mulq 40(<pp=%rsi)
- mulq 40(%rsi)
- # qhasm: carry? ry1 += mulrax
- # asm 1: add <mulrax=int64#7,<ry1=int64#6
- # asm 2: add <mulrax=%rax,<ry1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
- movq 88(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
- # asm 1: mulq 48(<pp=int64#2)
- # asm 2: mulq 48(<pp=%rsi)
- mulq 48(%rsi)
- # qhasm: carry? ry2 += mulrax
- # asm 1: add <mulrax=int64#7,<ry2=int64#9
- # asm 2: add <mulrax=%rax,<ry2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
- movq 88(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
- # asm 1: mulq 56(<pp=int64#2)
- # asm 2: mulq 56(<pp=%rsi)
- mulq 56(%rsi)
- # qhasm: carry? ry3 += mulrax
- # asm 1: add <mulrax=int64#7,<ry3=int64#11
- # asm 2: add <mulrax=%rax,<ry3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
- movq 88(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
- # asm 1: mulq 64(<pp=int64#2)
- # asm 2: mulq 64(<pp=%rsi)
- mulq 64(%rsi)
- # qhasm: carry? ry4 += mulrax
- # asm 1: add <mulrax=int64#7,<ry4=int64#13
- # asm 2: add <mulrax=%rax,<ry4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 88)
- # asm 1: movq 88(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 88(<pp=%rsi),>mulrax=%rdx
- movq 88(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
- # asm 1: mulq 72(<pp=int64#2)
- # asm 2: mulq 72(<pp=%rsi)
- mulq 72(%rsi)
- # qhasm: carry? ry0 += mulrax
- # asm 1: add <mulrax=int64#7,<ry0=int64#4
- # asm 2: add <mulrax=%rax,<ry0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
- movq 96(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
- # asm 1: mulq 40(<pp=int64#2)
- # asm 2: mulq 40(<pp=%rsi)
- mulq 40(%rsi)
- # qhasm: carry? ry2 += mulrax
- # asm 1: add <mulrax=int64#7,<ry2=int64#9
- # asm 2: add <mulrax=%rax,<ry2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
- movq 96(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
- # asm 1: mulq 48(<pp=int64#2)
- # asm 2: mulq 48(<pp=%rsi)
- mulq 48(%rsi)
- # qhasm: carry? ry3 += mulrax
- # asm 1: add <mulrax=int64#7,<ry3=int64#11
- # asm 2: add <mulrax=%rax,<ry3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
- movq 96(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
- # asm 1: mulq 56(<pp=int64#2)
- # asm 2: mulq 56(<pp=%rsi)
- mulq 56(%rsi)
- # qhasm: carry? ry4 += mulrax
- # asm 1: add <mulrax=int64#7,<ry4=int64#13
- # asm 2: add <mulrax=%rax,<ry4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
- movq 96(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
- # asm 1: mulq 64(<pp=int64#2)
- # asm 2: mulq 64(<pp=%rsi)
- mulq 64(%rsi)
- # qhasm: carry? ry0 += mulrax
- # asm 1: add <mulrax=int64#7,<ry0=int64#4
- # asm 2: add <mulrax=%rax,<ry0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 96)
- # asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
- movq 96(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
- # asm 1: mulq 72(<pp=int64#2)
- # asm 2: mulq 72(<pp=%rsi)
- mulq 72(%rsi)
- # qhasm: carry? ry1 += mulrax
- # asm 1: add <mulrax=int64#7,<ry1=int64#6
- # asm 2: add <mulrax=%rax,<ry1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 104)
- # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
- movq 104(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
- # asm 1: mulq 40(<pp=int64#2)
- # asm 2: mulq 40(<pp=%rsi)
- mulq 40(%rsi)
- # qhasm: carry? ry3 += mulrax
- # asm 1: add <mulrax=int64#7,<ry3=int64#11
- # asm 2: add <mulrax=%rax,<ry3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 104)
- # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
- movq 104(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
- # asm 1: mulq 48(<pp=int64#2)
- # asm 2: mulq 48(<pp=%rsi)
- mulq 48(%rsi)
- # qhasm: carry? ry4 += mulrax
- # asm 1: add <mulrax=int64#7,<ry4=int64#13
- # asm 2: add <mulrax=%rax,<ry4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
- # asm 1: mulq 64(<pp=int64#2)
- # asm 2: mulq 64(<pp=%rsi)
- mulq 64(%rsi)
- # qhasm: carry? ry1 += mulrax
- # asm 1: add <mulrax=int64#7,<ry1=int64#6
- # asm 2: add <mulrax=%rax,<ry1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
- # asm 1: mulq 72(<pp=int64#2)
- # asm 2: mulq 72(<pp=%rsi)
- mulq 72(%rsi)
- # qhasm: carry? ry2 += mulrax
- # asm 1: add <mulrax=int64#7,<ry2=int64#9
- # asm 2: add <mulrax=%rax,<ry2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 112)
- # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
- movq 112(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
- # asm 1: mulq 40(<pp=int64#2)
- # asm 2: mulq 40(<pp=%rsi)
- mulq 40(%rsi)
- # qhasm: carry? ry4 += mulrax
- # asm 1: add <mulrax=int64#7,<ry4=int64#13
- # asm 2: add <mulrax=%rax,<ry4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
- # asm 1: mulq 56(<pp=int64#2)
- # asm 2: mulq 56(<pp=%rsi)
- mulq 56(%rsi)
- # qhasm: carry? ry1 += mulrax
- # asm 1: add <mulrax=int64#7,<ry1=int64#6
- # asm 2: add <mulrax=%rax,<ry1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
- # asm 1: mulq 64(<pp=int64#2)
- # asm 2: mulq 64(<pp=%rsi)
- mulq 64(%rsi)
- # qhasm: carry? ry2 += mulrax
- # asm 1: add <mulrax=int64#7,<ry2=int64#9
- # asm 2: add <mulrax=%rax,<ry2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
- # asm 1: mulq 72(<pp=int64#2)
- # asm 2: mulq 72(<pp=%rsi)
- mulq 72(%rsi)
- # qhasm: carry? ry3 += mulrax
- # asm 1: add <mulrax=int64#7,<ry3=int64#11
- # asm 2: add <mulrax=%rax,<ry3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
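- # Same 2^51 reduction and carry propagation as for rx above.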
- # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
- # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
- # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
- movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
- # qhasm: mulr01 = (mulr01.ry0) << 13
- # asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
- # asm 2: shld $13,<ry0=%rcx,<mulr01=%r8
- shld $13,%rcx,%r8
- # qhasm: ry0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry0=int64#4
- # asm 2: and <mulredmask=%rdx,<ry0=%rcx
- and %rdx,%rcx
- # qhasm: mulr11 = (mulr11.ry1) << 13
- # asm 1: shld $13,<ry1=int64#6,<mulr11=int64#8
- # asm 2: shld $13,<ry1=%r9,<mulr11=%r10
- shld $13,%r9,%r10
- # qhasm: ry1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry1=int64#6
- # asm 2: and <mulredmask=%rdx,<ry1=%r9
- and %rdx,%r9
- # qhasm: ry1 += mulr01
- # asm 1: add <mulr01=int64#5,<ry1=int64#6
- # asm 2: add <mulr01=%r8,<ry1=%r9
- add %r8,%r9
- # qhasm: mulr21 = (mulr21.ry2) << 13
- # asm 1: shld $13,<ry2=int64#9,<mulr21=int64#10
- # asm 2: shld $13,<ry2=%r11,<mulr21=%r12
- shld $13,%r11,%r12
- # qhasm: ry2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry2=int64#9
- # asm 2: and <mulredmask=%rdx,<ry2=%r11
- and %rdx,%r11
- # qhasm: ry2 += mulr11
- # asm 1: add <mulr11=int64#8,<ry2=int64#9
- # asm 2: add <mulr11=%r10,<ry2=%r11
- add %r10,%r11
- # qhasm: mulr31 = (mulr31.ry3) << 13
- # asm 1: shld $13,<ry3=int64#11,<mulr31=int64#12
- # asm 2: shld $13,<ry3=%r13,<mulr31=%r14
- shld $13,%r13,%r14
- # qhasm: ry3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry3=int64#11
- # asm 2: and <mulredmask=%rdx,<ry3=%r13
- and %rdx,%r13
- # qhasm: ry3 += mulr21
- # asm 1: add <mulr21=int64#10,<ry3=int64#11
- # asm 2: add <mulr21=%r12,<ry3=%r13
- add %r12,%r13
- # qhasm: mulr41 = (mulr41.ry4) << 13
- # asm 1: shld $13,<ry4=int64#13,<mulr41=int64#14
- # asm 2: shld $13,<ry4=%r15,<mulr41=%rbx
- shld $13,%r15,%rbx
- # qhasm: ry4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry4=int64#13
- # asm 2: and <mulredmask=%rdx,<ry4=%r15
- and %rdx,%r15
- # qhasm: ry4 += mulr31
- # asm 1: add <mulr31=int64#12,<ry4=int64#13
- # asm 2: add <mulr31=%r14,<ry4=%r15
- add %r14,%r15
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
- # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
- imulq $19,%rbx,%r8
- # qhasm: ry0 += mulr41
- # asm 1: add <mulr41=int64#5,<ry0=int64#4
- # asm 2: add <mulr41=%r8,<ry0=%rcx
- add %r8,%rcx
- # qhasm: mult = ry0
- # asm 1: mov <ry0=int64#4,>mult=int64#5
- # asm 2: mov <ry0=%rcx,>mult=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: mult += ry1
- # asm 1: add <ry1=int64#6,<mult=int64#5
- # asm 2: add <ry1=%r9,<mult=%r8
- add %r9,%r8
- # qhasm: ry1 = mult
- # asm 1: mov <mult=int64#5,>ry1=int64#6
- # asm 2: mov <mult=%r8,>ry1=%r9
- mov %r8,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: ry0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry0=int64#4
- # asm 2: and <mulredmask=%rdx,<ry0=%rcx
- and %rdx,%rcx
- # qhasm: mult += ry2
- # asm 1: add <ry2=int64#9,<mult=int64#5
- # asm 2: add <ry2=%r11,<mult=%r8
- add %r11,%r8
- # qhasm: ry2 = mult
- # asm 1: mov <mult=int64#5,>ry2=int64#7
- # asm 2: mov <mult=%r8,>ry2=%rax
- mov %r8,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: ry1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry1=int64#6
- # asm 2: and <mulredmask=%rdx,<ry1=%r9
- and %rdx,%r9
- # qhasm: mult += ry3
- # asm 1: add <ry3=int64#11,<mult=int64#5
- # asm 2: add <ry3=%r13,<mult=%r8
- add %r13,%r8
- # qhasm: ry3 = mult
- # asm 1: mov <mult=int64#5,>ry3=int64#8
- # asm 2: mov <mult=%r8,>ry3=%r10
- mov %r8,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: ry2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry2=int64#7
- # asm 2: and <mulredmask=%rdx,<ry2=%rax
- and %rdx,%rax
- # qhasm: mult += ry4
- # asm 1: add <ry4=int64#13,<mult=int64#5
- # asm 2: add <ry4=%r15,<mult=%r8
- add %r15,%r8
- # qhasm: ry4 = mult
- # asm 1: mov <mult=int64#5,>ry4=int64#9
- # asm 2: mov <mult=%r8,>ry4=%r11
- mov %r8,%r11
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#5
- # asm 2: shr $51,<mult=%r8
- shr $51,%r8
- # qhasm: ry3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry3=int64#8
- # asm 2: and <mulredmask=%rdx,<ry3=%r10
- and %rdx,%r10
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
- # asm 2: imulq $19,<mult=%r8,>mult=%r8
- imulq $19,%r8,%r8
- # qhasm: ry0 += mult
- # asm 1: add <mult=int64#5,<ry0=int64#4
- # asm 2: add <mult=%r8,<ry0=%rcx
- add %r8,%rcx
- # qhasm: ry4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<ry4=int64#9
- # asm 2: and <mulredmask=%rdx,<ry4=%r11
- and %rdx,%r11
- # qhasm: *(uint64 *)(rp + 40) = ry0
- # asm 1: movq <ry0=int64#4,40(<rp=int64#1)
- # asm 2: movq <ry0=%rcx,40(<rp=%rdi)
- movq %rcx,40(%rdi)
- # qhasm: *(uint64 *)(rp + 48) = ry1
- # asm 1: movq <ry1=int64#6,48(<rp=int64#1)
- # asm 2: movq <ry1=%r9,48(<rp=%rdi)
- movq %r9,48(%rdi)
- # qhasm: *(uint64 *)(rp + 56) = ry2
- # asm 1: movq <ry2=int64#7,56(<rp=int64#1)
- # asm 2: movq <ry2=%rax,56(<rp=%rdi)
- movq %rax,56(%rdi)
- # qhasm: *(uint64 *)(rp + 64) = ry3
- # asm 1: movq <ry3=int64#8,64(<rp=int64#1)
- # asm 2: movq <ry3=%r10,64(<rp=%rdi)
- movq %r10,64(%rdi)
- # qhasm: *(uint64 *)(rp + 72) = ry4
- # asm 1: movq <ry4=int64#9,72(<rp=int64#1)
- # asm 2: movq <ry4=%r11,72(<rp=%rdi)
- movq %r11,72(%rdi)
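- # Third field multiplication: rz = (element at pp+40) * (element at pp+120),
- # i.e. Z3 = Z*T, following the same pattern.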
- # qhasm: mulrax = *(uint64 *)(pp + 64)
- # asm 1: movq 64(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 64(<pp=%rsi),>mulrax=%rdx
- movq 64(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: rz0 = mulrax
- # asm 1: mov <mulrax=int64#7,>rz0=int64#4
- # asm 2: mov <mulrax=%rax,>rz0=%rcx
- mov %rax,%rcx
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
- # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
- mov %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 72)
- # asm 1: movq 72(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 72(<pp=%rsi),>mulrax=%rdx
- movq 72(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rz0 += mulrax
- # asm 1: add <mulrax=int64#7,<rz0=int64#4
- # asm 2: add <mulrax=%rax,<rz0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
- movq 40(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rz0 += mulrax
- # asm 1: add <mulrax=int64#7,<rz0=int64#4
- # asm 2: add <mulrax=%rax,<rz0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
- movq 40(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: rz1 = mulrax
- # asm 1: mov <mulrax=int64#7,>rz1=int64#6
- # asm 2: mov <mulrax=%rax,>rz1=%r9
- mov %rax,%r9
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
- mov %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
- movq 40(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: rz2 = mulrax
- # asm 1: mov <mulrax=int64#7,>rz2=int64#9
- # asm 2: mov <mulrax=%rax,>rz2=%r11
- mov %rax,%r11
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
- mov %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
- movq 40(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: rz3 = mulrax
- # asm 1: mov <mulrax=int64#7,>rz3=int64#11
- # asm 2: mov <mulrax=%rax,>rz3=%r13
- mov %rax,%r13
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
- mov %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 40)
- # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
- movq 40(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: rz4 = mulrax
- # asm 1: mov <mulrax=int64#7,>rz4=int64#13
- # asm 2: mov <mulrax=%rax,>rz4=%r15
- mov %rax,%r15
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
- # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
- mov %rdx,%rbx
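- # the x0 row of the 5x5 schoolbook multiply is now in place: each limb
- # accumulator is a 128-bit value split across rz_i (low half) and
- # mulr_i1 (high half); rz0 already carries the folded 19*x3*y2 and
- # 19*x4*y1 terms from above.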
- # qhasm: mulrax = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
- movq 48(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rz1 += mulrax
- # asm 1: add <mulrax=int64#7,<rz1=int64#6
- # asm 2: add <mulrax=%rax,<rz1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
- movq 48(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rz2 += mulrax
- # asm 1: add <mulrax=int64#7,<rz2=int64#9
- # asm 2: add <mulrax=%rax,<rz2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
- movq 48(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rz3 += mulrax
- # asm 1: add <mulrax=int64#7,<rz3=int64#11
- # asm 2: add <mulrax=%rax,<rz3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
- movq 48(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rz4 += mulrax
- # asm 1: add <mulrax=int64#7,<rz4=int64#13
- # asm 2: add <mulrax=%rax,<rz4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 48)
- # asm 1: movq 48(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 48(<pp=%rsi),>mulrax=%rdx
- movq 48(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rz0 += mulrax
- # asm 1: add <mulrax=int64#7,<rz0=int64#4
- # asm 2: add <mulrax=%rax,<rz0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
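- # x1 row complete: x1*y0..x1*y3 accumulated into rz1..rz4, and the
- # wrap-around term 19*x1*y4 into rz0.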
- # qhasm: mulrax = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
- movq 56(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rz2 += mulrax
- # asm 1: add <mulrax=int64#7,<rz2=int64#9
- # asm 2: add <mulrax=%rax,<rz2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
- movq 56(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rz3 += mulrax
- # asm 1: add <mulrax=int64#7,<rz3=int64#11
- # asm 2: add <mulrax=%rax,<rz3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
- movq 56(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rz4 += mulrax
- # asm 1: add <mulrax=int64#7,<rz4=int64#13
- # asm 2: add <mulrax=%rax,<rz4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
- movq 56(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rz0 += mulrax
- # asm 1: add <mulrax=int64#7,<rz0=int64#4
- # asm 2: add <mulrax=%rax,<rz0=%rcx
- add %rax,%rcx
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
- # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
- adc %rdx,%r8
- # qhasm: mulrax = *(uint64 *)(pp + 56)
- # asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
- # asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
- movq 56(%rsi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rz1 += mulrax
- # asm 1: add <mulrax=int64#7,<rz1=int64#6
- # asm 2: add <mulrax=%rax,<rz1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
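- # x2 row complete: x2*y0..x2*y2 into rz2..rz4, 19*x2*y3 into rz0,
- # 19*x2*y4 into rz1.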
- # qhasm: mulrax = *(uint64 *)(pp + 64)
- # asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 64(<pp=%rsi),>mulrax=%rax
- movq 64(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rz3 += mulrax
- # asm 1: add <mulrax=int64#7,<rz3=int64#11
- # asm 2: add <mulrax=%rax,<rz3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
- # qhasm: mulrax = *(uint64 *)(pp + 64)
- # asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 64(<pp=%rsi),>mulrax=%rax
- movq 64(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
- # asm 1: mulq 128(<pp=int64#2)
- # asm 2: mulq 128(<pp=%rsi)
- mulq 128(%rsi)
- # qhasm: carry? rz4 += mulrax
- # asm 1: add <mulrax=int64#7,<rz4=int64#13
- # asm 2: add <mulrax=%rax,<rz4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rz1 += mulrax
- # asm 1: add <mulrax=int64#7,<rz1=int64#6
- # asm 2: add <mulrax=%rax,<rz1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rz2 += mulrax
- # asm 1: add <mulrax=int64#7,<rz2=int64#9
- # asm 2: add <mulrax=%rax,<rz2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
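- # x3 row complete: x3*y0 into rz3, x3*y1 into rz4, and the cached
- # 19*x3 times y3 and y4 into rz1 and rz2.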
- # qhasm: mulrax = *(uint64 *)(pp + 72)
- # asm 1: movq 72(<pp=int64#2),>mulrax=int64#7
- # asm 2: movq 72(<pp=%rsi),>mulrax=%rax
- movq 72(%rsi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
- # asm 1: mulq 120(<pp=int64#2)
- # asm 2: mulq 120(<pp=%rsi)
- mulq 120(%rsi)
- # qhasm: carry? rz4 += mulrax
- # asm 1: add <mulrax=int64#7,<rz4=int64#13
- # asm 2: add <mulrax=%rax,<rz4=%r15
- add %rax,%r15
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
- # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
- adc %rdx,%rbx
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
- # asm 1: mulq 136(<pp=int64#2)
- # asm 2: mulq 136(<pp=%rsi)
- mulq 136(%rsi)
- # qhasm: carry? rz1 += mulrax
- # asm 1: add <mulrax=int64#7,<rz1=int64#6
- # asm 2: add <mulrax=%rax,<rz1=%r9
- add %rax,%r9
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
- adc %rdx,%r10
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
- # asm 1: mulq 144(<pp=int64#2)
- # asm 2: mulq 144(<pp=%rsi)
- mulq 144(%rsi)
- # qhasm: carry? rz2 += mulrax
- # asm 1: add <mulrax=int64#7,<rz2=int64#9
- # asm 2: add <mulrax=%rax,<rz2=%r11
- add %rax,%r11
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
- adc %rdx,%r12
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
- # asm 1: mulq 152(<pp=int64#2)
- # asm 2: mulq 152(<pp=%rsi)
- mulq 152(%rsi)
- # qhasm: carry? rz3 += mulrax
- # asm 1: add <mulrax=int64#7,<rz3=int64#11
- # asm 2: add <mulrax=%rax,<rz3=%r13
- add %rax,%r13
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
- adc %rdx,%r14
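- # x4 row complete: all 25 partial products of the 5x5 multiply are now
- # accumulated; what follows reduces the five 128-bit accumulators back
- # to five radix-2^51 limbs modulo 2^255-19.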
- # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
- # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
- # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
- movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
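- # REDMASK51 = 2^51 - 1, the mask that extracts one radix-2^51 limb.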
- # qhasm: mulr01 = (mulr01.rz0) << 13
- # asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
- # asm 2: shld $13,<rz0=%rcx,<mulr01=%r8
- shld $13,%rcx,%r8
- # qhasm: rz0 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz0=int64#4
- # asm 2: and <mulredmask=%rsi,<rz0=%rcx
- and %rsi,%rcx
- # qhasm: mulr11 = (mulr11.rz1) << 13
- # asm 1: shld $13,<rz1=int64#6,<mulr11=int64#8
- # asm 2: shld $13,<rz1=%r9,<mulr11=%r10
- shld $13,%r9,%r10
- # qhasm: rz1 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz1=int64#6
- # asm 2: and <mulredmask=%rsi,<rz1=%r9
- and %rsi,%r9
- # qhasm: rz1 += mulr01
- # asm 1: add <mulr01=int64#5,<rz1=int64#6
- # asm 2: add <mulr01=%r8,<rz1=%r9
- add %r8,%r9
- # qhasm: mulr21 = (mulr21.rz2) << 13
- # asm 1: shld $13,<rz2=int64#9,<mulr21=int64#10
- # asm 2: shld $13,<rz2=%r11,<mulr21=%r12
- shld $13,%r11,%r12
- # qhasm: rz2 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz2=int64#9
- # asm 2: and <mulredmask=%rsi,<rz2=%r11
- and %rsi,%r11
- # qhasm: rz2 += mulr11
- # asm 1: add <mulr11=int64#8,<rz2=int64#9
- # asm 2: add <mulr11=%r10,<rz2=%r11
- add %r10,%r11
- # qhasm: mulr31 = (mulr31.rz3) << 13
- # asm 1: shld $13,<rz3=int64#11,<mulr31=int64#12
- # asm 2: shld $13,<rz3=%r13,<mulr31=%r14
- shld $13,%r13,%r14
- # qhasm: rz3 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz3=int64#11
- # asm 2: and <mulredmask=%rsi,<rz3=%r13
- and %rsi,%r13
- # qhasm: rz3 += mulr21
- # asm 1: add <mulr21=int64#10,<rz3=int64#11
- # asm 2: add <mulr21=%r12,<rz3=%r13
- add %r12,%r13
- # qhasm: mulr41 = (mulr41.rz4) << 13
- # asm 1: shld $13,<rz4=int64#13,<mulr41=int64#14
- # asm 2: shld $13,<rz4=%r15,<mulr41=%rbx
- shld $13,%r15,%rbx
- # qhasm: rz4 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz4=int64#13
- # asm 2: and <mulredmask=%rsi,<rz4=%r15
- and %rsi,%r15
- # qhasm: rz4 += mulr31
- # asm 1: add <mulr31=int64#12,<rz4=int64#13
- # asm 2: add <mulr31=%r14,<rz4=%r15
- add %r14,%r15
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#3
- # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%rdx
- imulq $19,%rbx,%rdx
- # qhasm: rz0 += mulr41
- # asm 1: add <mulr41=int64#3,<rz0=int64#4
- # asm 2: add <mulr41=%rdx,<rz0=%rcx
- add %rdx,%rcx
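- # each shld $13 computes (128-bit accumulator) >> 51 into mulr_i1 while
- # the and keeps the low 51 bits in rz_i; every carry is added into the
- # next limb, and the limb-4 carry re-enters limb 0 multiplied by 19.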
- # qhasm: mult = rz0
- # asm 1: mov <rz0=int64#4,>mult=int64#3
- # asm 2: mov <rz0=%rcx,>mult=%rdx
- mov %rcx,%rdx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#3
- # asm 2: shr $51,<mult=%rdx
- shr $51,%rdx
- # qhasm: mult += rz1
- # asm 1: add <rz1=int64#6,<mult=int64#3
- # asm 2: add <rz1=%r9,<mult=%rdx
- add %r9,%rdx
- # qhasm: rz1 = mult
- # asm 1: mov <mult=int64#3,>rz1=int64#5
- # asm 2: mov <mult=%rdx,>rz1=%r8
- mov %rdx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#3
- # asm 2: shr $51,<mult=%rdx
- shr $51,%rdx
- # qhasm: rz0 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz0=int64#4
- # asm 2: and <mulredmask=%rsi,<rz0=%rcx
- and %rsi,%rcx
- # qhasm: mult += rz2
- # asm 1: add <rz2=int64#9,<mult=int64#3
- # asm 2: add <rz2=%r11,<mult=%rdx
- add %r11,%rdx
- # qhasm: rz2 = mult
- # asm 1: mov <mult=int64#3,>rz2=int64#6
- # asm 2: mov <mult=%rdx,>rz2=%r9
- mov %rdx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#3
- # asm 2: shr $51,<mult=%rdx
- shr $51,%rdx
- # qhasm: rz1 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz1=int64#5
- # asm 2: and <mulredmask=%rsi,<rz1=%r8
- and %rsi,%r8
- # qhasm: mult += rz3
- # asm 1: add <rz3=int64#11,<mult=int64#3
- # asm 2: add <rz3=%r13,<mult=%rdx
- add %r13,%rdx
- # qhasm: rz3 = mult
- # asm 1: mov <mult=int64#3,>rz3=int64#7
- # asm 2: mov <mult=%rdx,>rz3=%rax
- mov %rdx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#3
- # asm 2: shr $51,<mult=%rdx
- shr $51,%rdx
- # qhasm: rz2 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz2=int64#6
- # asm 2: and <mulredmask=%rsi,<rz2=%r9
- and %rsi,%r9
- # qhasm: mult += rz4
- # asm 1: add <rz4=int64#13,<mult=int64#3
- # asm 2: add <rz4=%r15,<mult=%rdx
- add %r15,%rdx
- # qhasm: rz4 = mult
- # asm 1: mov <mult=int64#3,>rz4=int64#8
- # asm 2: mov <mult=%rdx,>rz4=%r10
- mov %rdx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#3
- # asm 2: shr $51,<mult=%rdx
- shr $51,%rdx
- # qhasm: rz3 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz3=int64#7
- # asm 2: and <mulredmask=%rsi,<rz3=%rax
- and %rsi,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#3,>mult=int64#3
- # asm 2: imulq $19,<mult=%rdx,>mult=%rdx
- imulq $19,%rdx,%rdx
- # qhasm: rz0 += mult
- # asm 1: add <mult=int64#3,<rz0=int64#4
- # asm 2: add <mult=%rdx,<rz0=%rcx
- add %rdx,%rcx
- # qhasm: rz4 &= mulredmask
- # asm 1: and <mulredmask=int64#2,<rz4=int64#8
- # asm 2: and <mulredmask=%rsi,<rz4=%r10
- and %rsi,%r10
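- # sequential carry propagation: the carry out of each limb ripples into
- # the next, and the final carry times 19 is folded back into rz0,
- # leaving all five limbs weakly reduced (below roughly 2^52).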
- # qhasm: *(uint64 *)(rp + 80) = rz0
- # asm 1: movq <rz0=int64#4,80(<rp=int64#1)
- # asm 2: movq <rz0=%rcx,80(<rp=%rdi)
- movq %rcx,80(%rdi)
- # qhasm: *(uint64 *)(rp + 88) = rz1
- # asm 1: movq <rz1=int64#5,88(<rp=int64#1)
- # asm 2: movq <rz1=%r8,88(<rp=%rdi)
- movq %r8,88(%rdi)
- # qhasm: *(uint64 *)(rp + 96) = rz2
- # asm 1: movq <rz2=int64#6,96(<rp=int64#1)
- # asm 2: movq <rz2=%r9,96(<rp=%rdi)
- movq %r9,96(%rdi)
- # qhasm: *(uint64 *)(rp + 104) = rz3
- # asm 1: movq <rz3=int64#7,104(<rp=int64#1)
- # asm 2: movq <rz3=%rax,104(<rp=%rdi)
- movq %rax,104(%rdi)
- # qhasm: *(uint64 *)(rp + 112) = rz4
- # asm 1: movq <rz4=int64#8,112(<rp=int64#1)
- # asm 2: movq <rz4=%r10,112(<rp=%rdi)
- movq %r10,112(%rdi)
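- # the reduced limbs are stored as five 64-bit words at 80(rp)..112(rp);
- # the epilogue below reloads the registers spilled by the prologue.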
- # qhasm: caller1 = caller1_stack
- # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
- # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
- movq 0(%rsp),%r11
- # qhasm: caller2 = caller2_stack
- # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
- # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
- movq 8(%rsp),%r12
- # qhasm: caller3 = caller3_stack
- # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
- # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
- movq 16(%rsp),%r13
- # qhasm: caller4 = caller4_stack
- # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
- # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
- movq 24(%rsp),%r14
- # qhasm: caller5 = caller5_stack
- # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
- # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
- movq 32(%rsp),%r15
- # qhasm: caller6 = caller6_stack
- # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
- # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
- movq 40(%rsp),%rbx
- # qhasm: caller7 = caller7_stack
- # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
- # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
- movq 48(%rsp),%rbp
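- # qhasm leave: %r11 (reloaded from 0(%rsp) above) is assumed to hold
- # the stack adjustment saved by the prologue, so adding it to %rsp
- # unwinds the frame before returning.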
- # qhasm: leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret