# ge25519_nielsadd_p1p1.S
# qhasm: int64 rp
# qhasm: int64 pp
# qhasm: int64 qp
# qhasm: input rp
# qhasm: input pp
# qhasm: input qp
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 a0
# qhasm: int64 a1
# qhasm: int64 a2
# qhasm: int64 a3
# qhasm: stack64 a0_stack
# qhasm: stack64 a1_stack
# qhasm: stack64 a2_stack
# qhasm: stack64 a3_stack
# qhasm: int64 b0
# qhasm: int64 b1
# qhasm: int64 b2
# qhasm: int64 b3
# qhasm: stack64 b0_stack
# qhasm: stack64 b1_stack
# qhasm: stack64 b2_stack
# qhasm: stack64 b3_stack
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: stack64 c0_stack
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: int64 d0
# qhasm: int64 d1
# qhasm: int64 d2
# qhasm: int64 d3
# qhasm: stack64 d0_stack
# qhasm: stack64 d1_stack
# qhasm: stack64 d2_stack
# qhasm: stack64 d3_stack
# qhasm: int64 e0
# qhasm: int64 e1
# qhasm: int64 e2
# qhasm: int64 e3
# qhasm: stack64 e0_stack
# qhasm: stack64 e1_stack
# qhasm: stack64 e2_stack
# qhasm: stack64 e3_stack
# qhasm: int64 f0
# qhasm: int64 f1
# qhasm: int64 f2
# qhasm: int64 f3
# qhasm: stack64 f0_stack
# qhasm: stack64 f1_stack
# qhasm: stack64 f2_stack
# qhasm: stack64 f3_stack
# qhasm: int64 g0
# qhasm: int64 g1
# qhasm: int64 g2
# qhasm: int64 g3
# qhasm: stack64 g0_stack
# qhasm: stack64 g1_stack
# qhasm: stack64 g2_stack
# qhasm: stack64 g3_stack
# qhasm: int64 h0
# qhasm: int64 h1
# qhasm: int64 h2
# qhasm: int64 h3
# qhasm: stack64 h0_stack
# qhasm: stack64 h1_stack
# qhasm: stack64 h2_stack
# qhasm: stack64 h3_stack
# qhasm: int64 qt0
# qhasm: int64 qt1
# qhasm: int64 qt2
# qhasm: int64 qt3
# qhasm: stack64 qt0_stack
# qhasm: stack64 qt1_stack
# qhasm: stack64 qt2_stack
# qhasm: stack64 qt3_stack
# qhasm: int64 t10
# qhasm: int64 t11
# qhasm: int64 t12
# qhasm: int64 t13
# qhasm: stack64 t10_stack
# qhasm: stack64 t11_stack
# qhasm: stack64 t12_stack
# qhasm: stack64 t13_stack
# qhasm: int64 t20
# qhasm: int64 t21
# qhasm: int64 t22
# qhasm: int64 t23
# qhasm: stack64 t20_stack
# qhasm: stack64 t21_stack
# qhasm: stack64 t22_stack
# qhasm: stack64 t23_stack
# qhasm: int64 rx0
# qhasm: int64 rx1
# qhasm: int64 rx2
# qhasm: int64 rx3
# qhasm: int64 ry0
# qhasm: int64 ry1
# qhasm: int64 ry2
# qhasm: int64 ry3
# qhasm: int64 rz0
# qhasm: int64 rz1
# qhasm: int64 rz2
# qhasm: int64 rz3
# qhasm: int64 rt0
# qhasm: int64 rt1
# qhasm: int64 rt2
# qhasm: int64 rt3
# qhasm: int64 mulr4
# qhasm: int64 mulr5
# qhasm: int64 mulr6
# qhasm: int64 mulr7
# qhasm: int64 mulr8
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mulx0
# qhasm: int64 mulx1
# qhasm: int64 mulx2
# qhasm: int64 mulx3
# qhasm: int64 mulc
# qhasm: int64 mulzero
# qhasm: int64 muli38
# qhasm: int64 addt0
# qhasm: int64 addt1
# qhasm: int64 subt0
# qhasm: int64 subt1
# qhasm: enter CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
.text
.p2align 5
.globl _CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
.globl CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1)
_CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1):
CRYPTO_NAMESPACE(ge25519_nielsadd_p1p1):
mov %rsp,%r11
and $31,%r11
add $128,%r11
sub %r11,%rsp
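# Note (added): the four instructions above carve out an aligned scratch
# frame: %r11 = (%rsp & 31) + 128, then %rsp -= %r11, which leaves %rsp
# 32-byte aligned with 128..159 bytes of spill space. The adjusted %r11 is
# what gets saved at 0(%rsp) below, so the epilogue can add it back to
# restore the original stack pointer. A C sketch of the same computation
# (sp stands for the incoming stack pointer, treated as an integer):
#   uint64_t r11 = (sp & 31) + 128;
#   sp -= r11;   /* now sp % 32 == 0, with 128..159 bytes reserved */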
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: qp = qp
# asm 1: mov <qp=int64#3,>qp=int64#4
# asm 2: mov <qp=%rdx,>qp=%rcx
mov %rdx,%rcx
# qhasm: a0 = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>a0=int64#3
# asm 2: movq 32(<pp=%rsi),>a0=%rdx
movq 32(%rsi),%rdx
# qhasm: a1 = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>a1=int64#5
# asm 2: movq 40(<pp=%rsi),>a1=%r8
movq 40(%rsi),%r8
# qhasm: a2 = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>a2=int64#6
# asm 2: movq 48(<pp=%rsi),>a2=%r9
movq 48(%rsi),%r9
# qhasm: a3 = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>a3=int64#7
# asm 2: movq 56(<pp=%rsi),>a3=%rax
movq 56(%rsi),%rax
# qhasm: b0 = a0
# asm 1: mov <a0=int64#3,>b0=int64#8
# asm 2: mov <a0=%rdx,>b0=%r10
mov %rdx,%r10
# qhasm: b1 = a1
# asm 1: mov <a1=int64#5,>b1=int64#9
# asm 2: mov <a1=%r8,>b1=%r11
mov %r8,%r11
# qhasm: b2 = a2
# asm 1: mov <a2=int64#6,>b2=int64#10
# asm 2: mov <a2=%r9,>b2=%r12
mov %r9,%r12
# qhasm: b3 = a3
# asm 1: mov <a3=int64#7,>b3=int64#11
# asm 2: mov <a3=%rax,>b3=%r13
mov %rax,%r13
# qhasm: carry? a0 -= *(uint64 *) (pp + 0)
# asm 1: subq 0(<pp=int64#2),<a0=int64#3
# asm 2: subq 0(<pp=%rsi),<a0=%rdx
subq 0(%rsi),%rdx
# qhasm: carry? a1 -= *(uint64 *) (pp + 8) - carry
# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5
# asm 2: sbbq 8(<pp=%rsi),<a1=%r8
sbbq 8(%rsi),%r8
# qhasm: carry? a2 -= *(uint64 *) (pp + 16) - carry
# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6
# asm 2: sbbq 16(<pp=%rsi),<a2=%r9
sbbq 16(%rsi),%r9
# qhasm: carry? a3 -= *(uint64 *) (pp + 24) - carry
# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7
# asm 2: sbbq 24(<pp=%rsi),<a3=%rax
sbbq 24(%rsi),%rax
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#12
# asm 2: mov $0,>subt0=%r14
mov $0,%r14
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#13
# asm 2: mov $38,>subt1=%r15
mov $38,%r15
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#12,<subt1=int64#13
# asm 2: cmovae <subt0=%r14,<subt1=%r15
cmovae %r14,%r15
# qhasm: carry? a0 -= subt1
# asm 1: sub <subt1=int64#13,<a0=int64#3
# asm 2: sub <subt1=%r15,<a0=%rdx
sub %r15,%rdx
# qhasm: carry? a1 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a1=int64#5
# asm 2: sbb <subt0=%r14,<a1=%r8
sbb %r14,%r8
# qhasm: carry? a2 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a2=int64#6
# asm 2: sbb <subt0=%r14,<a2=%r9
sbb %r14,%r9
# qhasm: carry? a3 -= subt0 - carry
# asm 1: sbb <subt0=int64#12,<a3=int64#7
# asm 2: sbb <subt0=%r14,<a3=%rax
sbb %r14,%rax
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#13,<subt0=int64#12
# asm 2: cmovc <subt1=%r15,<subt0=%r14
cmovc %r15,%r14
# qhasm: a0 -= subt0
# asm 1: sub <subt0=int64#12,<a0=int64#3
# asm 2: sub <subt0=%r14,<a0=%rdx
sub %r14,%rdx
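# Note (added): the subq/sbbq chain above computes a = Y1 - X1 on four
# 64-bit limbs, i.e. modulo 2^256. Because 2^256 = 38 (mod 2^255 - 19), a
# borrow out of the top limb is compensated by subtracting another 38;
# cmovae/cmovc pick 0 or 38 in constant time instead of branching on the
# carry flag. A hedged C sketch of the fix-up (sub4/sub1 are hypothetical
# limb helpers that return the outgoing borrow):
#   borrow = sub4(a, x);                 /* a -= x, mod 2^256           */
#   borrow = sub1(a, borrow ? 38 : 0);   /* fold 2^256 = 38 back in     */
#   a[0] -= borrow ? 38 : 0;             /* rare second borrow, limb 0  */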
# qhasm: carry? b0 += *(uint64 *) (pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#8
# asm 2: addq 0(<pp=%rsi),<b0=%r10
addq 0(%rsi),%r10
# qhasm: carry? b1 += *(uint64 *) (pp + 8) + carry
# asm 1: adcq 8(<pp=int64#2),<b1=int64#9
# asm 2: adcq 8(<pp=%rsi),<b1=%r11
adcq 8(%rsi),%r11
# qhasm: carry? b2 += *(uint64 *) (pp + 16) + carry
# asm 1: adcq 16(<pp=int64#2),<b2=int64#10
# asm 2: adcq 16(<pp=%rsi),<b2=%r12
adcq 16(%rsi),%r12
# qhasm: carry? b3 += *(uint64 *) (pp + 24) + carry
# asm 1: adcq 24(<pp=int64#2),<b3=int64#11
# asm 2: adcq 24(<pp=%rsi),<b3=%r13
adcq 24(%rsi),%r13
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#12
# asm 2: mov $0,>addt0=%r14
mov $0,%r14
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#13
# asm 2: mov $38,>addt1=%r15
mov $38,%r15
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#12,<addt1=int64#13
# asm 2: cmovae <addt0=%r14,<addt1=%r15
cmovae %r14,%r15
# qhasm: carry? b0 += addt1
# asm 1: add <addt1=int64#13,<b0=int64#8
# asm 2: add <addt1=%r15,<b0=%r10
add %r15,%r10
# qhasm: carry? b1 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b1=int64#9
# asm 2: adc <addt0=%r14,<b1=%r11
adc %r14,%r11
# qhasm: carry? b2 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b2=int64#10
# asm 2: adc <addt0=%r14,<b2=%r12
adc %r14,%r12
# qhasm: carry? b3 += addt0 + carry
# asm 1: adc <addt0=int64#12,<b3=int64#11
# asm 2: adc <addt0=%r14,<b3=%r13
adc %r14,%r13
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#13,<addt0=int64#12
# asm 2: cmovc <addt1=%r15,<addt0=%r14
cmovc %r15,%r14
# qhasm: b0 += addt0
# asm 1: add <addt0=int64#12,<b0=int64#8
# asm 2: add <addt0=%r14,<b0=%r10
add %r14,%r10
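# Note (added): the mirror image of the subtraction above: b = Y1 + X1,
# where a carry out of the top limb stands for 2^256 = 38 (mod 2^255 - 19)
# and is folded back by a constant-time conditional add of 38, plus one
# more conditional add of 38 into b0 for the (rare) second carry.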
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
movq %rdx,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#5,>a1_stack=stack64#9
# asm 2: movq <a1=%r8,>a1_stack=64(%rsp)
movq %r8,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#6,>a2_stack=stack64#10
# asm 2: movq <a2=%r9,>a2_stack=72(%rsp)
movq %r9,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#7,>a3_stack=stack64#11
# asm 2: movq <a3=%rax,>a3_stack=80(%rsp)
movq %rax,80(%rsp)
# qhasm: b0_stack = b0
# asm 1: movq <b0=int64#8,>b0_stack=stack64#12
# asm 2: movq <b0=%r10,>b0_stack=88(%rsp)
movq %r10,88(%rsp)
# qhasm: b1_stack = b1
# asm 1: movq <b1=int64#9,>b1_stack=stack64#13
# asm 2: movq <b1=%r11,>b1_stack=96(%rsp)
movq %r11,96(%rsp)
# qhasm: b2_stack = b2
# asm 1: movq <b2=int64#10,>b2_stack=stack64#14
# asm 2: movq <b2=%r12,>b2_stack=104(%rsp)
movq %r12,104(%rsp)
# qhasm: b3_stack = b3
# asm 1: movq <b3=int64#11,>b3_stack=stack64#15
# asm 2: movq <b3=%r13,>b3_stack=112(%rsp)
movq %r13,112(%rsp)
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = a0_stack
# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10
# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12
movq 56(%rsp),%r12
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<qp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: a0 = mulrax
# asm 1: mov <mulrax=int64#7,>a0=int64#11
# asm 2: mov <mulrax=%rax,>a0=%r13
mov %rax,%r13
# qhasm: a1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>a1=int64#12
# asm 2: mov <mulrdx=%rdx,>a1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<qp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#12
# asm 2: add <mulrax=%rax,<a1=%r14
add %rax,%r14
# qhasm: a2 = 0
# asm 1: mov $0,>a2=int64#13
# asm 2: mov $0,>a2=%r15
mov $0,%r15
# qhasm: a2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a2=int64#13
# asm 2: adc <mulrdx=%rdx,<a2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<qp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: a3 = 0
# asm 1: mov $0,>a3=int64#14
# asm 2: mov $0,>a3=%rbx
mov $0,%rbx
# qhasm: a3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a3=int64#14
# asm 2: adc <mulrdx=%rdx,<a3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<qp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
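# Note (added): mulx0 = a0 has now been multiplied by all four limbs at
# qp+0..24, seeding a 4x4 schoolbook product. Each `mul` leaves a 128-bit
# partial product in rdx:rax; the rax half is added into the running limb
# and the rdx half rides into the next limb via adc. In this codebase qp
# appears to point at a precomputed niels point whose first field is
# Y2-X2, so this block starts A = (Y1-X1)*(Y2-X2); the three passes that
# follow repeat the pattern for a1, a2, a3, using mulc to carry each rdx
# across its column.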
# qhasm: mulx1 = a1_stack
# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10
# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12
movq 64(%rsp),%r12
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<qp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#12
# asm 2: add <mulrax=%rax,<a1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<qp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a2 += mulc
# asm 1: add <mulc=int64#15,<a2=int64#13
# asm 2: add <mulc=%rbp,<a2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<qp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#15,<a3=int64#14
# asm 2: add <mulc=%rbp,<a3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<qp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = a2_stack
# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10
# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12
movq 72(%rsp),%r12
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<qp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#13
# asm 2: add <mulrax=%rax,<a2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<qp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#15,<a3=int64#14
# asm 2: add <mulc=%rbp,<a3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<qp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<qp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = a3_stack
# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10
# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12
movq 80(%rsp),%r12
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 0(<qp=%rcx),>mulrax=%rax
movq 0(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#14
# asm 2: add <mulrax=%rax,<a3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 8(<qp=%rcx),>mulrax=%rax
movq 8(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 16(<qp=%rcx),>mulrax=%rax
movq 16(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 24(<qp=%rcx),>mulrax=%rax
movq 24(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? a0 += mulr4
# asm 1: add <mulr4=int64#5,<a0=int64#11
# asm 2: add <mulr4=%r8,<a0=%r13
add %r8,%r13
# qhasm: carry? a1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<a1=int64#12
# asm 2: adc <mulr5=%r9,<a1=%r14
adc %r9,%r14
# qhasm: carry? a2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<a2=int64#13
# asm 2: adc <mulr6=%r10,<a2=%r15
adc %r10,%r15
# qhasm: carry? a3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<a3=int64#14
# asm 2: adc <mulr7=%r11,<a3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? a0 += mulr8
# asm 1: add <mulr8=int64#5,<a0=int64#11
# asm 2: add <mulr8=%r8,<a0=%r13
add %r8,%r13
# qhasm: carry? a1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a1=int64#12
# asm 2: adc <mulzero=%rdx,<a1=%r14
adc %rdx,%r14
# qhasm: carry? a2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a2=int64#13
# asm 2: adc <mulzero=%rdx,<a2=%r15
adc %rdx,%r15
# qhasm: carry? a3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a3=int64#14
# asm 2: adc <mulzero=%rdx,<a3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: a0 += mulzero
# asm 1: add <mulzero=int64#3,<a0=int64#11
# asm 2: add <mulzero=%rdx,<a0=%r13
add %rdx,%r13
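# Note (added): at this point the full 512-bit product sits in a0..a3
# (low half) and mulr4..mulr8 (high half). Each high limb is multiplied
# by the 64-bit constant 38 via mulq CRYPTO_NAMESPACE(38) and folded into
# the low half, again because 2^256 = 38 (mod 2^255 - 19); the final
# carry bit is folded once more with imulq $38, leaving a weakly reduced
# 4-limb value (< 2^256, not necessarily fully reduced below 2^255 - 19).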
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#11,>a0_stack=stack64#8
# asm 2: movq <a0=%r13,>a0_stack=56(%rsp)
movq %r13,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#12,>a1_stack=stack64#9
# asm 2: movq <a1=%r14,>a1_stack=64(%rsp)
movq %r14,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#13,>a2_stack=stack64#10
# asm 2: movq <a2=%r15,>a2_stack=72(%rsp)
movq %r15,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#14,>a3_stack=stack64#11
# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp)
movq %rbx,80(%rsp)
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
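# Note (added): same machinery, second operand: with the reduced a saved
# back to the stack, the accumulators are re-zeroed and the next 4x4
# schoolbook product begins, multiplying b = Y1 + X1 by the four limbs at
# qp+32..56 (in this layout, presumably the niels point's Y2+X2
# component) into e0..e3.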
  921. # qhasm: mulx0 = b0_stack
  922. # asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10
  923. # asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12
  924. movq 88(%rsp),%r12
  925. # qhasm: mulrax = *(uint64 *)(qp + 32)
  926. # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
  927. # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
  928. movq 32(%rcx),%rax
  929. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  930. # asm 1: mul <mulx0=int64#10
  931. # asm 2: mul <mulx0=%r12
  932. mul %r12
  933. # qhasm: e0 = mulrax
  934. # asm 1: mov <mulrax=int64#7,>e0=int64#11
  935. # asm 2: mov <mulrax=%rax,>e0=%r13
  936. mov %rax,%r13
  937. # qhasm: e1 = mulrdx
  938. # asm 1: mov <mulrdx=int64#3,>e1=int64#12
  939. # asm 2: mov <mulrdx=%rdx,>e1=%r14
  940. mov %rdx,%r14
  941. # qhasm: mulrax = *(uint64 *)(qp + 40)
  942. # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
  943. # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
  944. movq 40(%rcx),%rax
  945. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  946. # asm 1: mul <mulx0=int64#10
  947. # asm 2: mul <mulx0=%r12
  948. mul %r12
  949. # qhasm: carry? e1 += mulrax
  950. # asm 1: add <mulrax=int64#7,<e1=int64#12
  951. # asm 2: add <mulrax=%rax,<e1=%r14
  952. add %rax,%r14
  953. # qhasm: e2 = 0
  954. # asm 1: mov $0,>e2=int64#13
  955. # asm 2: mov $0,>e2=%r15
  956. mov $0,%r15
  957. # qhasm: e2 += mulrdx + carry
  958. # asm 1: adc <mulrdx=int64#3,<e2=int64#13
  959. # asm 2: adc <mulrdx=%rdx,<e2=%r15
  960. adc %rdx,%r15
  961. # qhasm: mulrax = *(uint64 *)(qp + 48)
  962. # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
  963. # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
  964. movq 48(%rcx),%rax
  965. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  966. # asm 1: mul <mulx0=int64#10
  967. # asm 2: mul <mulx0=%r12
  968. mul %r12
  969. # qhasm: carry? e2 += mulrax
  970. # asm 1: add <mulrax=int64#7,<e2=int64#13
  971. # asm 2: add <mulrax=%rax,<e2=%r15
  972. add %rax,%r15
  973. # qhasm: e3 = 0
  974. # asm 1: mov $0,>e3=int64#14
  975. # asm 2: mov $0,>e3=%rbx
  976. mov $0,%rbx
  977. # qhasm: e3 += mulrdx + carry
  978. # asm 1: adc <mulrdx=int64#3,<e3=int64#14
  979. # asm 2: adc <mulrdx=%rdx,<e3=%rbx
  980. adc %rdx,%rbx
  981. # qhasm: mulrax = *(uint64 *)(qp + 56)
  982. # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
  983. # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
  984. movq 56(%rcx),%rax
  985. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  986. # asm 1: mul <mulx0=int64#10
  987. # asm 2: mul <mulx0=%r12
  988. mul %r12
  989. # qhasm: carry? e3 += mulrax
  990. # asm 1: add <mulrax=int64#7,<e3=int64#14
  991. # asm 2: add <mulrax=%rax,<e3=%rbx
  992. add %rax,%rbx
  993. # qhasm: mulr4 += mulrdx + carry
  994. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
  995. # asm 2: adc <mulrdx=%rdx,<mulr4=%r8
  996. adc %rdx,%r8
  997. # qhasm: mulx1 = b1_stack
  998. # asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10
  999. # asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12
  1000. movq 96(%rsp),%r12
  1001. # qhasm: mulrax = *(uint64 *)(qp + 32)
  1002. # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
  1003. # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
  1004. movq 32(%rcx),%rax
  1005. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1006. # asm 1: mul <mulx1=int64#10
  1007. # asm 2: mul <mulx1=%r12
  1008. mul %r12
  1009. # qhasm: carry? e1 += mulrax
  1010. # asm 1: add <mulrax=int64#7,<e1=int64#12
  1011. # asm 2: add <mulrax=%rax,<e1=%r14
  1012. add %rax,%r14
  1013. # qhasm: mulc = 0
  1014. # asm 1: mov $0,>mulc=int64#15
  1015. # asm 2: mov $0,>mulc=%rbp
  1016. mov $0,%rbp
  1017. # qhasm: mulc += mulrdx + carry
  1018. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  1019. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  1020. adc %rdx,%rbp
  1021. # qhasm: mulrax = *(uint64 *)(qp + 40)
  1022. # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
  1023. # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
  1024. movq 40(%rcx),%rax
  1025. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1026. # asm 1: mul <mulx1=int64#10
  1027. # asm 2: mul <mulx1=%r12
  1028. mul %r12
  1029. # qhasm: carry? e2 += mulrax
  1030. # asm 1: add <mulrax=int64#7,<e2=int64#13
  1031. # asm 2: add <mulrax=%rax,<e2=%r15
  1032. add %rax,%r15
  1033. # qhasm: mulrdx += 0 + carry
  1034. # asm 1: adc $0,<mulrdx=int64#3
  1035. # asm 2: adc $0,<mulrdx=%rdx
  1036. adc $0,%rdx
  1037. # qhasm: carry? e2 += mulc
  1038. # asm 1: add <mulc=int64#15,<e2=int64#13
  1039. # asm 2: add <mulc=%rbp,<e2=%r15
  1040. add %rbp,%r15
  1041. # qhasm: mulc = 0
  1042. # asm 1: mov $0,>mulc=int64#15
  1043. # asm 2: mov $0,>mulc=%rbp
  1044. mov $0,%rbp
  1045. # qhasm: mulc += mulrdx + carry
  1046. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  1047. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  1048. adc %rdx,%rbp
  1049. # qhasm: mulrax = *(uint64 *)(qp + 48)
  1050. # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
  1051. # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
  1052. movq 48(%rcx),%rax
  1053. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1054. # asm 1: mul <mulx1=int64#10
  1055. # asm 2: mul <mulx1=%r12
  1056. mul %r12
  1057. # qhasm: carry? e3 += mulrax
  1058. # asm 1: add <mulrax=int64#7,<e3=int64#14
  1059. # asm 2: add <mulrax=%rax,<e3=%rbx
  1060. add %rax,%rbx
  1061. # qhasm: mulrdx += 0 + carry
  1062. # asm 1: adc $0,<mulrdx=int64#3
  1063. # asm 2: adc $0,<mulrdx=%rdx
  1064. adc $0,%rdx
  1065. # qhasm: carry? e3 += mulc
  1066. # asm 1: add <mulc=int64#15,<e3=int64#14
  1067. # asm 2: add <mulc=%rbp,<e3=%rbx
  1068. add %rbp,%rbx
  1069. # qhasm: mulc = 0
  1070. # asm 1: mov $0,>mulc=int64#15
  1071. # asm 2: mov $0,>mulc=%rbp
  1072. mov $0,%rbp
  1073. # qhasm: mulc += mulrdx + carry
  1074. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  1075. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  1076. adc %rdx,%rbp
  1077. # qhasm: mulrax = *(uint64 *)(qp + 56)
  1078. # asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
  1079. # asm 2: movq 56(<qp=%rcx),>mulrax=%rax
  1080. movq 56(%rcx),%rax
  1081. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1082. # asm 1: mul <mulx1=int64#10
  1083. # asm 2: mul <mulx1=%r12
  1084. mul %r12
  1085. # qhasm: carry? mulr4 += mulrax
  1086. # asm 1: add <mulrax=int64#7,<mulr4=int64#5
  1087. # asm 2: add <mulrax=%rax,<mulr4=%r8
  1088. add %rax,%r8
  1089. # qhasm: mulrdx += 0 + carry
  1090. # asm 1: adc $0,<mulrdx=int64#3
  1091. # asm 2: adc $0,<mulrdx=%rdx
  1092. adc $0,%rdx
  1093. # qhasm: carry? mulr4 += mulc
  1094. # asm 1: add <mulc=int64#15,<mulr4=int64#5
  1095. # asm 2: add <mulc=%rbp,<mulr4=%r8
  1096. add %rbp,%r8
  1097. # qhasm: mulr5 += mulrdx + carry
  1098. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
  1099. # asm 2: adc <mulrdx=%rdx,<mulr5=%r9
  1100. adc %rdx,%r9
  1101. # qhasm: mulx2 = b2_stack
  1102. # asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10
  1103. # asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12
  1104. movq 104(%rsp),%r12
  1105. # qhasm: mulrax = *(uint64 *)(qp + 32)
  1106. # asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
  1107. # asm 2: movq 32(<qp=%rcx),>mulrax=%rax
  1108. movq 32(%rcx),%rax
  1109. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1110. # asm 1: mul <mulx2=int64#10
  1111. # asm 2: mul <mulx2=%r12
  1112. mul %r12
  1113. # qhasm: carry? e2 += mulrax
  1114. # asm 1: add <mulrax=int64#7,<e2=int64#13
  1115. # asm 2: add <mulrax=%rax,<e2=%r15
  1116. add %rax,%r15
  1117. # qhasm: mulc = 0
  1118. # asm 1: mov $0,>mulc=int64#15
  1119. # asm 2: mov $0,>mulc=%rbp
  1120. mov $0,%rbp
  1121. # qhasm: mulc += mulrdx + carry
  1122. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  1123. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  1124. adc %rdx,%rbp
  1125. # qhasm: mulrax = *(uint64 *)(qp + 40)
  1126. # asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
  1127. # asm 2: movq 40(<qp=%rcx),>mulrax=%rax
  1128. movq 40(%rcx),%rax
  1129. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1130. # asm 1: mul <mulx2=int64#10
  1131. # asm 2: mul <mulx2=%r12
  1132. mul %r12
  1133. # qhasm: carry? e3 += mulrax
  1134. # asm 1: add <mulrax=int64#7,<e3=int64#14
  1135. # asm 2: add <mulrax=%rax,<e3=%rbx
  1136. add %rax,%rbx
  1137. # qhasm: mulrdx += 0 + carry
  1138. # asm 1: adc $0,<mulrdx=int64#3
  1139. # asm 2: adc $0,<mulrdx=%rdx
  1140. adc $0,%rdx
  1141. # qhasm: carry? e3 += mulc
  1142. # asm 1: add <mulc=int64#15,<e3=int64#14
  1143. # asm 2: add <mulc=%rbp,<e3=%rbx
  1144. add %rbp,%rbx
  1145. # qhasm: mulc = 0
  1146. # asm 1: mov $0,>mulc=int64#15
  1147. # asm 2: mov $0,>mulc=%rbp
  1148. mov $0,%rbp
  1149. # qhasm: mulc += mulrdx + carry
  1150. # asm 1: adc <mulrdx=int64#3,<mulc=int64#15
  1151. # asm 2: adc <mulrdx=%rdx,<mulc=%rbp
  1152. adc %rdx,%rbp
  1153. # qhasm: mulrax = *(uint64 *)(qp + 48)
  1154. # asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
  1155. # asm 2: movq 48(<qp=%rcx),>mulrax=%rax
  1156. movq 48(%rcx),%rax
  1157. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1158. # asm 1: mul <mulx2=int64#10
  1159. # asm 2: mul <mulx2=%r12
  1160. mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 56)
# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 56(<qp=%rcx),>mulrax=%rax
movq 56(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = b3_stack
# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10
# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12
movq 112(%rsp),%r12
# qhasm: mulrax = *(uint64 *)(qp + 32)
# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 32(<qp=%rcx),>mulrax=%rax
movq 32(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? e3 += mulrax
# asm 1: add <mulrax=int64#7,<e3=int64#14
# asm 2: add <mulrax=%rax,<e3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 40)
# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 40(<qp=%rcx),>mulrax=%rax
movq 40(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 48)
# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 48(<qp=%rcx),>mulrax=%rax
movq 48(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 56)
# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 56(<qp=%rcx),>mulrax=%rax
movq 56(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
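# The 4x4 schoolbook multiplication above is complete: the 512-bit product
# now occupies the eight limbs e0..e3 (low half) and mulr4..mulr7 (high
# half).  The block below reduces it mod 2^255-19 using the identity
# 2^256 = 38 (mod 2^255-19): each high limb is multiplied by the constant
# 38 (loaded rip-relative from CRYPTO_NAMESPACE(38)) and folded back into
# the corresponding low limb.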
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
# asm 2: mov <mulrax=%rax,>mulr4=%r8
mov %rax,%r8
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr5=%r9
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
# asm 2: add <mulrax=%rax,<mulr7=%r11
add %rax,%r11
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
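# mulr8 is the carry limb of the 38-fold.  Adding mulr4..mulr7 into
# e0..e3 below can produce one more carry; it is absorbed into mulr8,
# scaled by 38 with imulq, and added back, and a possible final carry is
# caught in mulzero and folded once again.  The result in e0..e3 is
# congruent mod 2^255-19, though not necessarily fully reduced below the
# prime.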
# qhasm: carry? e0 += mulr4
# asm 1: add <mulr4=int64#5,<e0=int64#11
# asm 2: add <mulr4=%r8,<e0=%r13
add %r8,%r13
# qhasm: carry? e1 += mulr5 + carry
# asm 1: adc <mulr5=int64#6,<e1=int64#12
# asm 2: adc <mulr5=%r9,<e1=%r14
adc %r9,%r14
# qhasm: carry? e2 += mulr6 + carry
# asm 1: adc <mulr6=int64#8,<e2=int64#13
# asm 2: adc <mulr6=%r10,<e2=%r15
adc %r10,%r15
# qhasm: carry? e3 += mulr7 + carry
# asm 1: adc <mulr7=int64#9,<e3=int64#14
# asm 2: adc <mulr7=%r11,<e3=%rbx
adc %r11,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8
imulq $38,%rax,%r8
# qhasm: carry? e0 += mulr8
# asm 1: add <mulr8=int64#5,<e0=int64#11
# asm 2: add <mulr8=%r8,<e0=%r13
add %r8,%r13
# qhasm: carry? e1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e1=int64#12
# asm 2: adc <mulzero=%rdx,<e1=%r14
adc %rdx,%r14
# qhasm: carry? e2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e2=int64#13
# asm 2: adc <mulzero=%rdx,<e2=%r15
adc %rdx,%r15
# qhasm: carry? e3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e3=int64#14
# asm 2: adc <mulzero=%rdx,<e3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: e0 += mulzero
# asm 1: add <mulzero=int64#3,<e0=int64#11
# asm 2: add <mulzero=%rdx,<e0=%r13
add %rdx,%r13
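# e0..e3 now hold the reduced product.  Below, the pair h = e + a and
# e = e - a is formed, with a0..a3 reloaded from their stack slots; h is
# copied out of e first so both combinations can be computed.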
# qhasm: h0 = e0
# asm 1: mov <e0=int64#11,>h0=int64#3
# asm 2: mov <e0=%r13,>h0=%rdx
mov %r13,%rdx
# qhasm: h1 = e1
# asm 1: mov <e1=int64#12,>h1=int64#5
# asm 2: mov <e1=%r14,>h1=%r8
mov %r14,%r8
# qhasm: h2 = e2
# asm 1: mov <e2=int64#13,>h2=int64#6
# asm 2: mov <e2=%r15,>h2=%r9
mov %r15,%r9
# qhasm: h3 = e3
# asm 1: mov <e3=int64#14,>h3=int64#7
# asm 2: mov <e3=%rbx,>h3=%rax
mov %rbx,%rax
# qhasm: carry? e0 -= a0_stack
# asm 1: subq <a0_stack=stack64#8,<e0=int64#11
# asm 2: subq <a0_stack=56(%rsp),<e0=%r13
subq 56(%rsp),%r13
# qhasm: carry? e1 -= a1_stack - carry
# asm 1: sbbq <a1_stack=stack64#9,<e1=int64#12
# asm 2: sbbq <a1_stack=64(%rsp),<e1=%r14
sbbq 64(%rsp),%r14
# qhasm: carry? e2 -= a2_stack - carry
# asm 1: sbbq <a2_stack=stack64#10,<e2=int64#13
# asm 2: sbbq <a2_stack=72(%rsp),<e2=%r15
sbbq 72(%rsp),%r15
# qhasm: carry? e3 -= a3_stack - carry
# asm 1: sbbq <a3_stack=stack64#11,<e3=int64#14
# asm 2: sbbq <a3_stack=80(%rsp),<e3=%rbx
sbbq 80(%rsp),%rbx
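# A borrow out of this subtraction means the 4-limb result is off by
# 2^256, which is congruent to 38 mod 2^255-19.  cmovae keeps 38 in subt1
# only when a borrow occurred (0 otherwise), and a possible borrow from
# that correction is absorbed the same way once more.  The mirrored
# addition below uses the identical trick, adding 38 on carry out.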
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#8
# asm 2: mov $0,>subt0=%r10
mov $0,%r10
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#9
# asm 2: mov $38,>subt1=%r11
mov $38,%r11
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#8,<subt1=int64#9
# asm 2: cmovae <subt0=%r10,<subt1=%r11
cmovae %r10,%r11
# qhasm: carry? e0 -= subt1
# asm 1: sub <subt1=int64#9,<e0=int64#11
# asm 2: sub <subt1=%r11,<e0=%r13
sub %r11,%r13
# qhasm: carry? e1 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<e1=int64#12
# asm 2: sbb <subt0=%r10,<e1=%r14
sbb %r10,%r14
# qhasm: carry? e2 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<e2=int64#13
# asm 2: sbb <subt0=%r10,<e2=%r15
sbb %r10,%r15
# qhasm: carry? e3 -= subt0 - carry
# asm 1: sbb <subt0=int64#8,<e3=int64#14
# asm 2: sbb <subt0=%r10,<e3=%rbx
sbb %r10,%rbx
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#9,<subt0=int64#8
# asm 2: cmovc <subt1=%r11,<subt0=%r10
cmovc %r11,%r10
# qhasm: e0 -= subt0
# asm 1: sub <subt0=int64#8,<e0=int64#11
# asm 2: sub <subt0=%r10,<e0=%r13
sub %r10,%r13
# qhasm: carry? h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
# asm 2: addq <a0_stack=56(%rsp),<h0=%rdx
addq 56(%rsp),%rdx
# qhasm: carry? h1 += a1_stack + carry
# asm 1: adcq <a1_stack=stack64#9,<h1=int64#5
# asm 2: adcq <a1_stack=64(%rsp),<h1=%r8
adcq 64(%rsp),%r8
# qhasm: carry? h2 += a2_stack + carry
# asm 1: adcq <a2_stack=stack64#10,<h2=int64#6
# asm 2: adcq <a2_stack=72(%rsp),<h2=%r9
adcq 72(%rsp),%r9
# qhasm: carry? h3 += a3_stack + carry
# asm 1: adcq <a3_stack=stack64#11,<h3=int64#7
# asm 2: adcq <a3_stack=80(%rsp),<h3=%rax
adcq 80(%rsp),%rax
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#8
# asm 2: mov $0,>addt0=%r10
mov $0,%r10
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#9
# asm 2: mov $38,>addt1=%r11
mov $38,%r11
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#8,<addt1=int64#9
# asm 2: cmovae <addt0=%r10,<addt1=%r11
cmovae %r10,%r11
# qhasm: carry? h0 += addt1
# asm 1: add <addt1=int64#9,<h0=int64#3
# asm 2: add <addt1=%r11,<h0=%rdx
add %r11,%rdx
# qhasm: carry? h1 += addt0 + carry
# asm 1: adc <addt0=int64#8,<h1=int64#5
# asm 2: adc <addt0=%r10,<h1=%r8
adc %r10,%r8
# qhasm: carry? h2 += addt0 + carry
# asm 1: adc <addt0=int64#8,<h2=int64#6
# asm 2: adc <addt0=%r10,<h2=%r9
adc %r10,%r9
# qhasm: carry? h3 += addt0 + carry
# asm 1: adc <addt0=int64#8,<h3=int64#7
# asm 2: adc <addt0=%r10,<h3=%rax
adc %r10,%rax
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#9,<addt0=int64#8
# asm 2: cmovc <addt1=%r11,<addt0=%r10
cmovc %r11,%r10
# qhasm: h0 += addt0
# asm 1: add <addt0=int64#8,<h0=int64#3
# asm 2: add <addt0=%r10,<h0=%rdx
add %r10,%rdx
# qhasm: *(uint64 *)(rp + 64) = h0
# asm 1: movq <h0=int64#3,64(<rp=int64#1)
# asm 2: movq <h0=%rdx,64(<rp=%rdi)
movq %rdx,64(%rdi)
# qhasm: *(uint64 *)(rp + 72) = h1
# asm 1: movq <h1=int64#5,72(<rp=int64#1)
# asm 2: movq <h1=%r8,72(<rp=%rdi)
movq %r8,72(%rdi)
# qhasm: *(uint64 *)(rp + 80) = h2
# asm 1: movq <h2=int64#6,80(<rp=int64#1)
# asm 2: movq <h2=%r9,80(<rp=%rdi)
movq %r9,80(%rdi)
# qhasm: *(uint64 *)(rp + 88) = h3
# asm 1: movq <h3=int64#7,88(<rp=int64#1)
# asm 2: movq <h3=%rax,88(<rp=%rdi)
movq %rax,88(%rdi)
# qhasm: *(uint64 *)(rp + 0) = e0
# asm 1: movq <e0=int64#11,0(<rp=int64#1)
# asm 2: movq <e0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = e1
# asm 1: movq <e1=int64#12,8(<rp=int64#1)
# asm 2: movq <e1=%r14,8(<rp=%rdi)
movq %r14,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = e2
# asm 1: movq <e2=int64#13,16(<rp=int64#1)
# asm 2: movq <e2=%r15,16(<rp=%rdi)
movq %r15,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = e3
# asm 1: movq <e3=int64#14,24(<rp=int64#1)
# asm 2: movq <e3=%rbx,24(<rp=%rdi)
movq %rbx,24(%rdi)
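# Same schoolbook multiply/reduce pattern again, this time computing
# c = (pp+96..120) * (qp+64..88) limb by limb into c0..c3, with overflow
# accumulating into mulr4..mulr7.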
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#5
# asm 2: mov $0,>mulr4=%r8
mov $0,%r8
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#6
# asm 2: mov $0,>mulr5=%r9
mov $0,%r9
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#8
# asm 2: mov $0,>mulr6=%r10
mov $0,%r10
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#9
# asm 2: mov $0,>mulr7=%r11
mov $0,%r11
# qhasm: mulx0 = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulx0=int64#10
# asm 2: movq 96(<pp=%rsi),>mulx0=%r12
movq 96(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: c0 = mulrax
# asm 1: mov <mulrax=int64#7,>c0=int64#11
# asm 2: mov <mulrax=%rax,>c0=%r13
mov %rax,%r13
# qhasm: c1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>c1=int64#12
# asm 2: mov <mulrdx=%rdx,>c1=%r14
mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: c2 = 0
# asm 1: mov $0,>c2=int64#13
# asm 2: mov $0,>c2=%r15
mov $0,%r15
# qhasm: c2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c2=int64#13
# asm 2: adc <mulrdx=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: c3 = 0
# asm 1: mov $0,>c3=int64#14
# asm 2: mov $0,>c3=%rbx
mov $0,%rbx
# qhasm: c3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<c3=int64#14
# asm 2: adc <mulrdx=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
# asm 2: mul <mulx0=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
adc %rdx,%r8
# qhasm: mulx1 = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulx1=int64#10
# asm 2: movq 104(<pp=%rsi),>mulx1=%r12
movq 104(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#12
# asm 2: add <mulrax=%rax,<c1=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c2 += mulc
# asm 1: add <mulc=int64#15,<c2=int64#13
# asm 2: add <mulc=%rbp,<c2=%r15
add %rbp,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
# asm 2: mul <mulx1=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
adc %rdx,%r9
# qhasm: mulx2 = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulx2=int64#10
# asm 2: movq 112(<pp=%rsi),>mulx2=%r12
movq 112(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#13
# asm 2: add <mulrax=%rax,<c2=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? c3 += mulc
# asm 1: add <mulc=int64#15,<c3=int64#14
# asm 2: add <mulc=%rbp,<c3=%rbx
add %rbp,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
# asm 2: mul <mulx2=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
adc %rdx,%r10
# qhasm: mulx3 = *(uint64 *)(pp + 120)
# asm 1: movq 120(<pp=int64#2),>mulx3=int64#10
# asm 2: movq 120(<pp=%rsi),>mulx3=%r12
movq 120(%rsi),%r12
# qhasm: mulrax = *(uint64 *)(qp + 64)
# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 64(<qp=%rcx),>mulrax=%rax
movq 64(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#14
# asm 2: add <mulrax=%rax,<c3=%rbx
add %rax,%rbx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 72)
# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 72(<qp=%rcx),>mulrax=%rax
movq 72(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#5
# asm 2: add <mulrax=%rax,<mulr4=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#15,<mulr4=int64#5
# asm 2: add <mulc=%rbp,<mulr4=%r8
add %rbp,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 80)
# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 80(<qp=%rcx),>mulrax=%rax
movq 80(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
# asm 2: add <mulrax=%rax,<mulr5=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#15,<mulr5=int64#6
# asm 2: add <mulc=%rbp,<mulr5=%r9
add %rbp,%r9
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#15
# asm 2: mov $0,>mulc=%rbp
mov $0,%rbp
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(qp + 88)
# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7
# asm 2: movq 88(<qp=%rcx),>mulrax=%rax
movq 88(%rcx),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
# asm 2: mul <mulx3=%r12
mul %r12
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
# asm 2: add <mulrax=%rax,<mulr6=%r10
add %rax,%r10
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#15,<mulr6=int64#8
# asm 2: add <mulc=%rbp,<mulr6=%r10
add %rbp,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
adc %rdx,%r11
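# As before: fold the high limbs mulr4..mulr7 back into c0..c3 by
# multiplying them by 38 (2^256 = 38 mod 2^255-19).  Note the register
# assignment differs slightly here because qp is no longer needed, so
# %rcx is reused for mulr4.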
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov <mulr4=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov <mulrax=%rax,>mulr4=%rcx
mov %rax,%rcx
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#6,>mulrax=int64#7
# asm 2: mov <mulr5=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr5=%r8
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#8,>mulrax=int64#7
# asm 2: mov <mulr6=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#6
# asm 2: mov $0,>mulr6=%r9
mov $0,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
# asm 2: add <mulrax=%rax,<mulr6=%r9
add %rax,%r9
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#9,>mulrax=int64#7
# asm 2: mov <mulr7=%r11,>mulrax=%rax
mov %r11,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#8
# asm 2: mov $0,>mulr7=%r10
mov $0,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
# asm 2: add <mulrax=%rax,<mulr7=%r10
add %rax,%r10
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? c0 += mulr4
# asm 1: add <mulr4=int64#4,<c0=int64#11
# asm 2: add <mulr4=%rcx,<c0=%r13
add %rcx,%r13
# qhasm: carry? c1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<c1=int64#12
# asm 2: adc <mulr5=%r8,<c1=%r14
adc %r8,%r14
# qhasm: carry? c2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<c2=int64#13
# asm 2: adc <mulr6=%r9,<c2=%r15
adc %r9,%r15
# qhasm: carry? c3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<c3=int64#14
# asm 2: adc <mulr7=%r10,<c3=%rbx
adc %r10,%rbx
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
imulq $38,%rax,%rcx
# qhasm: carry? c0 += mulr8
# asm 1: add <mulr8=int64#4,<c0=int64#11
# asm 2: add <mulr8=%rcx,<c0=%r13
add %rcx,%r13
# qhasm: carry? c1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c1=int64#12
# asm 2: adc <mulzero=%rdx,<c1=%r14
adc %rdx,%r14
# qhasm: carry? c2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c2=int64#13
# asm 2: adc <mulzero=%rdx,<c2=%r15
adc %rdx,%r15
# qhasm: carry? c3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c3=int64#14
# asm 2: adc <mulzero=%rdx,<c3=%rbx
adc %rdx,%rbx
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: c0 += mulzero
# asm 1: add <mulzero=int64#3,<c0=int64#11
# asm 2: add <mulzero=%rdx,<c0=%r13
add %rdx,%r13
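# c0..c3 now hold the reduced product.  Next, f = 2*(pp+64..88) is formed
# by an add-with-carry doubling chain, reduced with the same conditional
# 38 on carry out, and then split into the pair f - c and f + c.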
# qhasm: f0 = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>f0=int64#3
# asm 2: movq 64(<pp=%rsi),>f0=%rdx
movq 64(%rsi),%rdx
# qhasm: f1 = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>f1=int64#4
# asm 2: movq 72(<pp=%rsi),>f1=%rcx
movq 72(%rsi),%rcx
# qhasm: f2 = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>f2=int64#5
# asm 2: movq 80(<pp=%rsi),>f2=%r8
movq 80(%rsi),%r8
# qhasm: f3 = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>f3=int64#2
# asm 2: movq 88(<pp=%rsi),>f3=%rsi
movq 88(%rsi),%rsi
# qhasm: carry? f0 += f0
# asm 1: add <f0=int64#3,<f0=int64#3
# asm 2: add <f0=%rdx,<f0=%rdx
add %rdx,%rdx
# qhasm: carry? f1 += f1 + carry
# asm 1: adc <f1=int64#4,<f1=int64#4
# asm 2: adc <f1=%rcx,<f1=%rcx
adc %rcx,%rcx
# qhasm: carry? f2 += f2 + carry
# asm 1: adc <f2=int64#5,<f2=int64#5
# asm 2: adc <f2=%r8,<f2=%r8
adc %r8,%r8
# qhasm: carry? f3 += f3 + carry
# asm 1: adc <f3=int64#2,<f3=int64#2
# asm 2: adc <f3=%rsi,<f3=%rsi
adc %rsi,%rsi
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#6
# asm 2: mov $0,>addt0=%r9
mov $0,%r9
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#7
# asm 2: mov $38,>addt1=%rax
mov $38,%rax
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#6,<addt1=int64#7
# asm 2: cmovae <addt0=%r9,<addt1=%rax
cmovae %r9,%rax
# qhasm: carry? f0 += addt1
# asm 1: add <addt1=int64#7,<f0=int64#3
# asm 2: add <addt1=%rax,<f0=%rdx
add %rax,%rdx
# qhasm: carry? f1 += addt0 + carry
# asm 1: adc <addt0=int64#6,<f1=int64#4
# asm 2: adc <addt0=%r9,<f1=%rcx
adc %r9,%rcx
# qhasm: carry? f2 += addt0 + carry
# asm 1: adc <addt0=int64#6,<f2=int64#5
# asm 2: adc <addt0=%r9,<f2=%r8
adc %r9,%r8
# qhasm: carry? f3 += addt0 + carry
# asm 1: adc <addt0=int64#6,<f3=int64#2
# asm 2: adc <addt0=%r9,<f3=%rsi
adc %r9,%rsi
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#7,<addt0=int64#6
# asm 2: cmovc <addt1=%rax,<addt0=%r9
cmovc %rax,%r9
# qhasm: f0 += addt0
# asm 1: add <addt0=int64#6,<f0=int64#3
# asm 2: add <addt0=%r9,<f0=%rdx
add %r9,%rdx
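# g0..g3 snapshot f before c is subtracted, so that both f - c (written
# to rp+96..120) and g = f + c (written to rp+32..56) are derived from
# the same doubled value.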
# qhasm: g0 = f0
# asm 1: mov <f0=int64#3,>g0=int64#6
# asm 2: mov <f0=%rdx,>g0=%r9
mov %rdx,%r9
# qhasm: g1 = f1
# asm 1: mov <f1=int64#4,>g1=int64#7
# asm 2: mov <f1=%rcx,>g1=%rax
mov %rcx,%rax
# qhasm: g2 = f2
# asm 1: mov <f2=int64#5,>g2=int64#8
# asm 2: mov <f2=%r8,>g2=%r10
mov %r8,%r10
# qhasm: g3 = f3
# asm 1: mov <f3=int64#2,>g3=int64#9
# asm 2: mov <f3=%rsi,>g3=%r11
mov %rsi,%r11
# qhasm: carry? f0 -= c0
# asm 1: sub <c0=int64#11,<f0=int64#3
# asm 2: sub <c0=%r13,<f0=%rdx
sub %r13,%rdx
# qhasm: carry? f1 -= c1 - carry
# asm 1: sbb <c1=int64#12,<f1=int64#4
# asm 2: sbb <c1=%r14,<f1=%rcx
sbb %r14,%rcx
# qhasm: carry? f2 -= c2 - carry
# asm 1: sbb <c2=int64#13,<f2=int64#5
# asm 2: sbb <c2=%r15,<f2=%r8
sbb %r15,%r8
# qhasm: carry? f3 -= c3 - carry
# asm 1: sbb <c3=int64#14,<f3=int64#2
# asm 2: sbb <c3=%rbx,<f3=%rsi
sbb %rbx,%rsi
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#10
# asm 2: mov $0,>subt0=%r12
mov $0,%r12
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#15
# asm 2: mov $38,>subt1=%rbp
mov $38,%rbp
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#10,<subt1=int64#15
# asm 2: cmovae <subt0=%r12,<subt1=%rbp
cmovae %r12,%rbp
# qhasm: carry? f0 -= subt1
# asm 1: sub <subt1=int64#15,<f0=int64#3
# asm 2: sub <subt1=%rbp,<f0=%rdx
sub %rbp,%rdx
# qhasm: carry? f1 -= subt0 - carry
# asm 1: sbb <subt0=int64#10,<f1=int64#4
# asm 2: sbb <subt0=%r12,<f1=%rcx
sbb %r12,%rcx
# qhasm: carry? f2 -= subt0 - carry
# asm 1: sbb <subt0=int64#10,<f2=int64#5
# asm 2: sbb <subt0=%r12,<f2=%r8
sbb %r12,%r8
# qhasm: carry? f3 -= subt0 - carry
# asm 1: sbb <subt0=int64#10,<f3=int64#2
# asm 2: sbb <subt0=%r12,<f3=%rsi
sbb %r12,%rsi
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#15,<subt0=int64#10
# asm 2: cmovc <subt1=%rbp,<subt0=%r12
cmovc %rbp,%r12
# qhasm: f0 -= subt0
# asm 1: sub <subt0=int64#10,<f0=int64#3
# asm 2: sub <subt0=%r12,<f0=%rdx
sub %r12,%rdx
# qhasm: carry? g0 += c0
# asm 1: add <c0=int64#11,<g0=int64#6
# asm 2: add <c0=%r13,<g0=%r9
add %r13,%r9
# qhasm: carry? g1 += c1 + carry
# asm 1: adc <c1=int64#12,<g1=int64#7
# asm 2: adc <c1=%r14,<g1=%rax
adc %r14,%rax
# qhasm: carry? g2 += c2 + carry
# asm 1: adc <c2=int64#13,<g2=int64#8
# asm 2: adc <c2=%r15,<g2=%r10
adc %r15,%r10
# qhasm: carry? g3 += c3 + carry
# asm 1: adc <c3=int64#14,<g3=int64#9
# asm 2: adc <c3=%rbx,<g3=%r11
adc %rbx,%r11
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#10
# asm 2: mov $0,>addt0=%r12
mov $0,%r12
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#11
# asm 2: mov $38,>addt1=%r13
mov $38,%r13
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#10,<addt1=int64#11
# asm 2: cmovae <addt0=%r12,<addt1=%r13
cmovae %r12,%r13
# qhasm: carry? g0 += addt1
# asm 1: add <addt1=int64#11,<g0=int64#6
# asm 2: add <addt1=%r13,<g0=%r9
add %r13,%r9
# qhasm: carry? g1 += addt0 + carry
# asm 1: adc <addt0=int64#10,<g1=int64#7
# asm 2: adc <addt0=%r12,<g1=%rax
adc %r12,%rax
# qhasm: carry? g2 += addt0 + carry
# asm 1: adc <addt0=int64#10,<g2=int64#8
# asm 2: adc <addt0=%r12,<g2=%r10
adc %r12,%r10
# qhasm: carry? g3 += addt0 + carry
# asm 1: adc <addt0=int64#10,<g3=int64#9
# asm 2: adc <addt0=%r12,<g3=%r11
adc %r12,%r11
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#11,<addt0=int64#10
# asm 2: cmovc <addt1=%r13,<addt0=%r12
cmovc %r13,%r12
# qhasm: g0 += addt0
# asm 1: add <addt0=int64#10,<g0=int64#6
# asm 2: add <addt0=%r12,<g0=%r9
add %r12,%r9
# qhasm: *(uint64 *)(rp + 32) = g0
# asm 1: movq <g0=int64#6,32(<rp=int64#1)
# asm 2: movq <g0=%r9,32(<rp=%rdi)
movq %r9,32(%rdi)
# qhasm: *(uint64 *)(rp + 40) = g1
# asm 1: movq <g1=int64#7,40(<rp=int64#1)
# asm 2: movq <g1=%rax,40(<rp=%rdi)
movq %rax,40(%rdi)
# qhasm: *(uint64 *)(rp + 48) = g2
# asm 1: movq <g2=int64#8,48(<rp=int64#1)
# asm 2: movq <g2=%r10,48(<rp=%rdi)
movq %r10,48(%rdi)
# qhasm: *(uint64 *)(rp + 56) = g3
# asm 1: movq <g3=int64#9,56(<rp=int64#1)
# asm 2: movq <g3=%r11,56(<rp=%rdi)
movq %r11,56(%rdi)
# qhasm: *(uint64 *)(rp + 96) = f0
# asm 1: movq <f0=int64#3,96(<rp=int64#1)
# asm 2: movq <f0=%rdx,96(<rp=%rdi)
movq %rdx,96(%rdi)
# qhasm: *(uint64 *)(rp + 104) = f1
# asm 1: movq <f1=int64#4,104(<rp=int64#1)
# asm 2: movq <f1=%rcx,104(<rp=%rdi)
movq %rcx,104(%rdi)
# qhasm: *(uint64 *)(rp + 112) = f2
# asm 1: movq <f2=int64#5,112(<rp=int64#1)
# asm 2: movq <f2=%r8,112(<rp=%rdi)
movq %r8,112(%rdi)
# qhasm: *(uint64 *)(rp + 120) = f3
# asm 1: movq <f3=int64#2,120(<rp=int64#1)
# asm 2: movq <f3=%rsi,120(<rp=%rdi)
movq %rsi,120(%rdi)
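# All output limbs are stored.  The epilogue below reloads the registers
# spilled at function entry from their stack slots; the value restored
# into %r11 (caller1_stack) is presumably the stack-adjustment amount
# computed by qhasm's standard prologue, consumed by the leave sequence.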
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
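# qhasm's leave sequence: undo the stack adjustment held in %r11 and
# return; %rax/%rdx are loaded from %rdi/%rsi following the return-value
# convention of qhasm-generated code.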
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret