ge25519_p1p1_to_p3.S

  1. # qhasm: int64 rp
  2. # qhasm: int64 pp
  3. # qhasm: input rp
  4. # qhasm: input pp
  5. # qhasm: int64 caller1
  6. # qhasm: int64 caller2
  7. # qhasm: int64 caller3
  8. # qhasm: int64 caller4
  9. # qhasm: int64 caller5
  10. # qhasm: int64 caller6
  11. # qhasm: int64 caller7
  12. # qhasm: caller caller1
  13. # qhasm: caller caller2
  14. # qhasm: caller caller3
  15. # qhasm: caller caller4
  16. # qhasm: caller caller5
  17. # qhasm: caller caller6
  18. # qhasm: caller caller7
  19. # qhasm: stack64 caller1_stack
  20. # qhasm: stack64 caller2_stack
  21. # qhasm: stack64 caller3_stack
  22. # qhasm: stack64 caller4_stack
  23. # qhasm: stack64 caller5_stack
  24. # qhasm: stack64 caller6_stack
  25. # qhasm: stack64 caller7_stack
  26. # qhasm: int64 rx0
  27. # qhasm: int64 rx1
  28. # qhasm: int64 rx2
  29. # qhasm: int64 rx3
  30. # qhasm: int64 rx4
  31. # qhasm: int64 ry0
  32. # qhasm: int64 ry1
  33. # qhasm: int64 ry2
  34. # qhasm: int64 ry3
  35. # qhasm: int64 ry4
  36. # qhasm: int64 rz0
  37. # qhasm: int64 rz1
  38. # qhasm: int64 rz2
  39. # qhasm: int64 rz3
  40. # qhasm: int64 rz4
  41. # qhasm: int64 rt0
  42. # qhasm: int64 rt1
  43. # qhasm: int64 rt2
  44. # qhasm: int64 rt3
  45. # qhasm: int64 rt4
  46. # qhasm: int64 mulr01
  47. # qhasm: int64 mulr11
  48. # qhasm: int64 mulr21
  49. # qhasm: int64 mulr31
  50. # qhasm: int64 mulr41
  51. # qhasm: int64 mulrax
  52. # qhasm: int64 mulrdx
  53. # qhasm: int64 mult
  54. # qhasm: int64 mulredmask
  55. # qhasm: stack64 mulx219_stack
  56. # qhasm: stack64 mulx319_stack
  57. # qhasm: stack64 mulx419_stack
  58. # qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
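# Note added for readability (not part of the generated output): this file
# appears to be qhasm-generated.  Each operation below is shown as a
# "# qhasm:" line with the original qhasm statement, a "# asm 1:" line with
# symbolic operands, a "# asm 2:" line after register allocation, and then
# the actual x86-64 instruction that gets assembled.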
  59. .text
  60. .p2align 5
  61. .globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
  62. .globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
  63. _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3):
  64. CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3):
  65. mov %rsp,%r11
  66. and $31,%r11
  67. add $96,%r11
  68. sub %r11,%rsp
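# Added overview (inferred from the loads and stores below, so treat it as a
# reading aid rather than a specification): this routine converts a ge25519
# point from the p1p1 representation (X:Y:Z:T) to the extended p3
# representation by multiplying pairs of field elements in GF(2^255-19).
# Each field element is held as five 64-bit words containing 51-bit limbs
# (radix 2^51), i.e. 40 bytes per element.  The prologue above reserves at
# least 96 bytes of 32-byte-aligned stack space for the seven callee-saved
# registers spilled next plus scratch slots for precomputed 19*limb values.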
  69. # qhasm: caller1_stack = caller1
  70. # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
  71. # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
  72. movq %r11,0(%rsp)
  73. # qhasm: caller2_stack = caller2
  74. # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
  75. # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
  76. movq %r12,8(%rsp)
  77. # qhasm: caller3_stack = caller3
  78. # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
  79. # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
  80. movq %r13,16(%rsp)
  81. # qhasm: caller4_stack = caller4
  82. # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
  83. # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
  84. movq %r14,24(%rsp)
  85. # qhasm: caller5_stack = caller5
  86. # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
  87. # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
  88. movq %r15,32(%rsp)
  89. # qhasm: caller6_stack = caller6
  90. # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
  91. # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
  92. movq %rbx,40(%rsp)
  93. # qhasm: caller7_stack = caller7
  94. # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
  95. # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
  96. movq %rbp,48(%rsp)
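# Assuming the usual amd64-51 field layout (X at pp+0, Y at pp+40, Z at
# pp+80, T at pp+120, five limbs each), the block below computes
# rx = X * T by schoolbook multiplication over the 5x5 limb pairs: each
# mulq produces a 128-bit partial product in rdx:rax, which is accumulated
# into one of five (rxN, mulrN1) register pairs before reduction.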
  97. # qhasm: mulrax = *(uint64 *)(pp + 24)
  98. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
  99. # asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
  100. movq 24(%rsi),%rdx
  101. # qhasm: mulrax *= 19
  102. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  103. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  104. imulq $19,%rdx,%rax
  105. # qhasm: mulx319_stack = mulrax
  106. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
  107. # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
  108. movq %rax,56(%rsp)
  109. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  110. # asm 1: mulq 136(<pp=int64#2)
  111. # asm 2: mulq 136(<pp=%rsi)
  112. mulq 136(%rsi)
  113. # qhasm: rx0 = mulrax
  114. # asm 1: mov <mulrax=int64#7,>rx0=int64#4
  115. # asm 2: mov <mulrax=%rax,>rx0=%rcx
  116. mov %rax,%rcx
  117. # qhasm: mulr01 = mulrdx
  118. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  119. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  120. mov %rdx,%r8
  121. # qhasm: mulrax = *(uint64 *)(pp + 32)
  122. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
  123. # asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
  124. movq 32(%rsi),%rdx
  125. # qhasm: mulrax *= 19
  126. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  127. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  128. imulq $19,%rdx,%rax
  129. # qhasm: mulx419_stack = mulrax
  130. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
  131. # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
  132. movq %rax,64(%rsp)
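# The two values spilled to 56(%rsp) and 64(%rsp) are 19 times the two
# highest limbs of the first operand (X[3] and X[4] under the layout
# assumed above).  A cross term X[i]*T[j] with i+j >= 5 carries a factor
# of 2^(5*51) = 2^255, and 2^255 is congruent to 19 modulo the field prime
# 2^255-19, so pre-scaling the high limbs by 19 lets those terms be added
# straight into the low result limbs.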
  133. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  134. # asm 1: mulq 128(<pp=int64#2)
  135. # asm 2: mulq 128(<pp=%rsi)
  136. mulq 128(%rsi)
  137. # qhasm: carry? rx0 += mulrax
  138. # asm 1: add <mulrax=int64#7,<rx0=int64#4
  139. # asm 2: add <mulrax=%rax,<rx0=%rcx
  140. add %rax,%rcx
  141. # qhasm: mulr01 += mulrdx + carry
  142. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  143. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  144. adc %rdx,%r8
  145. # qhasm: mulrax = *(uint64 *)(pp + 0)
  146. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  147. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  148. movq 0(%rsi),%rax
  149. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  150. # asm 1: mulq 120(<pp=int64#2)
  151. # asm 2: mulq 120(<pp=%rsi)
  152. mulq 120(%rsi)
  153. # qhasm: carry? rx0 += mulrax
  154. # asm 1: add <mulrax=int64#7,<rx0=int64#4
  155. # asm 2: add <mulrax=%rax,<rx0=%rcx
  156. add %rax,%rcx
  157. # qhasm: mulr01 += mulrdx + carry
  158. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  159. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  160. adc %rdx,%r8
  161. # qhasm: mulrax = *(uint64 *)(pp + 0)
  162. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  163. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  164. movq 0(%rsi),%rax
  165. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  166. # asm 1: mulq 128(<pp=int64#2)
  167. # asm 2: mulq 128(<pp=%rsi)
  168. mulq 128(%rsi)
  169. # qhasm: rx1 = mulrax
  170. # asm 1: mov <mulrax=int64#7,>rx1=int64#6
  171. # asm 2: mov <mulrax=%rax,>rx1=%r9
  172. mov %rax,%r9
  173. # qhasm: mulr11 = mulrdx
  174. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  175. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  176. mov %rdx,%r10
  177. # qhasm: mulrax = *(uint64 *)(pp + 0)
  178. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  179. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  180. movq 0(%rsi),%rax
  181. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  182. # asm 1: mulq 136(<pp=int64#2)
  183. # asm 2: mulq 136(<pp=%rsi)
  184. mulq 136(%rsi)
  185. # qhasm: rx2 = mulrax
  186. # asm 1: mov <mulrax=int64#7,>rx2=int64#9
  187. # asm 2: mov <mulrax=%rax,>rx2=%r11
  188. mov %rax,%r11
  189. # qhasm: mulr21 = mulrdx
  190. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  191. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  192. mov %rdx,%r12
  193. # qhasm: mulrax = *(uint64 *)(pp + 0)
  194. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  195. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  196. movq 0(%rsi),%rax
  197. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  198. # asm 1: mulq 144(<pp=int64#2)
  199. # asm 2: mulq 144(<pp=%rsi)
  200. mulq 144(%rsi)
  201. # qhasm: rx3 = mulrax
  202. # asm 1: mov <mulrax=int64#7,>rx3=int64#11
  203. # asm 2: mov <mulrax=%rax,>rx3=%r13
  204. mov %rax,%r13
  205. # qhasm: mulr31 = mulrdx
  206. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  207. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  208. mov %rdx,%r14
  209. # qhasm: mulrax = *(uint64 *)(pp + 0)
  210. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  211. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  212. movq 0(%rsi),%rax
  213. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  214. # asm 1: mulq 152(<pp=int64#2)
  215. # asm 2: mulq 152(<pp=%rsi)
  216. mulq 152(%rsi)
  217. # qhasm: rx4 = mulrax
  218. # asm 1: mov <mulrax=int64#7,>rx4=int64#13
  219. # asm 2: mov <mulrax=%rax,>rx4=%r15
  220. mov %rax,%r15
  221. # qhasm: mulr41 = mulrdx
  222. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  223. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  224. mov %rdx,%rbx
  225. # qhasm: mulrax = *(uint64 *)(pp + 8)
  226. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  227. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  228. movq 8(%rsi),%rax
  229. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  230. # asm 1: mulq 120(<pp=int64#2)
  231. # asm 2: mulq 120(<pp=%rsi)
  232. mulq 120(%rsi)
  233. # qhasm: carry? rx1 += mulrax
  234. # asm 1: add <mulrax=int64#7,<rx1=int64#6
  235. # asm 2: add <mulrax=%rax,<rx1=%r9
  236. add %rax,%r9
  237. # qhasm: mulr11 += mulrdx + carry
  238. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  239. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  240. adc %rdx,%r10
  241. # qhasm: mulrax = *(uint64 *)(pp + 8)
  242. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  243. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  244. movq 8(%rsi),%rax
  245. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  246. # asm 1: mulq 128(<pp=int64#2)
  247. # asm 2: mulq 128(<pp=%rsi)
  248. mulq 128(%rsi)
  249. # qhasm: carry? rx2 += mulrax
  250. # asm 1: add <mulrax=int64#7,<rx2=int64#9
  251. # asm 2: add <mulrax=%rax,<rx2=%r11
  252. add %rax,%r11
  253. # qhasm: mulr21 += mulrdx + carry
  254. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  255. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  256. adc %rdx,%r12
  257. # qhasm: mulrax = *(uint64 *)(pp + 8)
  258. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  259. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  260. movq 8(%rsi),%rax
  261. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  262. # asm 1: mulq 136(<pp=int64#2)
  263. # asm 2: mulq 136(<pp=%rsi)
  264. mulq 136(%rsi)
  265. # qhasm: carry? rx3 += mulrax
  266. # asm 1: add <mulrax=int64#7,<rx3=int64#11
  267. # asm 2: add <mulrax=%rax,<rx3=%r13
  268. add %rax,%r13
  269. # qhasm: mulr31 += mulrdx + carry
  270. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  271. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  272. adc %rdx,%r14
  273. # qhasm: mulrax = *(uint64 *)(pp + 8)
  274. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  275. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  276. movq 8(%rsi),%rax
  277. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  278. # asm 1: mulq 144(<pp=int64#2)
  279. # asm 2: mulq 144(<pp=%rsi)
  280. mulq 144(%rsi)
  281. # qhasm: carry? rx4 += mulrax
  282. # asm 1: add <mulrax=int64#7,<rx4=int64#13
  283. # asm 2: add <mulrax=%rax,<rx4=%r15
  284. add %rax,%r15
  285. # qhasm: mulr41 += mulrdx + carry
  286. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  287. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  288. adc %rdx,%rbx
  289. # qhasm: mulrax = *(uint64 *)(pp + 8)
  290. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
  291. # asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
  292. movq 8(%rsi),%rdx
  293. # qhasm: mulrax *= 19
  294. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  295. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  296. imulq $19,%rdx,%rax
  297. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  298. # asm 1: mulq 152(<pp=int64#2)
  299. # asm 2: mulq 152(<pp=%rsi)
  300. mulq 152(%rsi)
  301. # qhasm: carry? rx0 += mulrax
  302. # asm 1: add <mulrax=int64#7,<rx0=int64#4
  303. # asm 2: add <mulrax=%rax,<rx0=%rcx
  304. add %rax,%rcx
  305. # qhasm: mulr01 += mulrdx + carry
  306. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  307. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  308. adc %rdx,%r8
  309. # qhasm: mulrax = *(uint64 *)(pp + 16)
  310. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  311. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  312. movq 16(%rsi),%rax
  313. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  314. # asm 1: mulq 120(<pp=int64#2)
  315. # asm 2: mulq 120(<pp=%rsi)
  316. mulq 120(%rsi)
  317. # qhasm: carry? rx2 += mulrax
  318. # asm 1: add <mulrax=int64#7,<rx2=int64#9
  319. # asm 2: add <mulrax=%rax,<rx2=%r11
  320. add %rax,%r11
  321. # qhasm: mulr21 += mulrdx + carry
  322. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  323. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  324. adc %rdx,%r12
  325. # qhasm: mulrax = *(uint64 *)(pp + 16)
  326. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  327. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  328. movq 16(%rsi),%rax
  329. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  330. # asm 1: mulq 128(<pp=int64#2)
  331. # asm 2: mulq 128(<pp=%rsi)
  332. mulq 128(%rsi)
  333. # qhasm: carry? rx3 += mulrax
  334. # asm 1: add <mulrax=int64#7,<rx3=int64#11
  335. # asm 2: add <mulrax=%rax,<rx3=%r13
  336. add %rax,%r13
  337. # qhasm: mulr31 += mulrdx + carry
  338. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  339. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  340. adc %rdx,%r14
  341. # qhasm: mulrax = *(uint64 *)(pp + 16)
  342. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  343. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  344. movq 16(%rsi),%rax
  345. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  346. # asm 1: mulq 136(<pp=int64#2)
  347. # asm 2: mulq 136(<pp=%rsi)
  348. mulq 136(%rsi)
  349. # qhasm: carry? rx4 += mulrax
  350. # asm 1: add <mulrax=int64#7,<rx4=int64#13
  351. # asm 2: add <mulrax=%rax,<rx4=%r15
  352. add %rax,%r15
  353. # qhasm: mulr41 += mulrdx + carry
  354. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  355. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  356. adc %rdx,%rbx
  357. # qhasm: mulrax = *(uint64 *)(pp + 16)
  358. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  359. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  360. movq 16(%rsi),%rdx
  361. # qhasm: mulrax *= 19
  362. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  363. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  364. imulq $19,%rdx,%rax
  365. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  366. # asm 1: mulq 144(<pp=int64#2)
  367. # asm 2: mulq 144(<pp=%rsi)
  368. mulq 144(%rsi)
  369. # qhasm: carry? rx0 += mulrax
  370. # asm 1: add <mulrax=int64#7,<rx0=int64#4
  371. # asm 2: add <mulrax=%rax,<rx0=%rcx
  372. add %rax,%rcx
  373. # qhasm: mulr01 += mulrdx + carry
  374. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  375. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  376. adc %rdx,%r8
  377. # qhasm: mulrax = *(uint64 *)(pp + 16)
  378. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  379. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  380. movq 16(%rsi),%rdx
  381. # qhasm: mulrax *= 19
  382. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  383. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  384. imulq $19,%rdx,%rax
  385. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  386. # asm 1: mulq 152(<pp=int64#2)
  387. # asm 2: mulq 152(<pp=%rsi)
  388. mulq 152(%rsi)
  389. # qhasm: carry? rx1 += mulrax
  390. # asm 1: add <mulrax=int64#7,<rx1=int64#6
  391. # asm 2: add <mulrax=%rax,<rx1=%r9
  392. add %rax,%r9
  393. # qhasm: mulr11 += mulrdx + carry
  394. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  395. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  396. adc %rdx,%r10
  397. # qhasm: mulrax = *(uint64 *)(pp + 24)
  398. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  399. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  400. movq 24(%rsi),%rax
  401. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  402. # asm 1: mulq 120(<pp=int64#2)
  403. # asm 2: mulq 120(<pp=%rsi)
  404. mulq 120(%rsi)
  405. # qhasm: carry? rx3 += mulrax
  406. # asm 1: add <mulrax=int64#7,<rx3=int64#11
  407. # asm 2: add <mulrax=%rax,<rx3=%r13
  408. add %rax,%r13
  409. # qhasm: mulr31 += mulrdx + carry
  410. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  411. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  412. adc %rdx,%r14
  413. # qhasm: mulrax = *(uint64 *)(pp + 24)
  414. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  415. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  416. movq 24(%rsi),%rax
  417. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  418. # asm 1: mulq 128(<pp=int64#2)
  419. # asm 2: mulq 128(<pp=%rsi)
  420. mulq 128(%rsi)
  421. # qhasm: carry? rx4 += mulrax
  422. # asm 1: add <mulrax=int64#7,<rx4=int64#13
  423. # asm 2: add <mulrax=%rax,<rx4=%r15
  424. add %rax,%r15
  425. # qhasm: mulr41 += mulrdx + carry
  426. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  427. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  428. adc %rdx,%rbx
  429. # qhasm: mulrax = mulx319_stack
  430. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  431. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  432. movq 56(%rsp),%rax
  433. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  434. # asm 1: mulq 144(<pp=int64#2)
  435. # asm 2: mulq 144(<pp=%rsi)
  436. mulq 144(%rsi)
  437. # qhasm: carry? rx1 += mulrax
  438. # asm 1: add <mulrax=int64#7,<rx1=int64#6
  439. # asm 2: add <mulrax=%rax,<rx1=%r9
  440. add %rax,%r9
  441. # qhasm: mulr11 += mulrdx + carry
  442. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  443. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  444. adc %rdx,%r10
  445. # qhasm: mulrax = mulx319_stack
  446. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  447. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  448. movq 56(%rsp),%rax
  449. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  450. # asm 1: mulq 152(<pp=int64#2)
  451. # asm 2: mulq 152(<pp=%rsi)
  452. mulq 152(%rsi)
  453. # qhasm: carry? rx2 += mulrax
  454. # asm 1: add <mulrax=int64#7,<rx2=int64#9
  455. # asm 2: add <mulrax=%rax,<rx2=%r11
  456. add %rax,%r11
  457. # qhasm: mulr21 += mulrdx + carry
  458. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  459. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  460. adc %rdx,%r12
  461. # qhasm: mulrax = *(uint64 *)(pp + 32)
  462. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  463. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  464. movq 32(%rsi),%rax
  465. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  466. # asm 1: mulq 120(<pp=int64#2)
  467. # asm 2: mulq 120(<pp=%rsi)
  468. mulq 120(%rsi)
  469. # qhasm: carry? rx4 += mulrax
  470. # asm 1: add <mulrax=int64#7,<rx4=int64#13
  471. # asm 2: add <mulrax=%rax,<rx4=%r15
  472. add %rax,%r15
  473. # qhasm: mulr41 += mulrdx + carry
  474. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  475. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  476. adc %rdx,%rbx
  477. # qhasm: mulrax = mulx419_stack
  478. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  479. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  480. movq 64(%rsp),%rax
  481. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  482. # asm 1: mulq 136(<pp=int64#2)
  483. # asm 2: mulq 136(<pp=%rsi)
  484. mulq 136(%rsi)
  485. # qhasm: carry? rx1 += mulrax
  486. # asm 1: add <mulrax=int64#7,<rx1=int64#6
  487. # asm 2: add <mulrax=%rax,<rx1=%r9
  488. add %rax,%r9
  489. # qhasm: mulr11 += mulrdx + carry
  490. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  491. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  492. adc %rdx,%r10
  493. # qhasm: mulrax = mulx419_stack
  494. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  495. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  496. movq 64(%rsp),%rax
  497. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  498. # asm 1: mulq 144(<pp=int64#2)
  499. # asm 2: mulq 144(<pp=%rsi)
  500. mulq 144(%rsi)
  501. # qhasm: carry? rx2 += mulrax
  502. # asm 1: add <mulrax=int64#7,<rx2=int64#9
  503. # asm 2: add <mulrax=%rax,<rx2=%r11
  504. add %rax,%r11
  505. # qhasm: mulr21 += mulrdx + carry
  506. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  507. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  508. adc %rdx,%r12
  509. # qhasm: mulrax = mulx419_stack
  510. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  511. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  512. movq 64(%rsp),%rax
  513. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  514. # asm 1: mulq 152(<pp=int64#2)
  515. # asm 2: mulq 152(<pp=%rsi)
  516. mulq 152(%rsi)
  517. # qhasm: carry? rx3 += mulrax
  518. # asm 1: add <mulrax=int64#7,<rx3=int64#11
  519. # asm 2: add <mulrax=%rax,<rx3=%r13
  520. add %rax,%r13
  521. # qhasm: mulr31 += mulrdx + carry
  522. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  523. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  524. adc %rdx,%r14
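# End of the 5x5 limb accumulation for rx.  What follows is the standard
# radix-2^51 reduction: each "shld $13" forms (mulrN1<<13)|(rxN>>51), i.e.
# the bits at position 51 and above of the 128-bit accumulator (which fit
# in 64 bits here); the low words are masked with REDMASK51 = 2^51-1; each
# extracted carry is added into the next limb, with the limb-4 carry folded
# back into limb 0 after multiplication by 19; and a final sequential
# shift-by-51 carry pass leaves the limbs essentially reduced to 51 bits.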
  525. # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
  526. # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
  527. # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
  528. movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
  529. # qhasm: mulr01 = (mulr01.rx0) << 13
  530. # asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
  531. # asm 2: shld $13,<rx0=%rcx,<mulr01=%r8
  532. shld $13,%rcx,%r8
  533. # qhasm: rx0 &= mulredmask
  534. # asm 1: and <mulredmask=int64#3,<rx0=int64#4
  535. # asm 2: and <mulredmask=%rdx,<rx0=%rcx
  536. and %rdx,%rcx
  537. # qhasm: mulr11 = (mulr11.rx1) << 13
  538. # asm 1: shld $13,<rx1=int64#6,<mulr11=int64#8
  539. # asm 2: shld $13,<rx1=%r9,<mulr11=%r10
  540. shld $13,%r9,%r10
  541. # qhasm: rx1 &= mulredmask
  542. # asm 1: and <mulredmask=int64#3,<rx1=int64#6
  543. # asm 2: and <mulredmask=%rdx,<rx1=%r9
  544. and %rdx,%r9
  545. # qhasm: rx1 += mulr01
  546. # asm 1: add <mulr01=int64#5,<rx1=int64#6
  547. # asm 2: add <mulr01=%r8,<rx1=%r9
  548. add %r8,%r9
  549. # qhasm: mulr21 = (mulr21.rx2) << 13
  550. # asm 1: shld $13,<rx2=int64#9,<mulr21=int64#10
  551. # asm 2: shld $13,<rx2=%r11,<mulr21=%r12
  552. shld $13,%r11,%r12
  553. # qhasm: rx2 &= mulredmask
  554. # asm 1: and <mulredmask=int64#3,<rx2=int64#9
  555. # asm 2: and <mulredmask=%rdx,<rx2=%r11
  556. and %rdx,%r11
  557. # qhasm: rx2 += mulr11
  558. # asm 1: add <mulr11=int64#8,<rx2=int64#9
  559. # asm 2: add <mulr11=%r10,<rx2=%r11
  560. add %r10,%r11
  561. # qhasm: mulr31 = (mulr31.rx3) << 13
  562. # asm 1: shld $13,<rx3=int64#11,<mulr31=int64#12
  563. # asm 2: shld $13,<rx3=%r13,<mulr31=%r14
  564. shld $13,%r13,%r14
  565. # qhasm: rx3 &= mulredmask
  566. # asm 1: and <mulredmask=int64#3,<rx3=int64#11
  567. # asm 2: and <mulredmask=%rdx,<rx3=%r13
  568. and %rdx,%r13
  569. # qhasm: rx3 += mulr21
  570. # asm 1: add <mulr21=int64#10,<rx3=int64#11
  571. # asm 2: add <mulr21=%r12,<rx3=%r13
  572. add %r12,%r13
  573. # qhasm: mulr41 = (mulr41.rx4) << 13
  574. # asm 1: shld $13,<rx4=int64#13,<mulr41=int64#14
  575. # asm 2: shld $13,<rx4=%r15,<mulr41=%rbx
  576. shld $13,%r15,%rbx
  577. # qhasm: rx4 &= mulredmask
  578. # asm 1: and <mulredmask=int64#3,<rx4=int64#13
  579. # asm 2: and <mulredmask=%rdx,<rx4=%r15
  580. and %rdx,%r15
  581. # qhasm: rx4 += mulr31
  582. # asm 1: add <mulr31=int64#12,<rx4=int64#13
  583. # asm 2: add <mulr31=%r14,<rx4=%r15
  584. add %r14,%r15
  585. # qhasm: mulr41 = mulr41 * 19
  586. # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
  587. # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
  588. imulq $19,%rbx,%r8
  589. # qhasm: rx0 += mulr41
  590. # asm 1: add <mulr41=int64#5,<rx0=int64#4
  591. # asm 2: add <mulr41=%r8,<rx0=%rcx
  592. add %r8,%rcx
  593. # qhasm: mult = rx0
  594. # asm 1: mov <rx0=int64#4,>mult=int64#5
  595. # asm 2: mov <rx0=%rcx,>mult=%r8
  596. mov %rcx,%r8
  597. # qhasm: (uint64) mult >>= 51
  598. # asm 1: shr $51,<mult=int64#5
  599. # asm 2: shr $51,<mult=%r8
  600. shr $51,%r8
  601. # qhasm: mult += rx1
  602. # asm 1: add <rx1=int64#6,<mult=int64#5
  603. # asm 2: add <rx1=%r9,<mult=%r8
  604. add %r9,%r8
  605. # qhasm: rx1 = mult
  606. # asm 1: mov <mult=int64#5,>rx1=int64#6
  607. # asm 2: mov <mult=%r8,>rx1=%r9
  608. mov %r8,%r9
  609. # qhasm: (uint64) mult >>= 51
  610. # asm 1: shr $51,<mult=int64#5
  611. # asm 2: shr $51,<mult=%r8
  612. shr $51,%r8
  613. # qhasm: rx0 &= mulredmask
  614. # asm 1: and <mulredmask=int64#3,<rx0=int64#4
  615. # asm 2: and <mulredmask=%rdx,<rx0=%rcx
  616. and %rdx,%rcx
  617. # qhasm: mult += rx2
  618. # asm 1: add <rx2=int64#9,<mult=int64#5
  619. # asm 2: add <rx2=%r11,<mult=%r8
  620. add %r11,%r8
  621. # qhasm: rx2 = mult
  622. # asm 1: mov <mult=int64#5,>rx2=int64#7
  623. # asm 2: mov <mult=%r8,>rx2=%rax
  624. mov %r8,%rax
  625. # qhasm: (uint64) mult >>= 51
  626. # asm 1: shr $51,<mult=int64#5
  627. # asm 2: shr $51,<mult=%r8
  628. shr $51,%r8
  629. # qhasm: rx1 &= mulredmask
  630. # asm 1: and <mulredmask=int64#3,<rx1=int64#6
  631. # asm 2: and <mulredmask=%rdx,<rx1=%r9
  632. and %rdx,%r9
  633. # qhasm: mult += rx3
  634. # asm 1: add <rx3=int64#11,<mult=int64#5
  635. # asm 2: add <rx3=%r13,<mult=%r8
  636. add %r13,%r8
  637. # qhasm: rx3 = mult
  638. # asm 1: mov <mult=int64#5,>rx3=int64#8
  639. # asm 2: mov <mult=%r8,>rx3=%r10
  640. mov %r8,%r10
  641. # qhasm: (uint64) mult >>= 51
  642. # asm 1: shr $51,<mult=int64#5
  643. # asm 2: shr $51,<mult=%r8
  644. shr $51,%r8
  645. # qhasm: rx2 &= mulredmask
  646. # asm 1: and <mulredmask=int64#3,<rx2=int64#7
  647. # asm 2: and <mulredmask=%rdx,<rx2=%rax
  648. and %rdx,%rax
  649. # qhasm: mult += rx4
  650. # asm 1: add <rx4=int64#13,<mult=int64#5
  651. # asm 2: add <rx4=%r15,<mult=%r8
  652. add %r15,%r8
  653. # qhasm: rx4 = mult
  654. # asm 1: mov <mult=int64#5,>rx4=int64#9
  655. # asm 2: mov <mult=%r8,>rx4=%r11
  656. mov %r8,%r11
  657. # qhasm: (uint64) mult >>= 51
  658. # asm 1: shr $51,<mult=int64#5
  659. # asm 2: shr $51,<mult=%r8
  660. shr $51,%r8
  661. # qhasm: rx3 &= mulredmask
  662. # asm 1: and <mulredmask=int64#3,<rx3=int64#8
  663. # asm 2: and <mulredmask=%rdx,<rx3=%r10
  664. and %rdx,%r10
  665. # qhasm: mult *= 19
  666. # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
  667. # asm 2: imulq $19,<mult=%r8,>mult=%r8
  668. imulq $19,%r8,%r8
  669. # qhasm: rx0 += mult
  670. # asm 1: add <mult=int64#5,<rx0=int64#4
  671. # asm 2: add <mult=%r8,<rx0=%rcx
  672. add %r8,%rcx
  673. # qhasm: rx4 &= mulredmask
  674. # asm 1: and <mulredmask=int64#3,<rx4=int64#9
  675. # asm 2: and <mulredmask=%rdx,<rx4=%r11
  676. and %rdx,%r11
  677. # qhasm: *(uint64 *)(rp + 0) = rx0
  678. # asm 1: movq <rx0=int64#4,0(<rp=int64#1)
  679. # asm 2: movq <rx0=%rcx,0(<rp=%rdi)
  680. movq %rcx,0(%rdi)
  681. # qhasm: *(uint64 *)(rp + 8) = rx1
  682. # asm 1: movq <rx1=int64#6,8(<rp=int64#1)
  683. # asm 2: movq <rx1=%r9,8(<rp=%rdi)
  684. movq %r9,8(%rdi)
  685. # qhasm: *(uint64 *)(rp + 16) = rx2
  686. # asm 1: movq <rx2=int64#7,16(<rp=int64#1)
  687. # asm 2: movq <rx2=%rax,16(<rp=%rdi)
  688. movq %rax,16(%rdi)
  689. # qhasm: *(uint64 *)(rp + 24) = rx3
  690. # asm 1: movq <rx3=int64#8,24(<rp=int64#1)
  691. # asm 2: movq <rx3=%r10,24(<rp=%rdi)
  692. movq %r10,24(%rdi)
  693. # qhasm: *(uint64 *)(rp + 32) = rx4
  694. # asm 1: movq <rx4=int64#9,32(<rp=int64#1)
  695. # asm 2: movq <rx4=%r11,32(<rp=%rdi)
  696. movq %r11,32(%rdi)
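# The reduced X coordinate of the result has now been stored at rp+0..32.
# Under the same layout assumption, the code below repeats the identical
# multiply-and-reduce pattern for ry = Y * Z (limbs at pp+40..72 times
# limbs at pp+80..112), which becomes the Y coordinate of the p3 result.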
  697. # qhasm: mulrax = *(uint64 *)(pp + 104)
  698. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#3
  699. # asm 2: movq 104(<pp=%rsi),>mulrax=%rdx
  700. movq 104(%rsi),%rdx
  701. # qhasm: mulrax *= 19
  702. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  703. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  704. imulq $19,%rdx,%rax
  705. # qhasm: mulx319_stack = mulrax
  706. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
  707. # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
  708. movq %rax,56(%rsp)
  709. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  710. # asm 1: mulq 56(<pp=int64#2)
  711. # asm 2: mulq 56(<pp=%rsi)
  712. mulq 56(%rsi)
  713. # qhasm: ry0 = mulrax
  714. # asm 1: mov <mulrax=int64#7,>ry0=int64#4
  715. # asm 2: mov <mulrax=%rax,>ry0=%rcx
  716. mov %rax,%rcx
  717. # qhasm: mulr01 = mulrdx
  718. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  719. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  720. mov %rdx,%r8
  721. # qhasm: mulrax = *(uint64 *)(pp + 112)
  722. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#3
  723. # asm 2: movq 112(<pp=%rsi),>mulrax=%rdx
  724. movq 112(%rsi),%rdx
  725. # qhasm: mulrax *= 19
  726. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  727. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  728. imulq $19,%rdx,%rax
  729. # qhasm: mulx419_stack = mulrax
  730. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
  731. # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
  732. movq %rax,64(%rsp)
  733. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  734. # asm 1: mulq 48(<pp=int64#2)
  735. # asm 2: mulq 48(<pp=%rsi)
  736. mulq 48(%rsi)
  737. # qhasm: carry? ry0 += mulrax
  738. # asm 1: add <mulrax=int64#7,<ry0=int64#4
  739. # asm 2: add <mulrax=%rax,<ry0=%rcx
  740. add %rax,%rcx
  741. # qhasm: mulr01 += mulrdx + carry
  742. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  743. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  744. adc %rdx,%r8
  745. # qhasm: mulrax = *(uint64 *)(pp + 80)
  746. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  747. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  748. movq 80(%rsi),%rax
  749. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  750. # asm 1: mulq 40(<pp=int64#2)
  751. # asm 2: mulq 40(<pp=%rsi)
  752. mulq 40(%rsi)
  753. # qhasm: carry? ry0 += mulrax
  754. # asm 1: add <mulrax=int64#7,<ry0=int64#4
  755. # asm 2: add <mulrax=%rax,<ry0=%rcx
  756. add %rax,%rcx
  757. # qhasm: mulr01 += mulrdx + carry
  758. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  759. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  760. adc %rdx,%r8
  761. # qhasm: mulrax = *(uint64 *)(pp + 80)
  762. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  763. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  764. movq 80(%rsi),%rax
  765. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  766. # asm 1: mulq 48(<pp=int64#2)
  767. # asm 2: mulq 48(<pp=%rsi)
  768. mulq 48(%rsi)
  769. # qhasm: ry1 = mulrax
  770. # asm 1: mov <mulrax=int64#7,>ry1=int64#6
  771. # asm 2: mov <mulrax=%rax,>ry1=%r9
  772. mov %rax,%r9
  773. # qhasm: mulr11 = mulrdx
  774. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  775. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  776. mov %rdx,%r10
  777. # qhasm: mulrax = *(uint64 *)(pp + 80)
  778. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  779. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  780. movq 80(%rsi),%rax
  781. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  782. # asm 1: mulq 56(<pp=int64#2)
  783. # asm 2: mulq 56(<pp=%rsi)
  784. mulq 56(%rsi)
  785. # qhasm: ry2 = mulrax
  786. # asm 1: mov <mulrax=int64#7,>ry2=int64#9
  787. # asm 2: mov <mulrax=%rax,>ry2=%r11
  788. mov %rax,%r11
  789. # qhasm: mulr21 = mulrdx
  790. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  791. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  792. mov %rdx,%r12
  793. # qhasm: mulrax = *(uint64 *)(pp + 80)
  794. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  795. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  796. movq 80(%rsi),%rax
  797. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  798. # asm 1: mulq 64(<pp=int64#2)
  799. # asm 2: mulq 64(<pp=%rsi)
  800. mulq 64(%rsi)
  801. # qhasm: ry3 = mulrax
  802. # asm 1: mov <mulrax=int64#7,>ry3=int64#11
  803. # asm 2: mov <mulrax=%rax,>ry3=%r13
  804. mov %rax,%r13
  805. # qhasm: mulr31 = mulrdx
  806. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  807. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  808. mov %rdx,%r14
  809. # qhasm: mulrax = *(uint64 *)(pp + 80)
  810. # asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
  811. # asm 2: movq 80(<pp=%rsi),>mulrax=%rax
  812. movq 80(%rsi),%rax
  813. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  814. # asm 1: mulq 72(<pp=int64#2)
  815. # asm 2: mulq 72(<pp=%rsi)
  816. mulq 72(%rsi)
  817. # qhasm: ry4 = mulrax
  818. # asm 1: mov <mulrax=int64#7,>ry4=int64#13
  819. # asm 2: mov <mulrax=%rax,>ry4=%r15
  820. mov %rax,%r15
  821. # qhasm: mulr41 = mulrdx
  822. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  823. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  824. mov %rdx,%rbx
  825. # qhasm: mulrax = *(uint64 *)(pp + 88)
  826. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  827. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  828. movq 88(%rsi),%rax
  829. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  830. # asm 1: mulq 40(<pp=int64#2)
  831. # asm 2: mulq 40(<pp=%rsi)
  832. mulq 40(%rsi)
  833. # qhasm: carry? ry1 += mulrax
  834. # asm 1: add <mulrax=int64#7,<ry1=int64#6
  835. # asm 2: add <mulrax=%rax,<ry1=%r9
  836. add %rax,%r9
  837. # qhasm: mulr11 += mulrdx + carry
  838. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  839. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  840. adc %rdx,%r10
  841. # qhasm: mulrax = *(uint64 *)(pp + 88)
  842. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  843. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  844. movq 88(%rsi),%rax
  845. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  846. # asm 1: mulq 48(<pp=int64#2)
  847. # asm 2: mulq 48(<pp=%rsi)
  848. mulq 48(%rsi)
  849. # qhasm: carry? ry2 += mulrax
  850. # asm 1: add <mulrax=int64#7,<ry2=int64#9
  851. # asm 2: add <mulrax=%rax,<ry2=%r11
  852. add %rax,%r11
  853. # qhasm: mulr21 += mulrdx + carry
  854. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  855. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  856. adc %rdx,%r12
  857. # qhasm: mulrax = *(uint64 *)(pp + 88)
  858. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  859. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  860. movq 88(%rsi),%rax
  861. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  862. # asm 1: mulq 56(<pp=int64#2)
  863. # asm 2: mulq 56(<pp=%rsi)
  864. mulq 56(%rsi)
  865. # qhasm: carry? ry3 += mulrax
  866. # asm 1: add <mulrax=int64#7,<ry3=int64#11
  867. # asm 2: add <mulrax=%rax,<ry3=%r13
  868. add %rax,%r13
  869. # qhasm: mulr31 += mulrdx + carry
  870. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  871. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  872. adc %rdx,%r14
  873. # qhasm: mulrax = *(uint64 *)(pp + 88)
  874. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
  875. # asm 2: movq 88(<pp=%rsi),>mulrax=%rax
  876. movq 88(%rsi),%rax
  877. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  878. # asm 1: mulq 64(<pp=int64#2)
  879. # asm 2: mulq 64(<pp=%rsi)
  880. mulq 64(%rsi)
  881. # qhasm: carry? ry4 += mulrax
  882. # asm 1: add <mulrax=int64#7,<ry4=int64#13
  883. # asm 2: add <mulrax=%rax,<ry4=%r15
  884. add %rax,%r15
  885. # qhasm: mulr41 += mulrdx + carry
  886. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  887. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  888. adc %rdx,%rbx
  889. # qhasm: mulrax = *(uint64 *)(pp + 88)
  890. # asm 1: movq 88(<pp=int64#2),>mulrax=int64#3
  891. # asm 2: movq 88(<pp=%rsi),>mulrax=%rdx
  892. movq 88(%rsi),%rdx
  893. # qhasm: mulrax *= 19
  894. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  895. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  896. imulq $19,%rdx,%rax
  897. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  898. # asm 1: mulq 72(<pp=int64#2)
  899. # asm 2: mulq 72(<pp=%rsi)
  900. mulq 72(%rsi)
  901. # qhasm: carry? ry0 += mulrax
  902. # asm 1: add <mulrax=int64#7,<ry0=int64#4
  903. # asm 2: add <mulrax=%rax,<ry0=%rcx
  904. add %rax,%rcx
  905. # qhasm: mulr01 += mulrdx + carry
  906. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  907. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  908. adc %rdx,%r8
  909. # qhasm: mulrax = *(uint64 *)(pp + 96)
  910. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  911. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  912. movq 96(%rsi),%rax
  913. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  914. # asm 1: mulq 40(<pp=int64#2)
  915. # asm 2: mulq 40(<pp=%rsi)
  916. mulq 40(%rsi)
  917. # qhasm: carry? ry2 += mulrax
  918. # asm 1: add <mulrax=int64#7,<ry2=int64#9
  919. # asm 2: add <mulrax=%rax,<ry2=%r11
  920. add %rax,%r11
  921. # qhasm: mulr21 += mulrdx + carry
  922. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  923. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  924. adc %rdx,%r12
  925. # qhasm: mulrax = *(uint64 *)(pp + 96)
  926. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  927. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  928. movq 96(%rsi),%rax
  929. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  930. # asm 1: mulq 48(<pp=int64#2)
  931. # asm 2: mulq 48(<pp=%rsi)
  932. mulq 48(%rsi)
  933. # qhasm: carry? ry3 += mulrax
  934. # asm 1: add <mulrax=int64#7,<ry3=int64#11
  935. # asm 2: add <mulrax=%rax,<ry3=%r13
  936. add %rax,%r13
  937. # qhasm: mulr31 += mulrdx + carry
  938. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  939. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  940. adc %rdx,%r14
  941. # qhasm: mulrax = *(uint64 *)(pp + 96)
  942. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  943. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  944. movq 96(%rsi),%rax
  945. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  946. # asm 1: mulq 56(<pp=int64#2)
  947. # asm 2: mulq 56(<pp=%rsi)
  948. mulq 56(%rsi)
  949. # qhasm: carry? ry4 += mulrax
  950. # asm 1: add <mulrax=int64#7,<ry4=int64#13
  951. # asm 2: add <mulrax=%rax,<ry4=%r15
  952. add %rax,%r15
  953. # qhasm: mulr41 += mulrdx + carry
  954. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  955. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  956. adc %rdx,%rbx
  957. # qhasm: mulrax = *(uint64 *)(pp + 96)
  958. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
  959. # asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
  960. movq 96(%rsi),%rdx
  961. # qhasm: mulrax *= 19
  962. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  963. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  964. imulq $19,%rdx,%rax
  965. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  966. # asm 1: mulq 64(<pp=int64#2)
  967. # asm 2: mulq 64(<pp=%rsi)
  968. mulq 64(%rsi)
  969. # qhasm: carry? ry0 += mulrax
  970. # asm 1: add <mulrax=int64#7,<ry0=int64#4
  971. # asm 2: add <mulrax=%rax,<ry0=%rcx
  972. add %rax,%rcx
  973. # qhasm: mulr01 += mulrdx + carry
  974. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  975. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  976. adc %rdx,%r8
  977. # qhasm: mulrax = *(uint64 *)(pp + 96)
  978. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
  979. # asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
  980. movq 96(%rsi),%rdx
  981. # qhasm: mulrax *= 19
  982. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  983. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  984. imulq $19,%rdx,%rax
  985. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  986. # asm 1: mulq 72(<pp=int64#2)
  987. # asm 2: mulq 72(<pp=%rsi)
  988. mulq 72(%rsi)
  989. # qhasm: carry? ry1 += mulrax
  990. # asm 1: add <mulrax=int64#7,<ry1=int64#6
  991. # asm 2: add <mulrax=%rax,<ry1=%r9
  992. add %rax,%r9
  993. # qhasm: mulr11 += mulrdx + carry
  994. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  995. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  996. adc %rdx,%r10
  997. # qhasm: mulrax = *(uint64 *)(pp + 104)
  998. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  999. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  1000. movq 104(%rsi),%rax
  1001. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  1002. # asm 1: mulq 40(<pp=int64#2)
  1003. # asm 2: mulq 40(<pp=%rsi)
  1004. mulq 40(%rsi)
  1005. # qhasm: carry? ry3 += mulrax
  1006. # asm 1: add <mulrax=int64#7,<ry3=int64#11
  1007. # asm 2: add <mulrax=%rax,<ry3=%r13
  1008. add %rax,%r13
  1009. # qhasm: mulr31 += mulrdx + carry
  1010. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1011. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1012. adc %rdx,%r14
  1013. # qhasm: mulrax = *(uint64 *)(pp + 104)
  1014. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  1015. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  1016. movq 104(%rsi),%rax
  1017. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
  1018. # asm 1: mulq 48(<pp=int64#2)
  1019. # asm 2: mulq 48(<pp=%rsi)
  1020. mulq 48(%rsi)
  1021. # qhasm: carry? ry4 += mulrax
  1022. # asm 1: add <mulrax=int64#7,<ry4=int64#13
  1023. # asm 2: add <mulrax=%rax,<ry4=%r15
  1024. add %rax,%r15
  1025. # qhasm: mulr41 += mulrdx + carry
  1026. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1027. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1028. adc %rdx,%rbx
  1029. # qhasm: mulrax = mulx319_stack
  1030. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  1031. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  1032. movq 56(%rsp),%rax
  1033. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  1034. # asm 1: mulq 64(<pp=int64#2)
  1035. # asm 2: mulq 64(<pp=%rsi)
  1036. mulq 64(%rsi)
  1037. # qhasm: carry? ry1 += mulrax
  1038. # asm 1: add <mulrax=int64#7,<ry1=int64#6
  1039. # asm 2: add <mulrax=%rax,<ry1=%r9
  1040. add %rax,%r9
  1041. # qhasm: mulr11 += mulrdx + carry
  1042. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1043. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1044. adc %rdx,%r10
  1045. # qhasm: mulrax = mulx319_stack
  1046. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  1047. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  1048. movq 56(%rsp),%rax
  1049. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  1050. # asm 1: mulq 72(<pp=int64#2)
  1051. # asm 2: mulq 72(<pp=%rsi)
  1052. mulq 72(%rsi)
  1053. # qhasm: carry? ry2 += mulrax
  1054. # asm 1: add <mulrax=int64#7,<ry2=int64#9
  1055. # asm 2: add <mulrax=%rax,<ry2=%r11
  1056. add %rax,%r11
  1057. # qhasm: mulr21 += mulrdx + carry
  1058. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1059. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1060. adc %rdx,%r12
  1061. # qhasm: mulrax = *(uint64 *)(pp + 112)
  1062. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  1063. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  1064. movq 112(%rsi),%rax
  1065. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
  1066. # asm 1: mulq 40(<pp=int64#2)
  1067. # asm 2: mulq 40(<pp=%rsi)
  1068. mulq 40(%rsi)
  1069. # qhasm: carry? ry4 += mulrax
  1070. # asm 1: add <mulrax=int64#7,<ry4=int64#13
  1071. # asm 2: add <mulrax=%rax,<ry4=%r15
  1072. add %rax,%r15
  1073. # qhasm: mulr41 += mulrdx + carry
  1074. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1075. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1076. adc %rdx,%rbx
  1077. # qhasm: mulrax = mulx419_stack
  1078. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1079. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1080. movq 64(%rsp),%rax
  1081. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
  1082. # asm 1: mulq 56(<pp=int64#2)
  1083. # asm 2: mulq 56(<pp=%rsi)
  1084. mulq 56(%rsi)
  1085. # qhasm: carry? ry1 += mulrax
  1086. # asm 1: add <mulrax=int64#7,<ry1=int64#6
  1087. # asm 2: add <mulrax=%rax,<ry1=%r9
  1088. add %rax,%r9
  1089. # qhasm: mulr11 += mulrdx + carry
  1090. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1091. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1092. adc %rdx,%r10
  1093. # qhasm: mulrax = mulx419_stack
  1094. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1095. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1096. movq 64(%rsp),%rax
  1097. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
  1098. # asm 1: mulq 64(<pp=int64#2)
  1099. # asm 2: mulq 64(<pp=%rsi)
  1100. mulq 64(%rsi)
  1101. # qhasm: carry? ry2 += mulrax
  1102. # asm 1: add <mulrax=int64#7,<ry2=int64#9
  1103. # asm 2: add <mulrax=%rax,<ry2=%r11
  1104. add %rax,%r11
  1105. # qhasm: mulr21 += mulrdx + carry
  1106. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1107. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1108. adc %rdx,%r12
  1109. # qhasm: mulrax = mulx419_stack
  1110. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1111. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1112. movq 64(%rsp),%rax
  1113. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
  1114. # asm 1: mulq 72(<pp=int64#2)
  1115. # asm 2: mulq 72(<pp=%rsi)
  1116. mulq 72(%rsi)
  1117. # qhasm: carry? ry3 += mulrax
  1118. # asm 1: add <mulrax=int64#7,<ry3=int64#11
  1119. # asm 2: add <mulrax=%rax,<ry3=%r13
  1120. add %rax,%r13
  1121. # qhasm: mulr31 += mulrdx + carry
  1122. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1123. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1124. adc %rdx,%r14
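# End of the limb accumulation for ry; the same radix-2^51 reduction as
# above follows (shld carries, REDMASK51 masking, 19-fold of the top-limb
# carry, then a sequential carry pass), presumably before the Y coordinate
# is written to rp+40..72.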
  1125. # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
  1126. # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
  1127. # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
  1128. movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
  1129. # qhasm: mulr01 = (mulr01.ry0) << 13
  1130. # asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
  1131. # asm 2: shld $13,<ry0=%rcx,<mulr01=%r8
  1132. shld $13,%rcx,%r8
  1133. # qhasm: ry0 &= mulredmask
  1134. # asm 1: and <mulredmask=int64#3,<ry0=int64#4
  1135. # asm 2: and <mulredmask=%rdx,<ry0=%rcx
  1136. and %rdx,%rcx
  1137. # qhasm: mulr11 = (mulr11.ry1) << 13
  1138. # asm 1: shld $13,<ry1=int64#6,<mulr11=int64#8
  1139. # asm 2: shld $13,<ry1=%r9,<mulr11=%r10
  1140. shld $13,%r9,%r10
  1141. # qhasm: ry1 &= mulredmask
  1142. # asm 1: and <mulredmask=int64#3,<ry1=int64#6
  1143. # asm 2: and <mulredmask=%rdx,<ry1=%r9
  1144. and %rdx,%r9
  1145. # qhasm: ry1 += mulr01
  1146. # asm 1: add <mulr01=int64#5,<ry1=int64#6
  1147. # asm 2: add <mulr01=%r8,<ry1=%r9
  1148. add %r8,%r9
  1149. # qhasm: mulr21 = (mulr21.ry2) << 13
  1150. # asm 1: shld $13,<ry2=int64#9,<mulr21=int64#10
  1151. # asm 2: shld $13,<ry2=%r11,<mulr21=%r12
  1152. shld $13,%r11,%r12
  1153. # qhasm: ry2 &= mulredmask
  1154. # asm 1: and <mulredmask=int64#3,<ry2=int64#9
  1155. # asm 2: and <mulredmask=%rdx,<ry2=%r11
  1156. and %rdx,%r11
  1157. # qhasm: ry2 += mulr11
  1158. # asm 1: add <mulr11=int64#8,<ry2=int64#9
  1159. # asm 2: add <mulr11=%r10,<ry2=%r11
  1160. add %r10,%r11
  1161. # qhasm: mulr31 = (mulr31.ry3) << 13
  1162. # asm 1: shld $13,<ry3=int64#11,<mulr31=int64#12
  1163. # asm 2: shld $13,<ry3=%r13,<mulr31=%r14
  1164. shld $13,%r13,%r14
  1165. # qhasm: ry3 &= mulredmask
  1166. # asm 1: and <mulredmask=int64#3,<ry3=int64#11
  1167. # asm 2: and <mulredmask=%rdx,<ry3=%r13
  1168. and %rdx,%r13
  1169. # qhasm: ry3 += mulr21
  1170. # asm 1: add <mulr21=int64#10,<ry3=int64#11
  1171. # asm 2: add <mulr21=%r12,<ry3=%r13
  1172. add %r12,%r13
  1173. # qhasm: mulr41 = (mulr41.ry4) << 13
  1174. # asm 1: shld $13,<ry4=int64#13,<mulr41=int64#14
  1175. # asm 2: shld $13,<ry4=%r15,<mulr41=%rbx
  1176. shld $13,%r15,%rbx
  1177. # qhasm: ry4 &= mulredmask
  1178. # asm 1: and <mulredmask=int64#3,<ry4=int64#13
  1179. # asm 2: and <mulredmask=%rdx,<ry4=%r15
  1180. and %rdx,%r15
  1181. # qhasm: ry4 += mulr31
  1182. # asm 1: add <mulr31=int64#12,<ry4=int64#13
  1183. # asm 2: add <mulr31=%r14,<ry4=%r15
  1184. add %r14,%r15
  1185. # qhasm: mulr41 = mulr41 * 19
  1186. # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
  1187. # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
  1188. imulq $19,%rbx,%r8
  1189. # qhasm: ry0 += mulr41
  1190. # asm 1: add <mulr41=int64#5,<ry0=int64#4
  1191. # asm 2: add <mulr41=%r8,<ry0=%rcx
  1192. add %r8,%rcx
  1193. # qhasm: mult = ry0
  1194. # asm 1: mov <ry0=int64#4,>mult=int64#5
  1195. # asm 2: mov <ry0=%rcx,>mult=%r8
  1196. mov %rcx,%r8
  1197. # qhasm: (uint64) mult >>= 51
  1198. # asm 1: shr $51,<mult=int64#5
  1199. # asm 2: shr $51,<mult=%r8
  1200. shr $51,%r8
  1201. # qhasm: mult += ry1
  1202. # asm 1: add <ry1=int64#6,<mult=int64#5
  1203. # asm 2: add <ry1=%r9,<mult=%r8
  1204. add %r9,%r8
  1205. # qhasm: ry1 = mult
  1206. # asm 1: mov <mult=int64#5,>ry1=int64#6
  1207. # asm 2: mov <mult=%r8,>ry1=%r9
  1208. mov %r8,%r9
  1209. # qhasm: (uint64) mult >>= 51
  1210. # asm 1: shr $51,<mult=int64#5
  1211. # asm 2: shr $51,<mult=%r8
  1212. shr $51,%r8
  1213. # qhasm: ry0 &= mulredmask
  1214. # asm 1: and <mulredmask=int64#3,<ry0=int64#4
  1215. # asm 2: and <mulredmask=%rdx,<ry0=%rcx
  1216. and %rdx,%rcx
  1217. # qhasm: mult += ry2
  1218. # asm 1: add <ry2=int64#9,<mult=int64#5
  1219. # asm 2: add <ry2=%r11,<mult=%r8
  1220. add %r11,%r8
  1221. # qhasm: ry2 = mult
  1222. # asm 1: mov <mult=int64#5,>ry2=int64#7
  1223. # asm 2: mov <mult=%r8,>ry2=%rax
  1224. mov %r8,%rax
  1225. # qhasm: (uint64) mult >>= 51
  1226. # asm 1: shr $51,<mult=int64#5
  1227. # asm 2: shr $51,<mult=%r8
  1228. shr $51,%r8
  1229. # qhasm: ry1 &= mulredmask
  1230. # asm 1: and <mulredmask=int64#3,<ry1=int64#6
  1231. # asm 2: and <mulredmask=%rdx,<ry1=%r9
  1232. and %rdx,%r9
  1233. # qhasm: mult += ry3
  1234. # asm 1: add <ry3=int64#11,<mult=int64#5
  1235. # asm 2: add <ry3=%r13,<mult=%r8
  1236. add %r13,%r8
  1237. # qhasm: ry3 = mult
  1238. # asm 1: mov <mult=int64#5,>ry3=int64#8
  1239. # asm 2: mov <mult=%r8,>ry3=%r10
  1240. mov %r8,%r10
  1241. # qhasm: (uint64) mult >>= 51
  1242. # asm 1: shr $51,<mult=int64#5
  1243. # asm 2: shr $51,<mult=%r8
  1244. shr $51,%r8
  1245. # qhasm: ry2 &= mulredmask
  1246. # asm 1: and <mulredmask=int64#3,<ry2=int64#7
  1247. # asm 2: and <mulredmask=%rdx,<ry2=%rax
  1248. and %rdx,%rax
  1249. # qhasm: mult += ry4
  1250. # asm 1: add <ry4=int64#13,<mult=int64#5
  1251. # asm 2: add <ry4=%r15,<mult=%r8
  1252. add %r15,%r8
  1253. # qhasm: ry4 = mult
  1254. # asm 1: mov <mult=int64#5,>ry4=int64#9
  1255. # asm 2: mov <mult=%r8,>ry4=%r11
  1256. mov %r8,%r11
  1257. # qhasm: (uint64) mult >>= 51
  1258. # asm 1: shr $51,<mult=int64#5
  1259. # asm 2: shr $51,<mult=%r8
  1260. shr $51,%r8
  1261. # qhasm: ry3 &= mulredmask
  1262. # asm 1: and <mulredmask=int64#3,<ry3=int64#8
  1263. # asm 2: and <mulredmask=%rdx,<ry3=%r10
  1264. and %rdx,%r10
  1265. # qhasm: mult *= 19
  1266. # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
  1267. # asm 2: imulq $19,<mult=%r8,>mult=%r8
  1268. imulq $19,%r8,%r8
  1269. # qhasm: ry0 += mult
  1270. # asm 1: add <mult=int64#5,<ry0=int64#4
  1271. # asm 2: add <mult=%r8,<ry0=%rcx
  1272. add %r8,%rcx
  1273. # qhasm: ry4 &= mulredmask
  1274. # asm 1: and <mulredmask=int64#3,<ry4=int64#9
  1275. # asm 2: and <mulredmask=%rdx,<ry4=%r11
  1276. and %rdx,%r11
  1277. # qhasm: *(uint64 *)(rp + 40) = ry0
  1278. # asm 1: movq <ry0=int64#4,40(<rp=int64#1)
  1279. # asm 2: movq <ry0=%rcx,40(<rp=%rdi)
  1280. movq %rcx,40(%rdi)
  1281. # qhasm: *(uint64 *)(rp + 48) = ry1
  1282. # asm 1: movq <ry1=int64#6,48(<rp=int64#1)
  1283. # asm 2: movq <ry1=%r9,48(<rp=%rdi)
  1284. movq %r9,48(%rdi)
  1285. # qhasm: *(uint64 *)(rp + 56) = ry2
  1286. # asm 1: movq <ry2=int64#7,56(<rp=int64#1)
  1287. # asm 2: movq <ry2=%rax,56(<rp=%rdi)
  1288. movq %rax,56(%rdi)
  1289. # qhasm: *(uint64 *)(rp + 64) = ry3
  1290. # asm 1: movq <ry3=int64#8,64(<rp=int64#1)
  1291. # asm 2: movq <ry3=%r10,64(<rp=%rdi)
  1292. movq %r10,64(%rdi)
  1293. # qhasm: *(uint64 *)(rp + 72) = ry4
  1294. # asm 1: movq <ry4=int64#9,72(<rp=int64#1)
  1295. # asm 2: movq <ry4=%r11,72(<rp=%rdi)
  1296. movq %r11,72(%rdi)
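# The five limbs at rp+40 .. rp+72 now hold the reduced radix-2^51 product of
# the field element beginning at pp+40 and the one beginning at pp+80.  In the
# usual p1p1 -> p3 conversion this is the Y coordinate of the result,
# Y3 = Y1*Z1 (the field names are inferred from the offsets, not stated in
# this file).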
  1297. # qhasm: mulrax = *(uint64 *)(pp + 64)
  1298. # asm 1: movq 64(<pp=int64#2),>mulrax=int64#3
  1299. # asm 2: movq 64(<pp=%rsi),>mulrax=%rdx
  1300. movq 64(%rsi),%rdx
  1301. # qhasm: mulrax *= 19
  1302. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1303. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1304. imulq $19,%rdx,%rax
  1305. # qhasm: mulx319_stack = mulrax
  1306. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
  1307. # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
  1308. movq %rax,56(%rsp)
  1309. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  1310. # asm 1: mulq 136(<pp=int64#2)
  1311. # asm 2: mulq 136(<pp=%rsi)
  1312. mulq 136(%rsi)
  1313. # qhasm: rz0 = mulrax
  1314. # asm 1: mov <mulrax=int64#7,>rz0=int64#4
  1315. # asm 2: mov <mulrax=%rax,>rz0=%rcx
  1316. mov %rax,%rcx
  1317. # qhasm: mulr01 = mulrdx
  1318. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  1319. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  1320. mov %rdx,%r8
  1321. # qhasm: mulrax = *(uint64 *)(pp + 72)
  1322. # asm 1: movq 72(<pp=int64#2),>mulrax=int64#3
  1323. # asm 2: movq 72(<pp=%rsi),>mulrax=%rdx
  1324. movq 72(%rsi),%rdx
  1325. # qhasm: mulrax *= 19
  1326. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1327. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1328. imulq $19,%rdx,%rax
  1329. # qhasm: mulx419_stack = mulrax
  1330. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
  1331. # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
  1332. movq %rax,64(%rsp)
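# mulx319_stack and mulx419_stack cache 19*(limb 3) and 19*(limb 4) of the
# first operand.  Partial products whose combined limb weight reaches
# 2^(5*51) = 2^255 are reduced on the fly by this factor of 19, again using
# 2^255 = 19 (mod 2^255 - 19).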
  1333. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  1334. # asm 1: mulq 128(<pp=int64#2)
  1335. # asm 2: mulq 128(<pp=%rsi)
  1336. mulq 128(%rsi)
  1337. # qhasm: carry? rz0 += mulrax
  1338. # asm 1: add <mulrax=int64#7,<rz0=int64#4
  1339. # asm 2: add <mulrax=%rax,<rz0=%rcx
  1340. add %rax,%rcx
  1341. # qhasm: mulr01 += mulrdx + carry
  1342. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1343. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1344. adc %rdx,%r8
  1345. # qhasm: mulrax = *(uint64 *)(pp + 40)
  1346. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  1347. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  1348. movq 40(%rsi),%rax
  1349. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  1350. # asm 1: mulq 120(<pp=int64#2)
  1351. # asm 2: mulq 120(<pp=%rsi)
  1352. mulq 120(%rsi)
  1353. # qhasm: carry? rz0 += mulrax
  1354. # asm 1: add <mulrax=int64#7,<rz0=int64#4
  1355. # asm 2: add <mulrax=%rax,<rz0=%rcx
  1356. add %rax,%rcx
  1357. # qhasm: mulr01 += mulrdx + carry
  1358. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1359. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1360. adc %rdx,%r8
  1361. # qhasm: mulrax = *(uint64 *)(pp + 40)
  1362. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  1363. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  1364. movq 40(%rsi),%rax
  1365. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  1366. # asm 1: mulq 128(<pp=int64#2)
  1367. # asm 2: mulq 128(<pp=%rsi)
  1368. mulq 128(%rsi)
  1369. # qhasm: rz1 = mulrax
  1370. # asm 1: mov <mulrax=int64#7,>rz1=int64#6
  1371. # asm 2: mov <mulrax=%rax,>rz1=%r9
  1372. mov %rax,%r9
  1373. # qhasm: mulr11 = mulrdx
  1374. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  1375. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  1376. mov %rdx,%r10
  1377. # qhasm: mulrax = *(uint64 *)(pp + 40)
  1378. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  1379. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  1380. movq 40(%rsi),%rax
  1381. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  1382. # asm 1: mulq 136(<pp=int64#2)
  1383. # asm 2: mulq 136(<pp=%rsi)
  1384. mulq 136(%rsi)
  1385. # qhasm: rz2 = mulrax
  1386. # asm 1: mov <mulrax=int64#7,>rz2=int64#9
  1387. # asm 2: mov <mulrax=%rax,>rz2=%r11
  1388. mov %rax,%r11
  1389. # qhasm: mulr21 = mulrdx
  1390. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  1391. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  1392. mov %rdx,%r12
  1393. # qhasm: mulrax = *(uint64 *)(pp + 40)
  1394. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  1395. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  1396. movq 40(%rsi),%rax
  1397. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  1398. # asm 1: mulq 144(<pp=int64#2)
  1399. # asm 2: mulq 144(<pp=%rsi)
  1400. mulq 144(%rsi)
  1401. # qhasm: rz3 = mulrax
  1402. # asm 1: mov <mulrax=int64#7,>rz3=int64#11
  1403. # asm 2: mov <mulrax=%rax,>rz3=%r13
  1404. mov %rax,%r13
  1405. # qhasm: mulr31 = mulrdx
  1406. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  1407. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  1408. mov %rdx,%r14
  1409. # qhasm: mulrax = *(uint64 *)(pp + 40)
  1410. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  1411. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  1412. movq 40(%rsi),%rax
  1413. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  1414. # asm 1: mulq 152(<pp=int64#2)
  1415. # asm 2: mulq 152(<pp=%rsi)
  1416. mulq 152(%rsi)
  1417. # qhasm: rz4 = mulrax
  1418. # asm 1: mov <mulrax=int64#7,>rz4=int64#13
  1419. # asm 2: mov <mulrax=%rax,>rz4=%r15
  1420. mov %rax,%r15
  1421. # qhasm: mulr41 = mulrdx
  1422. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  1423. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  1424. mov %rdx,%rbx
  1425. # qhasm: mulrax = *(uint64 *)(pp + 48)
  1426. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  1427. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  1428. movq 48(%rsi),%rax
  1429. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  1430. # asm 1: mulq 120(<pp=int64#2)
  1431. # asm 2: mulq 120(<pp=%rsi)
  1432. mulq 120(%rsi)
  1433. # qhasm: carry? rz1 += mulrax
  1434. # asm 1: add <mulrax=int64#7,<rz1=int64#6
  1435. # asm 2: add <mulrax=%rax,<rz1=%r9
  1436. add %rax,%r9
  1437. # qhasm: mulr11 += mulrdx + carry
  1438. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1439. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1440. adc %rdx,%r10
  1441. # qhasm: mulrax = *(uint64 *)(pp + 48)
  1442. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  1443. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  1444. movq 48(%rsi),%rax
  1445. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  1446. # asm 1: mulq 128(<pp=int64#2)
  1447. # asm 2: mulq 128(<pp=%rsi)
  1448. mulq 128(%rsi)
  1449. # qhasm: carry? rz2 += mulrax
  1450. # asm 1: add <mulrax=int64#7,<rz2=int64#9
  1451. # asm 2: add <mulrax=%rax,<rz2=%r11
  1452. add %rax,%r11
  1453. # qhasm: mulr21 += mulrdx + carry
  1454. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1455. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1456. adc %rdx,%r12
  1457. # qhasm: mulrax = *(uint64 *)(pp + 48)
  1458. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  1459. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  1460. movq 48(%rsi),%rax
  1461. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  1462. # asm 1: mulq 136(<pp=int64#2)
  1463. # asm 2: mulq 136(<pp=%rsi)
  1464. mulq 136(%rsi)
  1465. # qhasm: carry? rz3 += mulrax
  1466. # asm 1: add <mulrax=int64#7,<rz3=int64#11
  1467. # asm 2: add <mulrax=%rax,<rz3=%r13
  1468. add %rax,%r13
  1469. # qhasm: mulr31 += mulrdx + carry
  1470. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1471. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1472. adc %rdx,%r14
  1473. # qhasm: mulrax = *(uint64 *)(pp + 48)
  1474. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  1475. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  1476. movq 48(%rsi),%rax
  1477. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  1478. # asm 1: mulq 144(<pp=int64#2)
  1479. # asm 2: mulq 144(<pp=%rsi)
  1480. mulq 144(%rsi)
  1481. # qhasm: carry? rz4 += mulrax
  1482. # asm 1: add <mulrax=int64#7,<rz4=int64#13
  1483. # asm 2: add <mulrax=%rax,<rz4=%r15
  1484. add %rax,%r15
  1485. # qhasm: mulr41 += mulrdx + carry
  1486. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1487. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1488. adc %rdx,%rbx
  1489. # qhasm: mulrax = *(uint64 *)(pp + 48)
  1490. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#3
  1491. # asm 2: movq 48(<pp=%rsi),>mulrax=%rdx
  1492. movq 48(%rsi),%rdx
  1493. # qhasm: mulrax *= 19
  1494. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1495. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1496. imulq $19,%rdx,%rax
  1497. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  1498. # asm 1: mulq 152(<pp=int64#2)
  1499. # asm 2: mulq 152(<pp=%rsi)
  1500. mulq 152(%rsi)
  1501. # qhasm: carry? rz0 += mulrax
  1502. # asm 1: add <mulrax=int64#7,<rz0=int64#4
  1503. # asm 2: add <mulrax=%rax,<rz0=%rcx
  1504. add %rax,%rcx
  1505. # qhasm: mulr01 += mulrdx + carry
  1506. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1507. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1508. adc %rdx,%r8
  1509. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1510. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  1511. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  1512. movq 56(%rsi),%rax
  1513. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  1514. # asm 1: mulq 120(<pp=int64#2)
  1515. # asm 2: mulq 120(<pp=%rsi)
  1516. mulq 120(%rsi)
  1517. # qhasm: carry? rz2 += mulrax
  1518. # asm 1: add <mulrax=int64#7,<rz2=int64#9
  1519. # asm 2: add <mulrax=%rax,<rz2=%r11
  1520. add %rax,%r11
  1521. # qhasm: mulr21 += mulrdx + carry
  1522. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1523. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1524. adc %rdx,%r12
  1525. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1526. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  1527. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  1528. movq 56(%rsi),%rax
  1529. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  1530. # asm 1: mulq 128(<pp=int64#2)
  1531. # asm 2: mulq 128(<pp=%rsi)
  1532. mulq 128(%rsi)
  1533. # qhasm: carry? rz3 += mulrax
  1534. # asm 1: add <mulrax=int64#7,<rz3=int64#11
  1535. # asm 2: add <mulrax=%rax,<rz3=%r13
  1536. add %rax,%r13
  1537. # qhasm: mulr31 += mulrdx + carry
  1538. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1539. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1540. adc %rdx,%r14
  1541. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1542. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  1543. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  1544. movq 56(%rsi),%rax
  1545. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  1546. # asm 1: mulq 136(<pp=int64#2)
  1547. # asm 2: mulq 136(<pp=%rsi)
  1548. mulq 136(%rsi)
  1549. # qhasm: carry? rz4 += mulrax
  1550. # asm 1: add <mulrax=int64#7,<rz4=int64#13
  1551. # asm 2: add <mulrax=%rax,<rz4=%r15
  1552. add %rax,%r15
  1553. # qhasm: mulr41 += mulrdx + carry
  1554. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1555. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1556. adc %rdx,%rbx
  1557. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1558. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
  1559. # asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
  1560. movq 56(%rsi),%rdx
  1561. # qhasm: mulrax *= 19
  1562. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1563. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1564. imulq $19,%rdx,%rax
  1565. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  1566. # asm 1: mulq 144(<pp=int64#2)
  1567. # asm 2: mulq 144(<pp=%rsi)
  1568. mulq 144(%rsi)
  1569. # qhasm: carry? rz0 += mulrax
  1570. # asm 1: add <mulrax=int64#7,<rz0=int64#4
  1571. # asm 2: add <mulrax=%rax,<rz0=%rcx
  1572. add %rax,%rcx
  1573. # qhasm: mulr01 += mulrdx + carry
  1574. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1575. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1576. adc %rdx,%r8
  1577. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1578. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
  1579. # asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
  1580. movq 56(%rsi),%rdx
  1581. # qhasm: mulrax *= 19
  1582. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1583. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1584. imulq $19,%rdx,%rax
  1585. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  1586. # asm 1: mulq 152(<pp=int64#2)
  1587. # asm 2: mulq 152(<pp=%rsi)
  1588. mulq 152(%rsi)
  1589. # qhasm: carry? rz1 += mulrax
  1590. # asm 1: add <mulrax=int64#7,<rz1=int64#6
  1591. # asm 2: add <mulrax=%rax,<rz1=%r9
  1592. add %rax,%r9
  1593. # qhasm: mulr11 += mulrdx + carry
  1594. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1595. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1596. adc %rdx,%r10
  1597. # qhasm: mulrax = *(uint64 *)(pp + 64)
  1598. # asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
  1599. # asm 2: movq 64(<pp=%rsi),>mulrax=%rax
  1600. movq 64(%rsi),%rax
  1601. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  1602. # asm 1: mulq 120(<pp=int64#2)
  1603. # asm 2: mulq 120(<pp=%rsi)
  1604. mulq 120(%rsi)
  1605. # qhasm: carry? rz3 += mulrax
  1606. # asm 1: add <mulrax=int64#7,<rz3=int64#11
  1607. # asm 2: add <mulrax=%rax,<rz3=%r13
  1608. add %rax,%r13
  1609. # qhasm: mulr31 += mulrdx + carry
  1610. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1611. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1612. adc %rdx,%r14
  1613. # qhasm: mulrax = *(uint64 *)(pp + 64)
  1614. # asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
  1615. # asm 2: movq 64(<pp=%rsi),>mulrax=%rax
  1616. movq 64(%rsi),%rax
  1617. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
  1618. # asm 1: mulq 128(<pp=int64#2)
  1619. # asm 2: mulq 128(<pp=%rsi)
  1620. mulq 128(%rsi)
  1621. # qhasm: carry? rz4 += mulrax
  1622. # asm 1: add <mulrax=int64#7,<rz4=int64#13
  1623. # asm 2: add <mulrax=%rax,<rz4=%r15
  1624. add %rax,%r15
  1625. # qhasm: mulr41 += mulrdx + carry
  1626. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1627. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1628. adc %rdx,%rbx
  1629. # qhasm: mulrax = mulx319_stack
  1630. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  1631. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  1632. movq 56(%rsp),%rax
  1633. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  1634. # asm 1: mulq 144(<pp=int64#2)
  1635. # asm 2: mulq 144(<pp=%rsi)
  1636. mulq 144(%rsi)
  1637. # qhasm: carry? rz1 += mulrax
  1638. # asm 1: add <mulrax=int64#7,<rz1=int64#6
  1639. # asm 2: add <mulrax=%rax,<rz1=%r9
  1640. add %rax,%r9
  1641. # qhasm: mulr11 += mulrdx + carry
  1642. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1643. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1644. adc %rdx,%r10
  1645. # qhasm: mulrax = mulx319_stack
  1646. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  1647. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  1648. movq 56(%rsp),%rax
  1649. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  1650. # asm 1: mulq 152(<pp=int64#2)
  1651. # asm 2: mulq 152(<pp=%rsi)
  1652. mulq 152(%rsi)
  1653. # qhasm: carry? rz2 += mulrax
  1654. # asm 1: add <mulrax=int64#7,<rz2=int64#9
  1655. # asm 2: add <mulrax=%rax,<rz2=%r11
  1656. add %rax,%r11
  1657. # qhasm: mulr21 += mulrdx + carry
  1658. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1659. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1660. adc %rdx,%r12
  1661. # qhasm: mulrax = *(uint64 *)(pp + 72)
  1662. # asm 1: movq 72(<pp=int64#2),>mulrax=int64#7
  1663. # asm 2: movq 72(<pp=%rsi),>mulrax=%rax
  1664. movq 72(%rsi),%rax
  1665. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
  1666. # asm 1: mulq 120(<pp=int64#2)
  1667. # asm 2: mulq 120(<pp=%rsi)
  1668. mulq 120(%rsi)
  1669. # qhasm: carry? rz4 += mulrax
  1670. # asm 1: add <mulrax=int64#7,<rz4=int64#13
  1671. # asm 2: add <mulrax=%rax,<rz4=%r15
  1672. add %rax,%r15
  1673. # qhasm: mulr41 += mulrdx + carry
  1674. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  1675. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  1676. adc %rdx,%rbx
  1677. # qhasm: mulrax = mulx419_stack
  1678. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1679. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1680. movq 64(%rsp),%rax
  1681. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
  1682. # asm 1: mulq 136(<pp=int64#2)
  1683. # asm 2: mulq 136(<pp=%rsi)
  1684. mulq 136(%rsi)
  1685. # qhasm: carry? rz1 += mulrax
  1686. # asm 1: add <mulrax=int64#7,<rz1=int64#6
  1687. # asm 2: add <mulrax=%rax,<rz1=%r9
  1688. add %rax,%r9
  1689. # qhasm: mulr11 += mulrdx + carry
  1690. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  1691. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  1692. adc %rdx,%r10
  1693. # qhasm: mulrax = mulx419_stack
  1694. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1695. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1696. movq 64(%rsp),%rax
  1697. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
  1698. # asm 1: mulq 144(<pp=int64#2)
  1699. # asm 2: mulq 144(<pp=%rsi)
  1700. mulq 144(%rsi)
  1701. # qhasm: carry? rz2 += mulrax
  1702. # asm 1: add <mulrax=int64#7,<rz2=int64#9
  1703. # asm 2: add <mulrax=%rax,<rz2=%r11
  1704. add %rax,%r11
  1705. # qhasm: mulr21 += mulrdx + carry
  1706. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  1707. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  1708. adc %rdx,%r12
  1709. # qhasm: mulrax = mulx419_stack
  1710. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  1711. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  1712. movq 64(%rsp),%rax
  1713. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
  1714. # asm 1: mulq 152(<pp=int64#2)
  1715. # asm 2: mulq 152(<pp=%rsi)
  1716. mulq 152(%rsi)
  1717. # qhasm: carry? rz3 += mulrax
  1718. # asm 1: add <mulrax=int64#7,<rz3=int64#11
  1719. # asm 2: add <mulrax=%rax,<rz3=%r13
  1720. add %rax,%r13
  1721. # qhasm: mulr31 += mulrdx + carry
  1722. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  1723. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  1724. adc %rdx,%r14
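# Same reduction as for the previous product, now applied to the column sums
# mulr01:rz0 .. mulr41:rz4.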
  1725. # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
  1726. # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
  1727. # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
  1728. movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
  1729. # qhasm: mulr01 = (mulr01.rz0) << 13
  1730. # asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
  1731. # asm 2: shld $13,<rz0=%rcx,<mulr01=%r8
  1732. shld $13,%rcx,%r8
  1733. # qhasm: rz0 &= mulredmask
  1734. # asm 1: and <mulredmask=int64#3,<rz0=int64#4
  1735. # asm 2: and <mulredmask=%rdx,<rz0=%rcx
  1736. and %rdx,%rcx
  1737. # qhasm: mulr11 = (mulr11.rz1) << 13
  1738. # asm 1: shld $13,<rz1=int64#6,<mulr11=int64#8
  1739. # asm 2: shld $13,<rz1=%r9,<mulr11=%r10
  1740. shld $13,%r9,%r10
  1741. # qhasm: rz1 &= mulredmask
  1742. # asm 1: and <mulredmask=int64#3,<rz1=int64#6
  1743. # asm 2: and <mulredmask=%rdx,<rz1=%r9
  1744. and %rdx,%r9
  1745. # qhasm: rz1 += mulr01
  1746. # asm 1: add <mulr01=int64#5,<rz1=int64#6
  1747. # asm 2: add <mulr01=%r8,<rz1=%r9
  1748. add %r8,%r9
  1749. # qhasm: mulr21 = (mulr21.rz2) << 13
  1750. # asm 1: shld $13,<rz2=int64#9,<mulr21=int64#10
  1751. # asm 2: shld $13,<rz2=%r11,<mulr21=%r12
  1752. shld $13,%r11,%r12
  1753. # qhasm: rz2 &= mulredmask
  1754. # asm 1: and <mulredmask=int64#3,<rz2=int64#9
  1755. # asm 2: and <mulredmask=%rdx,<rz2=%r11
  1756. and %rdx,%r11
  1757. # qhasm: rz2 += mulr11
  1758. # asm 1: add <mulr11=int64#8,<rz2=int64#9
  1759. # asm 2: add <mulr11=%r10,<rz2=%r11
  1760. add %r10,%r11
  1761. # qhasm: mulr31 = (mulr31.rz3) << 13
  1762. # asm 1: shld $13,<rz3=int64#11,<mulr31=int64#12
  1763. # asm 2: shld $13,<rz3=%r13,<mulr31=%r14
  1764. shld $13,%r13,%r14
  1765. # qhasm: rz3 &= mulredmask
  1766. # asm 1: and <mulredmask=int64#3,<rz3=int64#11
  1767. # asm 2: and <mulredmask=%rdx,<rz3=%r13
  1768. and %rdx,%r13
  1769. # qhasm: rz3 += mulr21
  1770. # asm 1: add <mulr21=int64#10,<rz3=int64#11
  1771. # asm 2: add <mulr21=%r12,<rz3=%r13
  1772. add %r12,%r13
  1773. # qhasm: mulr41 = (mulr41.rz4) << 13
  1774. # asm 1: shld $13,<rz4=int64#13,<mulr41=int64#14
  1775. # asm 2: shld $13,<rz4=%r15,<mulr41=%rbx
  1776. shld $13,%r15,%rbx
  1777. # qhasm: rz4 &= mulredmask
  1778. # asm 1: and <mulredmask=int64#3,<rz4=int64#13
  1779. # asm 2: and <mulredmask=%rdx,<rz4=%r15
  1780. and %rdx,%r15
  1781. # qhasm: rz4 += mulr31
  1782. # asm 1: add <mulr31=int64#12,<rz4=int64#13
  1783. # asm 2: add <mulr31=%r14,<rz4=%r15
  1784. add %r14,%r15
  1785. # qhasm: mulr41 = mulr41 * 19
  1786. # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
  1787. # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
  1788. imulq $19,%rbx,%r8
  1789. # qhasm: rz0 += mulr41
  1790. # asm 1: add <mulr41=int64#5,<rz0=int64#4
  1791. # asm 2: add <mulr41=%r8,<rz0=%rcx
  1792. add %r8,%rcx
  1793. # qhasm: mult = rz0
  1794. # asm 1: mov <rz0=int64#4,>mult=int64#5
  1795. # asm 2: mov <rz0=%rcx,>mult=%r8
  1796. mov %rcx,%r8
  1797. # qhasm: (uint64) mult >>= 51
  1798. # asm 1: shr $51,<mult=int64#5
  1799. # asm 2: shr $51,<mult=%r8
  1800. shr $51,%r8
  1801. # qhasm: mult += rz1
  1802. # asm 1: add <rz1=int64#6,<mult=int64#5
  1803. # asm 2: add <rz1=%r9,<mult=%r8
  1804. add %r9,%r8
  1805. # qhasm: rz1 = mult
  1806. # asm 1: mov <mult=int64#5,>rz1=int64#6
  1807. # asm 2: mov <mult=%r8,>rz1=%r9
  1808. mov %r8,%r9
  1809. # qhasm: (uint64) mult >>= 51
  1810. # asm 1: shr $51,<mult=int64#5
  1811. # asm 2: shr $51,<mult=%r8
  1812. shr $51,%r8
  1813. # qhasm: rz0 &= mulredmask
  1814. # asm 1: and <mulredmask=int64#3,<rz0=int64#4
  1815. # asm 2: and <mulredmask=%rdx,<rz0=%rcx
  1816. and %rdx,%rcx
  1817. # qhasm: mult += rz2
  1818. # asm 1: add <rz2=int64#9,<mult=int64#5
  1819. # asm 2: add <rz2=%r11,<mult=%r8
  1820. add %r11,%r8
  1821. # qhasm: rz2 = mult
  1822. # asm 1: mov <mult=int64#5,>rz2=int64#7
  1823. # asm 2: mov <mult=%r8,>rz2=%rax
  1824. mov %r8,%rax
  1825. # qhasm: (uint64) mult >>= 51
  1826. # asm 1: shr $51,<mult=int64#5
  1827. # asm 2: shr $51,<mult=%r8
  1828. shr $51,%r8
  1829. # qhasm: rz1 &= mulredmask
  1830. # asm 1: and <mulredmask=int64#3,<rz1=int64#6
  1831. # asm 2: and <mulredmask=%rdx,<rz1=%r9
  1832. and %rdx,%r9
  1833. # qhasm: mult += rz3
  1834. # asm 1: add <rz3=int64#11,<mult=int64#5
  1835. # asm 2: add <rz3=%r13,<mult=%r8
  1836. add %r13,%r8
  1837. # qhasm: rz3 = mult
  1838. # asm 1: mov <mult=int64#5,>rz3=int64#8
  1839. # asm 2: mov <mult=%r8,>rz3=%r10
  1840. mov %r8,%r10
  1841. # qhasm: (uint64) mult >>= 51
  1842. # asm 1: shr $51,<mult=int64#5
  1843. # asm 2: shr $51,<mult=%r8
  1844. shr $51,%r8
  1845. # qhasm: rz2 &= mulredmask
  1846. # asm 1: and <mulredmask=int64#3,<rz2=int64#7
  1847. # asm 2: and <mulredmask=%rdx,<rz2=%rax
  1848. and %rdx,%rax
  1849. # qhasm: mult += rz4
  1850. # asm 1: add <rz4=int64#13,<mult=int64#5
  1851. # asm 2: add <rz4=%r15,<mult=%r8
  1852. add %r15,%r8
  1853. # qhasm: rz4 = mult
  1854. # asm 1: mov <mult=int64#5,>rz4=int64#9
  1855. # asm 2: mov <mult=%r8,>rz4=%r11
  1856. mov %r8,%r11
  1857. # qhasm: (uint64) mult >>= 51
  1858. # asm 1: shr $51,<mult=int64#5
  1859. # asm 2: shr $51,<mult=%r8
  1860. shr $51,%r8
  1861. # qhasm: rz3 &= mulredmask
  1862. # asm 1: and <mulredmask=int64#3,<rz3=int64#8
  1863. # asm 2: and <mulredmask=%rdx,<rz3=%r10
  1864. and %rdx,%r10
  1865. # qhasm: mult *= 19
  1866. # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
  1867. # asm 2: imulq $19,<mult=%r8,>mult=%r8
  1868. imulq $19,%r8,%r8
  1869. # qhasm: rz0 += mult
  1870. # asm 1: add <mult=int64#5,<rz0=int64#4
  1871. # asm 2: add <mult=%r8,<rz0=%rcx
  1872. add %r8,%rcx
  1873. # qhasm: rz4 &= mulredmask
  1874. # asm 1: and <mulredmask=int64#3,<rz4=int64#9
  1875. # asm 2: and <mulredmask=%rdx,<rz4=%r11
  1876. and %rdx,%r11
  1877. # qhasm: *(uint64 *)(rp + 80) = rz0
  1878. # asm 1: movq <rz0=int64#4,80(<rp=int64#1)
  1879. # asm 2: movq <rz0=%rcx,80(<rp=%rdi)
  1880. movq %rcx,80(%rdi)
  1881. # qhasm: *(uint64 *)(rp + 88) = rz1
  1882. # asm 1: movq <rz1=int64#6,88(<rp=int64#1)
  1883. # asm 2: movq <rz1=%r9,88(<rp=%rdi)
  1884. movq %r9,88(%rdi)
  1885. # qhasm: *(uint64 *)(rp + 96) = rz2
  1886. # asm 1: movq <rz2=int64#7,96(<rp=int64#1)
  1887. # asm 2: movq <rz2=%rax,96(<rp=%rdi)
  1888. movq %rax,96(%rdi)
  1889. # qhasm: *(uint64 *)(rp + 104) = rz3
  1890. # asm 1: movq <rz3=int64#8,104(<rp=int64#1)
  1891. # asm 2: movq <rz3=%r10,104(<rp=%rdi)
  1892. movq %r10,104(%rdi)
  1893. # qhasm: *(uint64 *)(rp + 112) = rz4
  1894. # asm 1: movq <rz4=int64#9,112(<rp=int64#1)
  1895. # asm 2: movq <rz4=%r11,112(<rp=%rdi)
  1896. movq %r11,112(%rdi)
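# The limbs at rp+80 .. rp+112 now hold the reduced product of the field
# elements at pp+40 and pp+120 (presumably Z3 = Z1*T1).  The multiplication
# below forms the product of the field elements at pp+0 and pp+80 and writes
# it to rp+120 .. rp+152.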
  1897. # qhasm: mulrax = *(uint64 *)(pp + 24)
  1898. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
  1899. # asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
  1900. movq 24(%rsi),%rdx
  1901. # qhasm: mulrax *= 19
  1902. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1903. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1904. imulq $19,%rdx,%rax
  1905. # qhasm: mulx319_stack = mulrax
  1906. # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
  1907. # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
  1908. movq %rax,56(%rsp)
  1909. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  1910. # asm 1: mulq 96(<pp=int64#2)
  1911. # asm 2: mulq 96(<pp=%rsi)
  1912. mulq 96(%rsi)
  1913. # qhasm: rt0 = mulrax
  1914. # asm 1: mov <mulrax=int64#7,>rt0=int64#4
  1915. # asm 2: mov <mulrax=%rax,>rt0=%rcx
  1916. mov %rax,%rcx
  1917. # qhasm: mulr01 = mulrdx
  1918. # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
  1919. # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
  1920. mov %rdx,%r8
  1921. # qhasm: mulrax = *(uint64 *)(pp + 32)
  1922. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
  1923. # asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
  1924. movq 32(%rsi),%rdx
  1925. # qhasm: mulrax *= 19
  1926. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  1927. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  1928. imulq $19,%rdx,%rax
  1929. # qhasm: mulx419_stack = mulrax
  1930. # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
  1931. # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
  1932. movq %rax,64(%rsp)
  1933. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  1934. # asm 1: mulq 88(<pp=int64#2)
  1935. # asm 2: mulq 88(<pp=%rsi)
  1936. mulq 88(%rsi)
  1937. # qhasm: carry? rt0 += mulrax
  1938. # asm 1: add <mulrax=int64#7,<rt0=int64#4
  1939. # asm 2: add <mulrax=%rax,<rt0=%rcx
  1940. add %rax,%rcx
  1941. # qhasm: mulr01 += mulrdx + carry
  1942. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1943. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1944. adc %rdx,%r8
  1945. # qhasm: mulrax = *(uint64 *)(pp + 0)
  1946. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  1947. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  1948. movq 0(%rsi),%rax
  1949. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  1950. # asm 1: mulq 80(<pp=int64#2)
  1951. # asm 2: mulq 80(<pp=%rsi)
  1952. mulq 80(%rsi)
  1953. # qhasm: carry? rt0 += mulrax
  1954. # asm 1: add <mulrax=int64#7,<rt0=int64#4
  1955. # asm 2: add <mulrax=%rax,<rt0=%rcx
  1956. add %rax,%rcx
  1957. # qhasm: mulr01 += mulrdx + carry
  1958. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  1959. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  1960. adc %rdx,%r8
  1961. # qhasm: mulrax = *(uint64 *)(pp + 0)
  1962. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  1963. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  1964. movq 0(%rsi),%rax
  1965. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  1966. # asm 1: mulq 88(<pp=int64#2)
  1967. # asm 2: mulq 88(<pp=%rsi)
  1968. mulq 88(%rsi)
  1969. # qhasm: rt1 = mulrax
  1970. # asm 1: mov <mulrax=int64#7,>rt1=int64#6
  1971. # asm 2: mov <mulrax=%rax,>rt1=%r9
  1972. mov %rax,%r9
  1973. # qhasm: mulr11 = mulrdx
  1974. # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
  1975. # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
  1976. mov %rdx,%r10
  1977. # qhasm: mulrax = *(uint64 *)(pp + 0)
  1978. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  1979. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  1980. movq 0(%rsi),%rax
  1981. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  1982. # asm 1: mulq 96(<pp=int64#2)
  1983. # asm 2: mulq 96(<pp=%rsi)
  1984. mulq 96(%rsi)
  1985. # qhasm: rt2 = mulrax
  1986. # asm 1: mov <mulrax=int64#7,>rt2=int64#9
  1987. # asm 2: mov <mulrax=%rax,>rt2=%r11
  1988. mov %rax,%r11
  1989. # qhasm: mulr21 = mulrdx
  1990. # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
  1991. # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
  1992. mov %rdx,%r12
  1993. # qhasm: mulrax = *(uint64 *)(pp + 0)
  1994. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  1995. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  1996. movq 0(%rsi),%rax
  1997. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  1998. # asm 1: mulq 104(<pp=int64#2)
  1999. # asm 2: mulq 104(<pp=%rsi)
  2000. mulq 104(%rsi)
  2001. # qhasm: rt3 = mulrax
  2002. # asm 1: mov <mulrax=int64#7,>rt3=int64#11
  2003. # asm 2: mov <mulrax=%rax,>rt3=%r13
  2004. mov %rax,%r13
  2005. # qhasm: mulr31 = mulrdx
  2006. # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
  2007. # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
  2008. mov %rdx,%r14
  2009. # qhasm: mulrax = *(uint64 *)(pp + 0)
  2010. # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
  2011. # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
  2012. movq 0(%rsi),%rax
  2013. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2014. # asm 1: mulq 112(<pp=int64#2)
  2015. # asm 2: mulq 112(<pp=%rsi)
  2016. mulq 112(%rsi)
  2017. # qhasm: rt4 = mulrax
  2018. # asm 1: mov <mulrax=int64#7,>rt4=int64#13
  2019. # asm 2: mov <mulrax=%rax,>rt4=%r15
  2020. mov %rax,%r15
  2021. # qhasm: mulr41 = mulrdx
  2022. # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
  2023. # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
  2024. mov %rdx,%rbx
  2025. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2026. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2027. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2028. movq 8(%rsi),%rax
  2029. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2030. # asm 1: mulq 80(<pp=int64#2)
  2031. # asm 2: mulq 80(<pp=%rsi)
  2032. mulq 80(%rsi)
  2033. # qhasm: carry? rt1 += mulrax
  2034. # asm 1: add <mulrax=int64#7,<rt1=int64#6
  2035. # asm 2: add <mulrax=%rax,<rt1=%r9
  2036. add %rax,%r9
  2037. # qhasm: mulr11 += mulrdx + carry
  2038. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2039. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2040. adc %rdx,%r10
  2041. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2042. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2043. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2044. movq 8(%rsi),%rax
  2045. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2046. # asm 1: mulq 88(<pp=int64#2)
  2047. # asm 2: mulq 88(<pp=%rsi)
  2048. mulq 88(%rsi)
  2049. # qhasm: carry? rt2 += mulrax
  2050. # asm 1: add <mulrax=int64#7,<rt2=int64#9
  2051. # asm 2: add <mulrax=%rax,<rt2=%r11
  2052. add %rax,%r11
  2053. # qhasm: mulr21 += mulrdx + carry
  2054. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2055. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2056. adc %rdx,%r12
  2057. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2058. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2059. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2060. movq 8(%rsi),%rax
  2061. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2062. # asm 1: mulq 96(<pp=int64#2)
  2063. # asm 2: mulq 96(<pp=%rsi)
  2064. mulq 96(%rsi)
  2065. # qhasm: carry? rt3 += mulrax
  2066. # asm 1: add <mulrax=int64#7,<rt3=int64#11
  2067. # asm 2: add <mulrax=%rax,<rt3=%r13
  2068. add %rax,%r13
  2069. # qhasm: mulr31 += mulrdx + carry
  2070. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2071. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2072. adc %rdx,%r14
  2073. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2074. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
  2075. # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
  2076. movq 8(%rsi),%rax
  2077. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2078. # asm 1: mulq 104(<pp=int64#2)
  2079. # asm 2: mulq 104(<pp=%rsi)
  2080. mulq 104(%rsi)
  2081. # qhasm: carry? rt4 += mulrax
  2082. # asm 1: add <mulrax=int64#7,<rt4=int64#13
  2083. # asm 2: add <mulrax=%rax,<rt4=%r15
  2084. add %rax,%r15
  2085. # qhasm: mulr41 += mulrdx + carry
  2086. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2087. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2088. adc %rdx,%rbx
  2089. # qhasm: mulrax = *(uint64 *)(pp + 8)
  2090. # asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
  2091. # asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
  2092. movq 8(%rsi),%rdx
  2093. # qhasm: mulrax *= 19
  2094. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2095. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2096. imulq $19,%rdx,%rax
  2097. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2098. # asm 1: mulq 112(<pp=int64#2)
  2099. # asm 2: mulq 112(<pp=%rsi)
  2100. mulq 112(%rsi)
  2101. # qhasm: carry? rt0 += mulrax
  2102. # asm 1: add <mulrax=int64#7,<rt0=int64#4
  2103. # asm 2: add <mulrax=%rax,<rt0=%rcx
  2104. add %rax,%rcx
  2105. # qhasm: mulr01 += mulrdx + carry
  2106. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2107. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2108. adc %rdx,%r8
  2109. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2110. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2111. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2112. movq 16(%rsi),%rax
  2113. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2114. # asm 1: mulq 80(<pp=int64#2)
  2115. # asm 2: mulq 80(<pp=%rsi)
  2116. mulq 80(%rsi)
  2117. # qhasm: carry? rt2 += mulrax
  2118. # asm 1: add <mulrax=int64#7,<rt2=int64#9
  2119. # asm 2: add <mulrax=%rax,<rt2=%r11
  2120. add %rax,%r11
  2121. # qhasm: mulr21 += mulrdx + carry
  2122. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2123. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2124. adc %rdx,%r12
  2125. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2126. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2127. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2128. movq 16(%rsi),%rax
  2129. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2130. # asm 1: mulq 88(<pp=int64#2)
  2131. # asm 2: mulq 88(<pp=%rsi)
  2132. mulq 88(%rsi)
  2133. # qhasm: carry? rt3 += mulrax
  2134. # asm 1: add <mulrax=int64#7,<rt3=int64#11
  2135. # asm 2: add <mulrax=%rax,<rt3=%r13
  2136. add %rax,%r13
  2137. # qhasm: mulr31 += mulrdx + carry
  2138. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2139. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2140. adc %rdx,%r14
  2141. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2142. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
  2143. # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
  2144. movq 16(%rsi),%rax
  2145. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2146. # asm 1: mulq 96(<pp=int64#2)
  2147. # asm 2: mulq 96(<pp=%rsi)
  2148. mulq 96(%rsi)
  2149. # qhasm: carry? rt4 += mulrax
  2150. # asm 1: add <mulrax=int64#7,<rt4=int64#13
  2151. # asm 2: add <mulrax=%rax,<rt4=%r15
  2152. add %rax,%r15
  2153. # qhasm: mulr41 += mulrdx + carry
  2154. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2155. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2156. adc %rdx,%rbx
  2157. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2158. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  2159. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  2160. movq 16(%rsi),%rdx
  2161. # qhasm: mulrax *= 19
  2162. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2163. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2164. imulq $19,%rdx,%rax
  2165. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2166. # asm 1: mulq 104(<pp=int64#2)
  2167. # asm 2: mulq 104(<pp=%rsi)
  2168. mulq 104(%rsi)
  2169. # qhasm: carry? rt0 += mulrax
  2170. # asm 1: add <mulrax=int64#7,<rt0=int64#4
  2171. # asm 2: add <mulrax=%rax,<rt0=%rcx
  2172. add %rax,%rcx
  2173. # qhasm: mulr01 += mulrdx + carry
  2174. # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
  2175. # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
  2176. adc %rdx,%r8
  2177. # qhasm: mulrax = *(uint64 *)(pp + 16)
  2178. # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
  2179. # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
  2180. movq 16(%rsi),%rdx
  2181. # qhasm: mulrax *= 19
  2182. # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
  2183. # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
  2184. imulq $19,%rdx,%rax
  2185. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2186. # asm 1: mulq 112(<pp=int64#2)
  2187. # asm 2: mulq 112(<pp=%rsi)
  2188. mulq 112(%rsi)
  2189. # qhasm: carry? rt1 += mulrax
  2190. # asm 1: add <mulrax=int64#7,<rt1=int64#6
  2191. # asm 2: add <mulrax=%rax,<rt1=%r9
  2192. add %rax,%r9
  2193. # qhasm: mulr11 += mulrdx + carry
  2194. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2195. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2196. adc %rdx,%r10
  2197. # qhasm: mulrax = *(uint64 *)(pp + 24)
  2198. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  2199. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  2200. movq 24(%rsi),%rax
  2201. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2202. # asm 1: mulq 80(<pp=int64#2)
  2203. # asm 2: mulq 80(<pp=%rsi)
  2204. mulq 80(%rsi)
  2205. # qhasm: carry? rt3 += mulrax
  2206. # asm 1: add <mulrax=int64#7,<rt3=int64#11
  2207. # asm 2: add <mulrax=%rax,<rt3=%r13
  2208. add %rax,%r13
  2209. # qhasm: mulr31 += mulrdx + carry
  2210. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2211. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2212. adc %rdx,%r14
  2213. # qhasm: mulrax = *(uint64 *)(pp + 24)
  2214. # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
  2215. # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
  2216. movq 24(%rsi),%rax
  2217. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
  2218. # asm 1: mulq 88(<pp=int64#2)
  2219. # asm 2: mulq 88(<pp=%rsi)
  2220. mulq 88(%rsi)
  2221. # qhasm: carry? rt4 += mulrax
  2222. # asm 1: add <mulrax=int64#7,<rt4=int64#13
  2223. # asm 2: add <mulrax=%rax,<rt4=%r15
  2224. add %rax,%r15
  2225. # qhasm: mulr41 += mulrdx + carry
  2226. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2227. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2228. adc %rdx,%rbx
  2229. # qhasm: mulrax = mulx319_stack
  2230. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  2231. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  2232. movq 56(%rsp),%rax
  2233. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2234. # asm 1: mulq 104(<pp=int64#2)
  2235. # asm 2: mulq 104(<pp=%rsi)
  2236. mulq 104(%rsi)
  2237. # qhasm: carry? rt1 += mulrax
  2238. # asm 1: add <mulrax=int64#7,<rt1=int64#6
  2239. # asm 2: add <mulrax=%rax,<rt1=%r9
  2240. add %rax,%r9
  2241. # qhasm: mulr11 += mulrdx + carry
  2242. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2243. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2244. adc %rdx,%r10
  2245. # qhasm: mulrax = mulx319_stack
  2246. # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
  2247. # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
  2248. movq 56(%rsp),%rax
  2249. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2250. # asm 1: mulq 112(<pp=int64#2)
  2251. # asm 2: mulq 112(<pp=%rsi)
  2252. mulq 112(%rsi)
  2253. # qhasm: carry? rt2 += mulrax
  2254. # asm 1: add <mulrax=int64#7,<rt2=int64#9
  2255. # asm 2: add <mulrax=%rax,<rt2=%r11
  2256. add %rax,%r11
  2257. # qhasm: mulr21 += mulrdx + carry
  2258. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2259. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2260. adc %rdx,%r12
  2261. # qhasm: mulrax = *(uint64 *)(pp + 32)
  2262. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  2263. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  2264. movq 32(%rsi),%rax
  2265. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
  2266. # asm 1: mulq 80(<pp=int64#2)
  2267. # asm 2: mulq 80(<pp=%rsi)
  2268. mulq 80(%rsi)
  2269. # qhasm: carry? rt4 += mulrax
  2270. # asm 1: add <mulrax=int64#7,<rt4=int64#13
  2271. # asm 2: add <mulrax=%rax,<rt4=%r15
  2272. add %rax,%r15
  2273. # qhasm: mulr41 += mulrdx + carry
  2274. # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
  2275. # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
  2276. adc %rdx,%rbx
  2277. # qhasm: mulrax = mulx419_stack
  2278. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  2279. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  2280. movq 64(%rsp),%rax
  2281. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
  2282. # asm 1: mulq 96(<pp=int64#2)
  2283. # asm 2: mulq 96(<pp=%rsi)
  2284. mulq 96(%rsi)
  2285. # qhasm: carry? rt1 += mulrax
  2286. # asm 1: add <mulrax=int64#7,<rt1=int64#6
  2287. # asm 2: add <mulrax=%rax,<rt1=%r9
  2288. add %rax,%r9
  2289. # qhasm: mulr11 += mulrdx + carry
  2290. # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
  2291. # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
  2292. adc %rdx,%r10
  2293. # qhasm: mulrax = mulx419_stack
  2294. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  2295. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  2296. movq 64(%rsp),%rax
  2297. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
  2298. # asm 1: mulq 104(<pp=int64#2)
  2299. # asm 2: mulq 104(<pp=%rsi)
  2300. mulq 104(%rsi)
  2301. # qhasm: carry? rt2 += mulrax
  2302. # asm 1: add <mulrax=int64#7,<rt2=int64#9
  2303. # asm 2: add <mulrax=%rax,<rt2=%r11
  2304. add %rax,%r11
  2305. # qhasm: mulr21 += mulrdx + carry
  2306. # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
  2307. # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
  2308. adc %rdx,%r12
  2309. # qhasm: mulrax = mulx419_stack
  2310. # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
  2311. # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
  2312. movq 64(%rsp),%rax
  2313. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
  2314. # asm 1: mulq 112(<pp=int64#2)
  2315. # asm 2: mulq 112(<pp=%rsi)
  2316. mulq 112(%rsi)
  2317. # qhasm: carry? rt3 += mulrax
  2318. # asm 1: add <mulrax=int64#7,<rt3=int64#11
  2319. # asm 2: add <mulrax=%rax,<rt3=%r13
  2320. add %rax,%r13
  2321. # qhasm: mulr31 += mulrdx + carry
  2322. # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
  2323. # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
  2324. adc %rdx,%r14
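# Final reduction, applied to mulr01:rt0 .. mulr41:rt4.  Note that REDMASK51
# is loaded into %rsi this time: the input pointer pp is no longer needed, so
# its register is reused.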
  2325. # qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
  2326. # asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
  2327. # asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
  2328. movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
  2329. # qhasm: mulr01 = (mulr01.rt0) << 13
  2330. # asm 1: shld $13,<rt0=int64#4,<mulr01=int64#5
  2331. # asm 2: shld $13,<rt0=%rcx,<mulr01=%r8
  2332. shld $13,%rcx,%r8
  2333. # qhasm: rt0 &= mulredmask
  2334. # asm 1: and <mulredmask=int64#2,<rt0=int64#4
  2335. # asm 2: and <mulredmask=%rsi,<rt0=%rcx
  2336. and %rsi,%rcx
  2337. # qhasm: mulr11 = (mulr11.rt1) << 13
  2338. # asm 1: shld $13,<rt1=int64#6,<mulr11=int64#8
  2339. # asm 2: shld $13,<rt1=%r9,<mulr11=%r10
  2340. shld $13,%r9,%r10
  2341. # qhasm: rt1 &= mulredmask
  2342. # asm 1: and <mulredmask=int64#2,<rt1=int64#6
  2343. # asm 2: and <mulredmask=%rsi,<rt1=%r9
  2344. and %rsi,%r9
  2345. # qhasm: rt1 += mulr01
  2346. # asm 1: add <mulr01=int64#5,<rt1=int64#6
  2347. # asm 2: add <mulr01=%r8,<rt1=%r9
  2348. add %r8,%r9
  2349. # qhasm: mulr21 = (mulr21.rt2) << 13
  2350. # asm 1: shld $13,<rt2=int64#9,<mulr21=int64#10
  2351. # asm 2: shld $13,<rt2=%r11,<mulr21=%r12
  2352. shld $13,%r11,%r12
  2353. # qhasm: rt2 &= mulredmask
  2354. # asm 1: and <mulredmask=int64#2,<rt2=int64#9
  2355. # asm 2: and <mulredmask=%rsi,<rt2=%r11
  2356. and %rsi,%r11
  2357. # qhasm: rt2 += mulr11
  2358. # asm 1: add <mulr11=int64#8,<rt2=int64#9
  2359. # asm 2: add <mulr11=%r10,<rt2=%r11
  2360. add %r10,%r11
  2361. # qhasm: mulr31 = (mulr31.rt3) << 13
  2362. # asm 1: shld $13,<rt3=int64#11,<mulr31=int64#12
  2363. # asm 2: shld $13,<rt3=%r13,<mulr31=%r14
  2364. shld $13,%r13,%r14
  2365. # qhasm: rt3 &= mulredmask
  2366. # asm 1: and <mulredmask=int64#2,<rt3=int64#11
  2367. # asm 2: and <mulredmask=%rsi,<rt3=%r13
  2368. and %rsi,%r13
  2369. # qhasm: rt3 += mulr21
  2370. # asm 1: add <mulr21=int64#10,<rt3=int64#11
  2371. # asm 2: add <mulr21=%r12,<rt3=%r13
  2372. add %r12,%r13
  2373. # qhasm: mulr41 = (mulr41.rt4) << 13
  2374. # asm 1: shld $13,<rt4=int64#13,<mulr41=int64#14
  2375. # asm 2: shld $13,<rt4=%r15,<mulr41=%rbx
  2376. shld $13,%r15,%rbx
  2377. # qhasm: rt4 &= mulredmask
  2378. # asm 1: and <mulredmask=int64#2,<rt4=int64#13
  2379. # asm 2: and <mulredmask=%rsi,<rt4=%r15
  2380. and %rsi,%r15
  2381. # qhasm: rt4 += mulr31
  2382. # asm 1: add <mulr31=int64#12,<rt4=int64#13
  2383. # asm 2: add <mulr31=%r14,<rt4=%r15
  2384. add %r14,%r15
  2385. # qhasm: mulr41 = mulr41 * 19
  2386. # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#3
  2387. # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%rdx
  2388. imulq $19,%rbx,%rdx
  2389. # qhasm: rt0 += mulr41
  2390. # asm 1: add <mulr41=int64#3,<rt0=int64#4
  2391. # asm 2: add <mulr41=%rdx,<rt0=%rcx
  2392. add %rdx,%rcx
  2393. # qhasm: mult = rt0
  2394. # asm 1: mov <rt0=int64#4,>mult=int64#3
  2395. # asm 2: mov <rt0=%rcx,>mult=%rdx
  2396. mov %rcx,%rdx
  2397. # qhasm: (uint64) mult >>= 51
  2398. # asm 1: shr $51,<mult=int64#3
  2399. # asm 2: shr $51,<mult=%rdx
  2400. shr $51,%rdx
  2401. # qhasm: mult += rt1
  2402. # asm 1: add <rt1=int64#6,<mult=int64#3
  2403. # asm 2: add <rt1=%r9,<mult=%rdx
  2404. add %r9,%rdx
  2405. # qhasm: rt1 = mult
  2406. # asm 1: mov <mult=int64#3,>rt1=int64#5
  2407. # asm 2: mov <mult=%rdx,>rt1=%r8
  2408. mov %rdx,%r8
  2409. # qhasm: (uint64) mult >>= 51
  2410. # asm 1: shr $51,<mult=int64#3
  2411. # asm 2: shr $51,<mult=%rdx
  2412. shr $51,%rdx
  2413. # qhasm: rt0 &= mulredmask
  2414. # asm 1: and <mulredmask=int64#2,<rt0=int64#4
  2415. # asm 2: and <mulredmask=%rsi,<rt0=%rcx
  2416. and %rsi,%rcx
  2417. # qhasm: mult += rt2
  2418. # asm 1: add <rt2=int64#9,<mult=int64#3
  2419. # asm 2: add <rt2=%r11,<mult=%rdx
  2420. add %r11,%rdx
  2421. # qhasm: rt2 = mult
  2422. # asm 1: mov <mult=int64#3,>rt2=int64#6
  2423. # asm 2: mov <mult=%rdx,>rt2=%r9
  2424. mov %rdx,%r9
  2425. # qhasm: (uint64) mult >>= 51
  2426. # asm 1: shr $51,<mult=int64#3
  2427. # asm 2: shr $51,<mult=%rdx
  2428. shr $51,%rdx
  2429. # qhasm: rt1 &= mulredmask
  2430. # asm 1: and <mulredmask=int64#2,<rt1=int64#5
  2431. # asm 2: and <mulredmask=%rsi,<rt1=%r8
  2432. and %rsi,%r8
  2433. # qhasm: mult += rt3
  2434. # asm 1: add <rt3=int64#11,<mult=int64#3
  2435. # asm 2: add <rt3=%r13,<mult=%rdx
  2436. add %r13,%rdx
  2437. # qhasm: rt3 = mult
  2438. # asm 1: mov <mult=int64#3,>rt3=int64#7
  2439. # asm 2: mov <mult=%rdx,>rt3=%rax
  2440. mov %rdx,%rax
  2441. # qhasm: (uint64) mult >>= 51
  2442. # asm 1: shr $51,<mult=int64#3
  2443. # asm 2: shr $51,<mult=%rdx
  2444. shr $51,%rdx
  2445. # qhasm: rt2 &= mulredmask
  2446. # asm 1: and <mulredmask=int64#2,<rt2=int64#6
  2447. # asm 2: and <mulredmask=%rsi,<rt2=%r9
  2448. and %rsi,%r9
  2449. # qhasm: mult += rt4
  2450. # asm 1: add <rt4=int64#13,<mult=int64#3
  2451. # asm 2: add <rt4=%r15,<mult=%rdx
  2452. add %r15,%rdx
  2453. # qhasm: rt4 = mult
  2454. # asm 1: mov <mult=int64#3,>rt4=int64#8
  2455. # asm 2: mov <mult=%rdx,>rt4=%r10
  2456. mov %rdx,%r10
  2457. # qhasm: (uint64) mult >>= 51
  2458. # asm 1: shr $51,<mult=int64#3
  2459. # asm 2: shr $51,<mult=%rdx
  2460. shr $51,%rdx
  2461. # qhasm: rt3 &= mulredmask
  2462. # asm 1: and <mulredmask=int64#2,<rt3=int64#7
  2463. # asm 2: and <mulredmask=%rsi,<rt3=%rax
  2464. and %rsi,%rax
  2465. # qhasm: mult *= 19
  2466. # asm 1: imulq $19,<mult=int64#3,>mult=int64#3
  2467. # asm 2: imulq $19,<mult=%rdx,>mult=%rdx
  2468. imulq $19,%rdx,%rdx
  2469. # qhasm: rt0 += mult
  2470. # asm 1: add <mult=int64#3,<rt0=int64#4
  2471. # asm 2: add <mult=%rdx,<rt0=%rcx
  2472. add %rdx,%rcx
  2473. # qhasm: rt4 &= mulredmask
  2474. # asm 1: and <mulredmask=int64#2,<rt4=int64#8
  2475. # asm 2: and <mulredmask=%rsi,<rt4=%r10
  2476. and %rsi,%r10
  2477. # qhasm: *(uint64 *)(rp + 120) = rt0
  2478. # asm 1: movq <rt0=int64#4,120(<rp=int64#1)
  2479. # asm 2: movq <rt0=%rcx,120(<rp=%rdi)
  2480. movq %rcx,120(%rdi)
  2481. # qhasm: *(uint64 *)(rp + 128) = rt1
  2482. # asm 1: movq <rt1=int64#5,128(<rp=int64#1)
  2483. # asm 2: movq <rt1=%r8,128(<rp=%rdi)
  2484. movq %r8,128(%rdi)
  2485. # qhasm: *(uint64 *)(rp + 136) = rt2
  2486. # asm 1: movq <rt2=int64#6,136(<rp=int64#1)
  2487. # asm 2: movq <rt2=%r9,136(<rp=%rdi)
  2488. movq %r9,136(%rdi)
  2489. # qhasm: *(uint64 *)(rp + 144) = rt3
  2490. # asm 1: movq <rt3=int64#7,144(<rp=int64#1)
  2491. # asm 2: movq <rt3=%rax,144(<rp=%rdi)
  2492. movq %rax,144(%rdi)
  2493. # qhasm: *(uint64 *)(rp + 152) = rt4
  2494. # asm 1: movq <rt4=int64#8,152(<rp=int64#1)
  2495. # asm 2: movq <rt4=%r10,152(<rp=%rdi)
  2496. movq %r10,152(%rdi)
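# The limbs at rp+120 .. rp+152 now hold the reduced product of the field
# elements at pp+0 and pp+80 (presumably T3 = X1*Y1), the last of the
# field-element products computed by this routine.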
  2497. # qhasm: caller1 = caller1_stack
  2498. # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
  2499. # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
  2500. movq 0(%rsp),%r11
  2501. # qhasm: caller2 = caller2_stack
  2502. # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
  2503. # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
  2504. movq 8(%rsp),%r12
  2505. # qhasm: caller3 = caller3_stack
  2506. # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
  2507. # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
  2508. movq 16(%rsp),%r13
  2509. # qhasm: caller4 = caller4_stack
  2510. # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
  2511. # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
  2512. movq 24(%rsp),%r14
  2513. # qhasm: caller5 = caller5_stack
  2514. # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
  2515. # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
  2516. movq 32(%rsp),%r15
  2517. # qhasm: caller6 = caller6_stack
  2518. # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
  2519. # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
  2520. movq 40(%rsp),%rbx
  2521. # qhasm: caller7 = caller7_stack
  2522. # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
  2523. # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
  2524. movq 48(%rsp),%rbp
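# Epilogue: the seven loads above restore the values spilled at function
# entry, i.e. the stack-adjustment amount (back into %r11) and the
# callee-saved registers %r12-%r15, %rbx and %rbp.  The "leave" sequence below
# rewinds %rsp by that amount and returns; the two mov instructions are part
# of the fixed instruction sequence qhasm emits for "leave".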
  2525. # qhasm: leave
  2526. add %r11,%rsp
  2527. mov %rdi,%rax
  2528. mov %rsi,%rdx
  2529. ret