# qhasm: int64 r11
# qhasm: int64 r12
# qhasm: int64 r13
# qhasm: int64 r14
# qhasm: int64 r15
# qhasm: int64 rbx
# qhasm: int64 rbp

# qhasm: caller r11
# qhasm: caller r12
# qhasm: caller r13
# qhasm: caller r14
# qhasm: caller r15
# qhasm: caller rbx
# qhasm: caller rbp

# qhasm: stack64 r11_stack
# qhasm: stack64 r12_stack
# qhasm: stack64 r13_stack
# qhasm: stack64 r14_stack
# qhasm: stack64 r15_stack
# qhasm: stack64 rbx_stack
# qhasm: stack64 rbp_stack

# qhasm: int64 arg1
# qhasm: int64 arg2
# qhasm: int64 arg3
# qhasm: int64 arg4
# qhasm: int64 arg5

# qhasm: input arg1
# qhasm: input arg2
# qhasm: input arg3
# qhasm: input arg4
# qhasm: input arg5

# qhasm: int64 out_stack
# qhasm: int64 out
# qhasm: int64 r
# qhasm: int64 s
# qhasm: int64 m
# qhasm: int64 l
# qhasm: int64 m0
# qhasm: int64 m1
# qhasm: int64 m2
# qhasm: int64 m3

# qhasm: float80 a0
# qhasm: float80 a1
# qhasm: float80 a2
# qhasm: float80 a3
# qhasm: float80 h0
# qhasm: float80 h1
# qhasm: float80 h2
# qhasm: float80 h3
# qhasm: float80 x0
# qhasm: float80 x1
# qhasm: float80 x2
# qhasm: float80 x3
# qhasm: float80 y0
# qhasm: float80 y1
# qhasm: float80 y2
# qhasm: float80 y3
# qhasm: float80 r0x0
# qhasm: float80 r1x0
# qhasm: float80 r2x0
# qhasm: float80 r3x0
# qhasm: float80 r0x1
# qhasm: float80 r1x1
# qhasm: float80 r2x1
# qhasm: float80 sr3x1
# qhasm: float80 r0x2
# qhasm: float80 r1x2
# qhasm: float80 sr2x2
# qhasm: float80 sr3x2
# qhasm: float80 r0x3
# qhasm: float80 sr1x3
# qhasm: float80 sr2x3
# qhasm: float80 sr3x3

# qhasm: stack64 d0
# qhasm: stack64 d1
# qhasm: stack64 d2
# qhasm: stack64 d3
# qhasm: stack64 r0
# qhasm: stack64 r1
# qhasm: stack64 r2
# qhasm: stack64 r3
# qhasm: stack64 sr1
# qhasm: stack64 sr2
# qhasm: stack64 sr3

# qhasm: enter poly1305_amd64 stackaligned4096 poly1305_amd64_constants
.text
.p2align 5
.globl _poly1305_amd64
.globl poly1305_amd64
_poly1305_amd64:
poly1305_amd64:
mov %rsp,%r11
sub $poly1305_amd64_constants,%r11
and $4095,%r11
add $192,%r11
sub %r11,%rsp

# qhasm: r11_stack = r11
movq %r11,32(%rsp)

# qhasm: r12_stack = r12
movq %r12,40(%rsp)

# qhasm: r13_stack = r13
movq %r13,48(%rsp)

# qhasm: r14_stack = r14
movq %r14,56(%rsp)

# qhasm: r15_stack = r15
movq %r15,64(%rsp)

# qhasm: rbx_stack = rbx
movq %rbx,72(%rsp)

# qhasm: rbp_stack = rbp
movq %rbp,80(%rsp)

# qhasm: out_stack = arg1
mov %rdi,%r9

# qhasm: round *(uint16 *) &poly1305_amd64_rounding
fldcw poly1305_amd64_rounding(%rip)

# qhasm: r = arg2
mov %rsi,%rdi

# qhasm: a0 = *(int32 *) (r + 0)
fildl 0(%rdi)
fstpl 88(%rsp)                  # r0

# qhasm: a1 = *(int32 *) (r + 4)
fildl 4(%rdi)
fstl 96(%rsp)                   # r1
fstpl 104(%rsp)                 # sr1

# qhasm: a2 = *(int32 *) (r + 8)
fildl 8(%rdi)
fstl 112(%rsp)                  # r2
fstpl 120(%rsp)                 # sr2

# qhasm: a3 = *(int32 *) (r + 12)
fildl 12(%rdi)
fstl 128(%rsp)                  # r3
fstpl 136(%rsp)                 # sr3
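# Note: poly1305 evaluates its polynomial modulo 2^130 - 5.  The four 32-bit
# words of the key r are held as float80 limbs r0..r3 at scales 2^0..2^96,
# and sr1..sr3 are the same limbs pre-multiplied by 5*2^-130 so that partial
# products reaching 2^130 fold straight back into the low end of the
# per-block multiplication.  A minimal C sketch of that idea, with a
# hypothetical helper name and illustrative scaling only (the real constants
# live in poly1305_amd64_constants; r-clamping is not shown):
#
#   #include <math.h>
#   #include <stdint.h>
#   /* hypothetical helper: float limbs of the key words k[0..3]; sr[0] unused */
#   static void poly1305_float_key(long double r[4], long double sr[4],
#                                  const uint32_t k[4]) {
#     r[0] = (long double)k[0];                     /* limb scaled by 2^0        */
#     for (int i = 1; i < 4; i++) {
#       r[i]  = ldexpl((long double)k[i], 32 * i);  /* limb scaled by 2^(32*i)   */
#       sr[i] = r[i] * 5.0L * ldexpl(1.0L, -130);   /* wrapped copy: 2^130 = 5   */
#     }
#   }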
# qhasm: h3 = 0
fldz

movl $0x43300000,148(%rsp)      # top of d0
movl $0x45300000,156(%rsp)      # top of d1
movl $0x47300000,164(%rsp)      # top of d2
movl $0x49300000,172(%rsp)      # top of d3

mov %rcx,%rsi                   # m = arg4
mov %r8,%rcx                    # l = arg5

movl 12(%rsi),%edi              # m3 = *(uint32 *) (m + 12)
movl 8(%rsi),%r8d               # m2 = *(uint32 *) (m + 8)
movl 4(%rsi),%eax               # m1 = *(uint32 *) (m + 4)
movl 0(%rsi),%r10d              # m0 = *(uint32 *) (m + 0)

movl 12(%rsi),%edi              # m3 = *(uint32 *) (m + 12)
movl 8(%rsi),%r8d               # m2 = *(uint32 *) (m + 8)
movl 4(%rsi),%eax               # m1 = *(uint32 *) (m + 4)
movl 0(%rsi),%r10d              # m0 = *(uint32 *) (m + 0)

movl $0,0(%rsp)                 # first word of lastchunk = 0

leaq 0(%rsp),%rdi               # destination = &lastchunk

movl 12+0(%rsp),%edi            # m3 from lastchunk
movl 8+0(%rsp),%esi             # m2 from lastchunk
movl 4+0(%rsp),%ecx             # m1 from lastchunk
movl 0(%rsp),%r8d               # m0 from lastchunk

fstpl 88(%rsp)                  # d0
fstpl 96(%rsp)                  # d1
fstpl 104(%rsp)                 # d2
fstpl 112(%rsp)                 # d3

# qhasm: int64 f0
# qhasm: int64 f1
# qhasm: int64 f2
# qhasm: int64 f3
# qhasm: int64 f4
# qhasm: int64 g0
# qhasm: int64 g1
# qhasm: int64 g2
# qhasm: int64 g3
# qhasm: int64 f
# qhasm: int64 notf
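# Note: d0..d3 are used for the classic exponent-offset trick.  A double whose
# high 32 bits are 0x43300000 and whose low 32 bits are a word w has the value
# 2^52 + w, so 32-bit stores and loads move integer words into and out of the
# x87 accumulator without integer conversion instructions; 0x45300000,
# 0x47300000 and 0x49300000 play the same role at limb scales 2^32, 2^64 and
# 2^96.  The fstpl stores above and the "top d0"/"bottom d0" reads below are
# the two directions of that trick.  A minimal C sketch, assuming the same
# little-endian layout (hypothetical helper names):
#
#   #include <stdint.h>
#   #include <string.h>
#
#   /* 32-bit word -> double, without an integer-to-float instruction */
#   static double word_to_double(uint32_t w) {
#     uint64_t bits = ((uint64_t)0x43300000u << 32) | w;   /* encodes 2^52 + w */
#     double d;
#     memcpy(&d, &bits, 8);
#     return d - 0x1p52;                                   /* == (double)w */
#   }
#
#   /* double holding 2^52 + v (v < 2^38) -> low 32 bits plus carry bits */
#   static void double_to_limb(double d, uint32_t *bottom, uint32_t *top) {
#     uint64_t bits;
#     memcpy(&bits, &d, 8);
#     *bottom = (uint32_t)bits;                  /* "bottom d0" below          */
#     *top    = (uint32_t)(bits >> 32) & 63;     /* "top d0", masked with 63   */
#   }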
# qhasm: g0 = top d0
movl 92(%rsp),%edi

# qhasm: (uint32) g0 &= 63
and $63,%edi

movl 100(%rsp),%esi             # g1 = top d1

# qhasm: (uint32) g1 &= 63
and $63,%esi

movl 108(%rsp),%ecx             # g2 = top d2

# qhasm: (uint32) g2 &= 63
and $63,%ecx

movl 116(%rsp),%r8d             # g3 = top d3

# qhasm: (uint32) g3 &= 63
and $63,%r8d

movl 96(%rsp),%eax              # f1 = bottom d1

# qhasm: carry? (uint32) f1 += g0
add %edi,%eax

movl 104(%rsp),%edi             # f2 = bottom d2

# qhasm: carry? (uint32) f2 += g1 + carry
adc %esi,%edi

movl 112(%rsp),%esi             # f3 = bottom d3

# qhasm: carry? (uint32) f3 += g2 + carry
adc %ecx,%esi

mov $0,%rcx                     # f4 = 0

# qhasm: (uint32) f4 += g3 + carry
adc %r8d,%ecx

mov $5,%r8                      # g0 = 5

# qhasm: f0 = bottom d0
movl 88(%rsp),%r10d

# qhasm: carry? (uint32) g0 += f0
add %r10d,%r8d

mov $0,%r11                     # g1 = 0

# qhasm: carry? (uint32) g1 += f1 + carry
adc %eax,%r11d

mov $0,%r12                     # g2 = 0

# qhasm: carry? (uint32) g2 += f2 + carry
adc %edi,%r12d

mov $0,%r13                     # g3 = 0

# qhasm: carry? (uint32) g3 += f3 + carry
adc %esi,%r13d

mov $-4,%r14                    # f = -4

# qhasm: (uint32) f += f4 + carry
adc %ecx,%r14d

sar $16,%r14d                   # (int32) f >>= 16

mov %r14,%rcx                   # notf = f

# qhasm: (uint32) notf ^= -1
xor $-1,%ecx

mov %rdx,%rdx                   # s = arg3

# qhasm: carry? (uint32) f0 += *(uint32 *) (s + 0)

mov %r9,%rdx                    # out = out_stack

# qhasm: *(uint32 *) (out + 0) = f0

movq 32(%rsp),%r11              # r11 = r11_stack

# qhasm: r12 = r12_stack
movq 40(%rsp),%r12

# qhasm: r13 = r13_stack
movq 48(%rsp),%r13

# qhasm: r14 = r14_stack
movq 56(%rsp),%r14

# qhasm: r15 = r15_stack
movq 64(%rsp),%r15

# qhasm: rbx = rbx_stack
movq 72(%rsp),%rbx

# qhasm: rbp = rbp_stack
movq 80(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
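# Note: the carry chains above are the standard poly1305 finalization: freeze
# the accumulator h modulo 2^130 - 5 by conditionally replacing it with
# h + 5 - 2^130 (selected through the f/notf masks), then add the 128-bit
# value s and emit the 16-byte tag little-endian.  A minimal C sketch of that
# step, with a hypothetical helper name and 32-bit limbs as in the code above:
#
#   #include <stdint.h>
#   static void poly1305_finish(unsigned char out[16],
#                               const uint32_t h[5],   /* h[4] = bits above 2^128 */
#                               const unsigned char s[16]) {
#     uint32_t g[4];
#     uint64_t c = 5;
#     for (int i = 0; i < 4; i++) { c += h[i]; g[i] = (uint32_t)c; c >>= 32; }
#     c += h[4];                                   /* c >= 4  <=>  h + 5 >= 2^130 */
#     uint32_t mask = (c >= 4) ? 0xffffffffu : 0;  /* pick h+5-2^130, else h      */
#     c = 0;
#     for (int i = 0; i < 4; i++) {
#       uint32_t hi = (h[i] & ~mask) | (g[i] & mask);
#       uint32_t si = (uint32_t)s[4*i] | ((uint32_t)s[4*i+1] << 8)
#                   | ((uint32_t)s[4*i+2] << 16) | ((uint32_t)s[4*i+3] << 24);
#       c += (uint64_t)hi + si;                    /* add s, carrying mod 2^128   */
#       out[4*i]   = (unsigned char)c;
#       out[4*i+1] = (unsigned char)(c >> 8);
#       out[4*i+2] = (unsigned char)(c >> 16);
#       out[4*i+3] = (unsigned char)(c >> 24);
#       c >>= 32;
#     }
#   }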