# qhasm: int64 a
# qhasm: int64 b
# qhasm: int64 c
# qhasm: int64 d
# qhasm: int64 zero
# qhasm: int64 arg1
# qhasm: int64 arg2
# qhasm: int64 arg3
# qhasm: int64 arg4
# qhasm: input arg1
# qhasm: input arg2
# qhasm: input arg3
# qhasm: input arg4
# qhasm: int64 r11
# qhasm: int64 r12
# qhasm: int64 r13
# qhasm: int64 r14
# qhasm: int64 r15
# qhasm: int64 rbx
# qhasm: int64 rbp
# qhasm: caller r11
# qhasm: caller r12
# qhasm: caller r13
# qhasm: caller r14
# qhasm: caller r15
# qhasm: caller rbx
# qhasm: caller rbp
# qhasm: stack64 r11_stack
# qhasm: stack64 r12_stack
# qhasm: stack64 r13_stack
# qhasm: stack64 r14_stack
# qhasm: stack64 r15_stack
# qhasm: stack64 rbx_stack
# qhasm: stack64 rbp_stack
# qhasm: int64 k
# qhasm: int64 kbits
# qhasm: int64 iv
# qhasm: stack64 i_backup
# qhasm: int64 i
# qhasm: stack64 x_backup
# qhasm: int64 x
# qhasm: stack64 m_backup
# qhasm: int64 m
# qhasm: stack64 out_backup
# qhasm: int64 out
# qhasm: stack64 bytes_backup
# qhasm: int64 bytes
# qhasm: int64 in0
# qhasm: int64 in2
# qhasm: int64 in4
# qhasm: int64 in6
# qhasm: int64 in8
# qhasm: int64 in10
# qhasm: int64 in12
# qhasm: int64 in14
# qhasm: int64 out0
# qhasm: int64 out2
# qhasm: int64 out4
# qhasm: int64 out6
# qhasm: int64 out8
# qhasm: int64 out10
# qhasm: int64 out12
# qhasm: int64 out14
# qhasm: int64 x0
# qhasm: int64 x1
# qhasm: int64 x2
# qhasm: int64 x3
# qhasm: int64 x4
# qhasm: int64 x5
# qhasm: int64 x6
# qhasm: int64 x7
# qhasm: int64 x8
# qhasm: int64 x9
# qhasm: int64 x10
# qhasm: int64 x11
# qhasm: int64 x12
# qhasm: int64 x13
# qhasm: int64 x14
# qhasm: int64 x15
# qhasm: stack64 x5_stack
# qhasm: stack64 x10_stack
# qhasm: stack64 x15_stack
# qhasm: stack64 j0
# qhasm: stack64 j2
# qhasm: stack64 j4
# qhasm: stack64 j6
# qhasm: stack64 j8
# qhasm: stack64 j10
# qhasm: stack64 j12
# qhasm: stack64 j14
# qhasm: stack512 tmp
# qhasm: stack64 ctarget
# qhasm: enter ECRYPT_keystream_bytes
.text
.p2align 5
.globl _ECRYPT_keystream_bytes
.globl ECRYPT_keystream_bytes
_ECRYPT_keystream_bytes:
ECRYPT_keystream_bytes:
mov %rsp,%r11
and $31,%r11
add $256,%r11
sub %r11,%rsp
# qhasm: x = arg1
# asm 1: mov x=int64#5
# asm 2: mov x=%r8
mov %rdi,%r8
# qhasm: m = arg2
# asm 1: mov m=int64#2
# asm 2: mov m=%rsi
mov %rsi,%rsi
# qhasm: out = m
# asm 1: mov out=int64#1
# asm 2: mov out=%rdi
mov %rsi,%rdi
# qhasm: bytes = arg3
# asm 1: mov bytes=int64#3
# asm 2: mov bytes=%rdx
mov %rdx,%rdx
# qhasm: unsigned>? bytes - 0
# asm 1: cmp $0,
jbe ._done
# qhasm: zero = 0
# asm 1: mov $0,>zero=int64#7
# asm 2: mov $0,>zero=%rax
mov $0,%rax
# qhasm: i = bytes
# asm 1: mov i=int64#4
# asm 2: mov i=%rcx
mov %rdx,%rcx
# qhasm: while (i) { *out++ = zero; --i }
rep stosb
# qhasm: out -= bytes
# asm 1: sub x=int64#5
# asm 2: mov x=%r8
mov %rdi,%r8
# qhasm: m = arg2
# asm 1: mov m=int64#2
# asm 2: mov m=%rsi
mov %rsi,%rsi
# qhasm: out = arg3
# asm 1: mov out=int64#1
# asm 2: mov out=%rdi
mov %rdx,%rdi
# qhasm: bytes = arg4
# asm 1: mov bytes=int64#3
# asm 2: mov bytes=%rdx
mov %rcx,%rdx
# qhasm: unsigned>?
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by jump # qhasm: goto start jmp ._start # qhasm: enter ECRYPT_encrypt_bytes .text .p2align 5 .globl _ECRYPT_encrypt_bytes .globl ECRYPT_encrypt_bytes _ECRYPT_encrypt_bytes: ECRYPT_encrypt_bytes: mov %rsp,%r11 and $31,%r11 add $256,%r11 sub %r11,%rsp # qhasm: x = arg1 # asm 1: mov x=int64#5 # asm 2: mov x=%r8 mov %rdi,%r8 # qhasm: m = arg2 # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rsi,%rsi # qhasm: out = arg3 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdx,%rdi # qhasm: bytes = arg4 # asm 1: mov bytes=int64#3 # asm 2: mov bytes=%rdx mov %rcx,%rdx # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: r11_stack = r11 # asm 1: movq r11_stack=stack64#1 # asm 2: movq r11_stack=0(%rsp) movq %r11,0(%rsp) # qhasm: r12_stack = r12 # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=8(%rsp) movq %r12,8(%rsp) # qhasm: r13_stack = r13 # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=16(%rsp) movq %r13,16(%rsp) # qhasm: r14_stack = r14 # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=24(%rsp) movq %r14,24(%rsp) # qhasm: r15_stack = r15 # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=32(%rsp) movq %r15,32(%rsp) # qhasm: rbx_stack = rbx # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=40(%rsp) movq %rbx,40(%rsp) # qhasm: rbp_stack = rbp # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=48(%rsp) movq %rbp,48(%rsp) # qhasm: in0 = *(uint64 *) (x + 0) # asm 1: movq 0(in0=int64#4 # asm 2: movq 0(in0=%rcx movq 0(%r8),%rcx # qhasm: in2 = *(uint64 *) (x + 8) # asm 1: movq 8(in2=int64#6 # asm 2: movq 8(in2=%r9 movq 8(%r8),%r9 # qhasm: in4 = *(uint64 *) (x + 16) # asm 1: movq 16(in4=int64#7 # asm 2: movq 16(in4=%rax movq 16(%r8),%rax # qhasm: in6 = *(uint64 *) (x + 24) # asm 1: movq 24(in6=int64#8 # asm 2: movq 24(in6=%r10 movq 24(%r8),%r10 # qhasm: in8 = *(uint64 *) (x + 32) # asm 1: movq 32(in8=int64#9 # asm 2: movq 32(in8=%r11 movq 32(%r8),%r11 # qhasm: in10 = *(uint64 *) (x + 40) # asm 1: movq 40(in10=int64#10 # asm 2: movq 40(in10=%r12 movq 40(%r8),%r12 # qhasm: in12 = *(uint64 *) (x + 48) # asm 1: movq 48(in12=int64#11 # asm 2: movq 48(in12=%r13 movq 48(%r8),%r13 # qhasm: in14 = *(uint64 *) (x + 56) # asm 1: movq 56(in14=int64#12 # asm 2: movq 56(in14=%r14 movq 56(%r8),%r14 # qhasm: j0 = in0 # asm 1: movq j0=stack64#8 # asm 2: movq j0=56(%rsp) movq %rcx,56(%rsp) # qhasm: j2 = in2 # asm 1: movq j2=stack64#9 # asm 2: movq j2=64(%rsp) movq %r9,64(%rsp) # qhasm: j4 = in4 # asm 1: movq j4=stack64#10 # asm 2: movq j4=72(%rsp) movq %rax,72(%rsp) # qhasm: j6 = in6 # asm 1: movq j6=stack64#11 # asm 2: movq j6=80(%rsp) movq %r10,80(%rsp) # qhasm: j8 = in8 # asm 1: movq j8=stack64#12 # asm 2: movq j8=88(%rsp) movq %r11,88(%rsp) # qhasm: j10 = in10 # asm 1: movq j10=stack64#13 # asm 2: movq j10=96(%rsp) movq %r12,96(%rsp) # qhasm: j12 = in12 # asm 1: movq j12=stack64#14 # asm 2: movq j12=104(%rsp) movq %r13,104(%rsp) # qhasm: j14 = in14 # asm 1: movq j14=stack64#15 # asm 2: movq j14=112(%rsp) movq %r14,112(%rsp) # qhasm: x_backup = x # asm 1: movq x_backup=stack64#16 # asm 2: movq x_backup=120(%rsp) movq %r8,120(%rsp) # qhasm: bytesatleast1: ._bytesatleast1: # qhasm: unsignedctarget=stack64#17 # asm 2: movq ctarget=128(%rsp) movq %rdi,128(%rsp) # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 192(%rsp),%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %rdx,%rcx # 
qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 192(%rsp),%rdi # qhasm: m = &tmp # asm 1: leaq m=int64#2 # asm 2: leaq m=%rsi leaq 192(%rsp),%rsi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: out_backup = out # asm 1: movq out_backup=stack64#18 # asm 2: movq out_backup=136(%rsp) movq %rdi,136(%rsp) # qhasm: m_backup = m # asm 1: movq m_backup=stack64#19 # asm 2: movq m_backup=144(%rsp) movq %rsi,144(%rsp) # qhasm: bytes_backup = bytes # asm 1: movq bytes_backup=stack64#20 # asm 2: movq bytes_backup=152(%rsp) movq %rdx,152(%rsp) # qhasm: x1 = j0 # asm 1: movq x1=int64#1 # asm 2: movq x1=%rdi movq 56(%rsp),%rdi # qhasm: x0 = x1 # asm 1: mov x0=int64#3 # asm 2: mov x0=%rdx mov %rdi,%rdx # qhasm: (uint64) x1 >>= 32 # asm 1: shr $32,x3=int64#2 # asm 2: movq x3=%rsi movq 64(%rsp),%rsi # qhasm: x2 = x3 # asm 1: mov x2=int64#4 # asm 2: mov x2=%rcx mov %rsi,%rcx # qhasm: (uint64) x3 >>= 32 # asm 1: shr $32,x5=int64#5 # asm 2: movq x5=%r8 movq 72(%rsp),%r8 # qhasm: x4 = x5 # asm 1: mov x4=int64#6 # asm 2: mov x4=%r9 mov %r8,%r9 # qhasm: (uint64) x5 >>= 32 # asm 1: shr $32,x5_stack=stack64#21 # asm 2: movq x5_stack=160(%rsp) movq %r8,160(%rsp) # qhasm: x7 = j6 # asm 1: movq x7=int64#5 # asm 2: movq x7=%r8 movq 80(%rsp),%r8 # qhasm: x6 = x7 # asm 1: mov x6=int64#7 # asm 2: mov x6=%rax mov %r8,%rax # qhasm: (uint64) x7 >>= 32 # asm 1: shr $32,x9=int64#8 # asm 2: movq x9=%r10 movq 88(%rsp),%r10 # qhasm: x8 = x9 # asm 1: mov x8=int64#9 # asm 2: mov x8=%r11 mov %r10,%r11 # qhasm: (uint64) x9 >>= 32 # asm 1: shr $32,x11=int64#10 # asm 2: movq x11=%r12 movq 96(%rsp),%r12 # qhasm: x10 = x11 # asm 1: mov x10=int64#11 # asm 2: mov x10=%r13 mov %r12,%r13 # qhasm: x10_stack = x10 # asm 1: movq x10_stack=stack64#22 # asm 2: movq x10_stack=168(%rsp) movq %r13,168(%rsp) # qhasm: (uint64) x11 >>= 32 # asm 1: shr $32,x13=int64#11 # asm 2: movq x13=%r13 movq 104(%rsp),%r13 # qhasm: x12 = x13 # asm 1: mov x12=int64#12 # asm 2: mov x12=%r14 mov %r13,%r14 # qhasm: (uint64) x13 >>= 32 # asm 1: shr $32,x15=int64#13 # asm 2: movq x15=%r15 movq 112(%rsp),%r15 # qhasm: x14 = x15 # asm 1: mov x14=int64#14 # asm 2: mov x14=%rbx mov %r15,%rbx # qhasm: (uint64) x15 >>= 32 # asm 1: shr $32,x15_stack=stack64#23 # asm 2: movq x15_stack=176(%rsp) movq %r15,176(%rsp) # qhasm: i = 20 # asm 1: mov $20,>i=int64#13 # asm 2: mov $20,>i=%r15 mov $20,%r15 # qhasm: mainloop: ._mainloop: # qhasm: i_backup = i # asm 1: movq i_backup=stack64#24 # asm 2: movq i_backup=184(%rsp) movq %r15,184(%rsp) # qhasm: x5 = x5_stack # asm 1: movq x5=int64#13 # asm 2: movq x5=%r15 movq 160(%rsp),%r15 # qhasm: a = x12 + x0 # asm 1: lea (a=int64#15 # asm 2: lea (a=%rbp lea (%r14,%rdx),%rbp # qhasm: (uint32) a <<<= 7 # asm 1: rol $7,b=int64#15 # asm 2: lea (b=%rbp lea (%rdi,%r15),%rbp # qhasm: (uint32) b <<<= 7 # asm 1: rol $7,a=int64#15 # asm 2: lea (a=%rbp lea (%rdx,%r9),%rbp # qhasm: (uint32) a <<<= 9 # asm 1: rol $9,b=int64#15 # asm 2: lea (b=%rbp lea (%r15,%r10),%rbp # qhasm: (uint32) b <<<= 9 # asm 1: rol $9,a=int64#15 # asm 2: lea (a=%rbp lea (%r9,%r11),%rbp # qhasm: (uint32) a <<<= 13 # asm 1: rol $13,b=int64#15 # asm 2: lea (b=%rbp lea (%r10,%r13),%rbp # qhasm: (uint32) b <<<= 13 # asm 1: rol $13,a=int64#15 # asm 2: lea (a=%rbp lea (%r11,%r14),%rbp # qhasm: (uint32) a <<<= 18 # asm 1: rol $18,b=int64#15 # asm 2: lea (b=%rbp lea (%r13,%rdi),%rbp # qhasm: (uint32) b <<<= 18 # asm 1: rol $18,x10=int64#15 # asm 2: movq x10=%rbp movq 168(%rsp),%rbp # 
qhasm: x5_stack = x5 # asm 1: movq x5_stack=stack64#21 # asm 2: movq x5_stack=160(%rsp) movq %r15,160(%rsp) # qhasm: c = x6 + x10 # asm 1: lea (c=int64#13 # asm 2: lea (c=%r15 lea (%rax,%rbp),%r15 # qhasm: (uint32) c <<<= 7 # asm 1: rol $7,c=int64#13 # asm 2: lea (c=%r15 lea (%rbp,%rbx),%r15 # qhasm: (uint32) c <<<= 9 # asm 1: rol $9,c=int64#13 # asm 2: lea (c=%r15 lea (%rbx,%rcx),%r15 # qhasm: (uint32) c <<<= 13 # asm 1: rol $13,c=int64#13 # asm 2: lea (c=%r15 lea (%rcx,%rax),%r15 # qhasm: (uint32) c <<<= 18 # asm 1: rol $18,x15=int64#13 # asm 2: movq x15=%r15 movq 176(%rsp),%r15 # qhasm: x10_stack = x10 # asm 1: movq x10_stack=stack64#22 # asm 2: movq x10_stack=168(%rsp) movq %rbp,168(%rsp) # qhasm: d = x11 + x15 # asm 1: lea (d=int64#15 # asm 2: lea (d=%rbp lea (%r12,%r15),%rbp # qhasm: (uint32) d <<<= 7 # asm 1: rol $7,d=int64#15 # asm 2: lea (d=%rbp lea (%r15,%rsi),%rbp # qhasm: (uint32) d <<<= 9 # asm 1: rol $9,d=int64#15 # asm 2: lea (d=%rbp lea (%rsi,%r8),%rbp # qhasm: (uint32) d <<<= 13 # asm 1: rol $13,d=int64#15 # asm 2: lea (d=%rbp lea (%r8,%r12),%rbp # qhasm: (uint32) d <<<= 18 # asm 1: rol $18,x15_stack=stack64#23 # asm 2: movq x15_stack=176(%rsp) movq %r15,176(%rsp) # qhasm: x5 = x5_stack # asm 1: movq x5=int64#13 # asm 2: movq x5=%r15 movq 160(%rsp),%r15 # qhasm: a = x3 + x0 # asm 1: lea (a=int64#15 # asm 2: lea (a=%rbp lea (%rsi,%rdx),%rbp # qhasm: (uint32) a <<<= 7 # asm 1: rol $7,b=int64#15 # asm 2: lea (b=%rbp lea (%r9,%r15),%rbp # qhasm: (uint32) b <<<= 7 # asm 1: rol $7,a=int64#15 # asm 2: lea (a=%rbp lea (%rdx,%rdi),%rbp # qhasm: (uint32) a <<<= 9 # asm 1: rol $9,b=int64#15 # asm 2: lea (b=%rbp lea (%r15,%rax),%rbp # qhasm: (uint32) b <<<= 9 # asm 1: rol $9,a=int64#15 # asm 2: lea (a=%rbp lea (%rdi,%rcx),%rbp # qhasm: (uint32) a <<<= 13 # asm 1: rol $13,b=int64#15 # asm 2: lea (b=%rbp lea (%rax,%r8),%rbp # qhasm: (uint32) b <<<= 13 # asm 1: rol $13,a=int64#15 # asm 2: lea (a=%rbp lea (%rcx,%rsi),%rbp # qhasm: (uint32) a <<<= 18 # asm 1: rol $18,b=int64#15 # asm 2: lea (b=%rbp lea (%r8,%r9),%rbp # qhasm: (uint32) b <<<= 18 # asm 1: rol $18,x10=int64#15 # asm 2: movq x10=%rbp movq 168(%rsp),%rbp # qhasm: x5_stack = x5 # asm 1: movq x5_stack=stack64#21 # asm 2: movq x5_stack=160(%rsp) movq %r15,160(%rsp) # qhasm: c = x9 + x10 # asm 1: lea (c=int64#13 # asm 2: lea (c=%r15 lea (%r10,%rbp),%r15 # qhasm: (uint32) c <<<= 7 # asm 1: rol $7,c=int64#13 # asm 2: lea (c=%r15 lea (%rbp,%r12),%r15 # qhasm: (uint32) c <<<= 9 # asm 1: rol $9,c=int64#13 # asm 2: lea (c=%r15 lea (%r12,%r11),%r15 # qhasm: (uint32) c <<<= 13 # asm 1: rol $13,c=int64#13 # asm 2: lea (c=%r15 lea (%r11,%r10),%r15 # qhasm: (uint32) c <<<= 18 # asm 1: rol $18,x15=int64#13 # asm 2: movq x15=%r15 movq 176(%rsp),%r15 # qhasm: x10_stack = x10 # asm 1: movq x10_stack=stack64#22 # asm 2: movq x10_stack=168(%rsp) movq %rbp,168(%rsp) # qhasm: d = x14 + x15 # asm 1: lea (d=int64#15 # asm 2: lea (d=%rbp lea (%rbx,%r15),%rbp # qhasm: (uint32) d <<<= 7 # asm 1: rol $7,d=int64#15 # asm 2: lea (d=%rbp lea (%r15,%r14),%rbp # qhasm: (uint32) d <<<= 9 # asm 1: rol $9,d=int64#15 # asm 2: lea (d=%rbp lea (%r14,%r13),%rbp # qhasm: (uint32) d <<<= 13 # asm 1: rol $13,d=int64#15 # asm 2: lea (d=%rbp lea (%r13,%rbx),%rbp # qhasm: (uint32) d <<<= 18 # asm 1: rol $18,x15_stack=stack64#23 # asm 2: movq x15_stack=176(%rsp) movq %r15,176(%rsp) # qhasm: x5 = x5_stack # asm 1: movq x5=int64#13 # asm 2: movq x5=%r15 movq 160(%rsp),%r15 # qhasm: a = x12 + x0 # asm 1: lea (a=int64#15 # asm 2: lea (a=%rbp lea (%r14,%rdx),%rbp # qhasm: 
(uint32) a <<<= 7 # asm 1: rol $7,b=int64#15 # asm 2: lea (b=%rbp lea (%rdi,%r15),%rbp # qhasm: (uint32) b <<<= 7 # asm 1: rol $7,a=int64#15 # asm 2: lea (a=%rbp lea (%rdx,%r9),%rbp # qhasm: (uint32) a <<<= 9 # asm 1: rol $9,b=int64#15 # asm 2: lea (b=%rbp lea (%r15,%r10),%rbp # qhasm: (uint32) b <<<= 9 # asm 1: rol $9,a=int64#15 # asm 2: lea (a=%rbp lea (%r9,%r11),%rbp # qhasm: (uint32) a <<<= 13 # asm 1: rol $13,b=int64#15 # asm 2: lea (b=%rbp lea (%r10,%r13),%rbp # qhasm: (uint32) b <<<= 13 # asm 1: rol $13,a=int64#15 # asm 2: lea (a=%rbp lea (%r11,%r14),%rbp # qhasm: (uint32) a <<<= 18 # asm 1: rol $18,b=int64#15 # asm 2: lea (b=%rbp lea (%r13,%rdi),%rbp # qhasm: (uint32) b <<<= 18 # asm 1: rol $18,x10=int64#15 # asm 2: movq x10=%rbp movq 168(%rsp),%rbp # qhasm: x5_stack = x5 # asm 1: movq x5_stack=stack64#21 # asm 2: movq x5_stack=160(%rsp) movq %r15,160(%rsp) # qhasm: c = x6 + x10 # asm 1: lea (c=int64#13 # asm 2: lea (c=%r15 lea (%rax,%rbp),%r15 # qhasm: (uint32) c <<<= 7 # asm 1: rol $7,c=int64#13 # asm 2: lea (c=%r15 lea (%rbp,%rbx),%r15 # qhasm: (uint32) c <<<= 9 # asm 1: rol $9,c=int64#13 # asm 2: lea (c=%r15 lea (%rbx,%rcx),%r15 # qhasm: (uint32) c <<<= 13 # asm 1: rol $13,c=int64#13 # asm 2: lea (c=%r15 lea (%rcx,%rax),%r15 # qhasm: (uint32) c <<<= 18 # asm 1: rol $18,x15=int64#13 # asm 2: movq x15=%r15 movq 176(%rsp),%r15 # qhasm: x10_stack = x10 # asm 1: movq x10_stack=stack64#22 # asm 2: movq x10_stack=168(%rsp) movq %rbp,168(%rsp) # qhasm: d = x11 + x15 # asm 1: lea (d=int64#15 # asm 2: lea (d=%rbp lea (%r12,%r15),%rbp # qhasm: (uint32) d <<<= 7 # asm 1: rol $7,d=int64#15 # asm 2: lea (d=%rbp lea (%r15,%rsi),%rbp # qhasm: (uint32) d <<<= 9 # asm 1: rol $9,d=int64#15 # asm 2: lea (d=%rbp lea (%rsi,%r8),%rbp # qhasm: (uint32) d <<<= 13 # asm 1: rol $13,d=int64#15 # asm 2: lea (d=%rbp lea (%r8,%r12),%rbp # qhasm: (uint32) d <<<= 18 # asm 1: rol $18,x15_stack=stack64#23 # asm 2: movq x15_stack=176(%rsp) movq %r15,176(%rsp) # qhasm: x5 = x5_stack # asm 1: movq x5=int64#13 # asm 2: movq x5=%r15 movq 160(%rsp),%r15 # qhasm: a = x3 + x0 # asm 1: lea (a=int64#15 # asm 2: lea (a=%rbp lea (%rsi,%rdx),%rbp # qhasm: (uint32) a <<<= 7 # asm 1: rol $7,b=int64#15 # asm 2: lea (b=%rbp lea (%r9,%r15),%rbp # qhasm: (uint32) b <<<= 7 # asm 1: rol $7,a=int64#15 # asm 2: lea (a=%rbp lea (%rdx,%rdi),%rbp # qhasm: (uint32) a <<<= 9 # asm 1: rol $9,b=int64#15 # asm 2: lea (b=%rbp lea (%r15,%rax),%rbp # qhasm: (uint32) b <<<= 9 # asm 1: rol $9,a=int64#15 # asm 2: lea (a=%rbp lea (%rdi,%rcx),%rbp # qhasm: (uint32) a <<<= 13 # asm 1: rol $13,b=int64#15 # asm 2: lea (b=%rbp lea (%rax,%r8),%rbp # qhasm: (uint32) b <<<= 13 # asm 1: rol $13,a=int64#15 # asm 2: lea (a=%rbp lea (%rcx,%rsi),%rbp # qhasm: (uint32) a <<<= 18 # asm 1: rol $18,b=int64#15 # asm 2: lea (b=%rbp lea (%r8,%r9),%rbp # qhasm: (uint32) b <<<= 18 # asm 1: rol $18,x10=int64#15 # asm 2: movq x10=%rbp movq 168(%rsp),%rbp # qhasm: x5_stack = x5 # asm 1: movq x5_stack=stack64#21 # asm 2: movq x5_stack=160(%rsp) movq %r15,160(%rsp) # qhasm: c = x9 + x10 # asm 1: lea (c=int64#13 # asm 2: lea (c=%r15 lea (%r10,%rbp),%r15 # qhasm: (uint32) c <<<= 7 # asm 1: rol $7,c=int64#13 # asm 2: lea (c=%r15 lea (%rbp,%r12),%r15 # qhasm: (uint32) c <<<= 9 # asm 1: rol $9,c=int64#13 # asm 2: lea (c=%r15 lea (%r12,%r11),%r15 # qhasm: (uint32) c <<<= 13 # asm 1: rol $13,c=int64#13 # asm 2: lea (c=%r15 lea (%r11,%r10),%r15 # qhasm: (uint32) c <<<= 18 # asm 1: rol $18,x15=int64#13 # asm 2: movq x15=%r15 movq 176(%rsp),%r15 # qhasm: x10_stack = x10 # asm 1: movq 
x10_stack=stack64#22 # asm 2: movq x10_stack=168(%rsp) movq %rbp,168(%rsp) # qhasm: d = x14 + x15 # asm 1: lea (d=int64#15 # asm 2: lea (d=%rbp lea (%rbx,%r15),%rbp # qhasm: (uint32) d <<<= 7 # asm 1: rol $7,d=int64#15 # asm 2: lea (d=%rbp lea (%r15,%r14),%rbp # qhasm: (uint32) d <<<= 9 # asm 1: rol $9,d=int64#15 # asm 2: lea (d=%rbp lea (%r14,%r13),%rbp # qhasm: (uint32) d <<<= 13 # asm 1: rol $13,d=int64#15 # asm 2: lea (d=%rbp lea (%r13,%rbx),%rbp # qhasm: (uint32) d <<<= 18 # asm 1: rol $18,x15_stack=stack64#23 # asm 2: movq x15_stack=176(%rsp) movq %r15,176(%rsp) # qhasm: i = i_backup # asm 1: movq i=int64#13 # asm 2: movq i=%r15 movq 184(%rsp),%r15 # qhasm: unsigned>? i -= 4 # asm 1: sub $4, ja ._mainloop # qhasm: (uint32) x2 += j2 # asm 1: addl >= 32 # asm 1: shr $32,>= 32 # asm 1: shr $32,>= 32 # asm 1: shr $32,>= 32 # asm 1: shr $32,>= 32 # asm 1: shr $32,x5=int64#1 # asm 2: movq x5=%rdi movq 160(%rsp),%rdi # qhasm: (uint32) x4 += j4 # asm 1: addl >= 32 # asm 1: shr $32,x10=int64#5 # asm 2: movq x10=%r8 movq 168(%rsp),%r8 # qhasm: (uint32) x10 += j10 # asm 1: addl >= 32 # asm 1: shr $32,x15=int64#1 # asm 2: movq x15=%rdi movq 176(%rsp),%rdi # qhasm: (uint32) x14 += j14 # asm 1: addl >= 32 # asm 1: shr $32,out=int64#1 # asm 2: movq out=%rdi movq 136(%rsp),%rdi # qhasm: m = m_backup # asm 1: movq m=int64#2 # asm 2: movq m=%rsi movq 144(%rsp),%rsi # qhasm: x0 ^= *(uint64 *) (m + 0) # asm 1: xorq 0(bytes=int64#3 # asm 2: movq bytes=%rdx movq 152(%rsp),%rdx # qhasm: in8 = j8 # asm 1: movq in8=int64#4 # asm 2: movq in8=%rcx movq 88(%rsp),%rcx # qhasm: in8 += 1 # asm 1: add $1,j8=stack64#12 # asm 2: movq j8=88(%rsp) movq %rcx,88(%rsp) # qhasm: unsigned>? unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: out = ctarget # asm 1: movq out=int64#1 # asm 2: movq out=%rdi movq 128(%rsp),%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %rdx,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # qhasm: x = x_backup # asm 1: movq x=int64#1 # asm 2: movq x=%rdi movq 120(%rsp),%rdi # qhasm: in8 = j8 # asm 1: movq in8=int64#2 # asm 2: movq in8=%rsi movq 88(%rsp),%rsi # qhasm: *(uint64 *) (x + 32) = in8 # asm 1: movq r11=int64#9 # asm 2: movq r11=%r11 movq 0(%rsp),%r11 # qhasm: r12 = r12_stack # asm 1: movq r12=int64#10 # asm 2: movq r12=%r12 movq 8(%rsp),%r12 # qhasm: r13 = r13_stack # asm 1: movq r13=int64#11 # asm 2: movq r13=%r13 movq 16(%rsp),%r13 # qhasm: r14 = r14_stack # asm 1: movq r14=int64#12 # asm 2: movq r14=%r14 movq 24(%rsp),%r14 # qhasm: r15 = r15_stack # asm 1: movq r15=int64#13 # asm 2: movq r15=%r15 movq 32(%rsp),%r15 # qhasm: rbx = rbx_stack # asm 1: movq rbx=int64#14 # asm 2: movq rbx=%rbx movq 40(%rsp),%rbx # qhasm: rbp = rbp_stack # asm 1: movq rbp=int64#15 # asm 2: movq rbp=%rbp movq 48(%rsp),%rbp # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: leave add %r11,%rsp mov %rdi,%rax mov %rsi,%rdx ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,k=int64#2 # asm 2: mov k=%rsi mov %rsi,%rsi # qhasm: kbits = arg3 # asm 1: mov kbits=int64#3 # asm 2: mov kbits=%rdx mov %rdx,%rdx # qhasm: x = arg1 # asm 1: mov x=int64#1 # asm 2: mov x=%rdi mov %rdi,%rdi # qhasm: in0 = *(uint64 *) (k + 0) # asm 1: movq 0(in0=int64#5 # asm 2: movq 0(in0=%r8 movq 0(%rsi),%r8 
# qhasm: in2 = *(uint64 *) (k + 8)
# asm 1: movq 8(in2=int64#6
# asm 2: movq 8(in2=%r9
movq 8(%rsi),%r9
# qhasm: *(uint64 *) (x + 4) = in0
# asm 1: movq in10=int64#3
# asm 2: movq 16(in10=%rdx
movq 16(%rsi),%rdx
# qhasm: in12 = *(uint64 *) (k + 24)
# asm 1: movq 24(in12=int64#2
# asm 2: movq 24(in12=%rsi
movq 24(%rsi),%rsi
# qhasm: *(uint64 *) (x + 44) = in10
# asm 1: movq in0=int64#2
# asm 2: mov $1634760805,>in0=%rsi
mov $1634760805,%rsi
# qhasm: in4 = 857760878
# asm 1: mov $857760878,>in4=int64#3
# asm 2: mov $857760878,>in4=%rdx
mov $857760878,%rdx
# qhasm: in10 = 2036477234
# asm 1: mov $2036477234,>in10=int64#4
# asm 2: mov $2036477234,>in10=%rcx
mov $2036477234,%rcx
# qhasm: in14 = 1797285236
# asm 1: mov $1797285236,>in14=int64#5
# asm 2: mov $1797285236,>in14=%r8
mov $1797285236,%r8
# qhasm: *(uint32 *) (x + 0) = in0
# asm 1: movl in10=int64#3
# asm 2: movq 0(in10=%rdx
movq 0(%rsi),%rdx
# qhasm: in12 = *(uint64 *) (k + 8)
# asm 1: movq 8(in12=int64#2
# asm 2: movq 8(in12=%rsi
movq 8(%rsi),%rsi
# qhasm: *(uint64 *) (x + 44) = in10
# asm 1: movq in0=int64#2
# asm 2: mov $1634760805,>in0=%rsi
mov $1634760805,%rsi
# qhasm: in4 = 824206446
# asm 1: mov $824206446,>in4=int64#3
# asm 2: mov $824206446,>in4=%rdx
mov $824206446,%rdx
# qhasm: in10 = 2036477238
# asm 1: mov $2036477238,>in10=int64#4
# asm 2: mov $2036477238,>in10=%rcx
mov $2036477238,%rcx
# qhasm: in14 = 1797285236
# asm 1: mov $1797285236,>in14=int64#5
# asm 2: mov $1797285236,>in14=%r8
mov $1797285236,%r8
# qhasm: *(uint32 *) (x + 0) = in0
# asm 1: movl iv=int64#2
# asm 2: mov iv=%rsi
mov %rsi,%rsi
# qhasm: x = arg1
# asm 1: mov x=int64#1
# asm 2: mov x=%rdi
mov %rdi,%rdi
# qhasm: in6 = *(uint64 *) (iv + 0)
# asm 1: movq 0(in6=int64#2
# asm 2: movq 0(in6=%rsi
movq 0(%rsi),%rsi
# qhasm: in8 = 0
# asm 1: mov $0,>in8=int64#5
# asm 2: mov $0,>in8=%r8
mov $0,%r8
# qhasm: *(uint64 *) (x + 24) = in6
# asm 1: movq
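#
# The main loop above (._bytesatleast1 / ._mainloop) is qhasm-generated x86-64 for
# the Salsa20/20 core: the round counter i starts at 20 and drops by 4 per unrolled
# iteration, and each quarter-round step is an add (emitted as lea), a 32-bit
# rotation by 7, 9, 13 or 18 (rol), and an xor into the target word. The sketch
# below is the standard portable-C reference formulation of that core, not this
# file's register allocation; the name salsa20_core and its in/out arrays are
# illustrative assumptions.
#
# #include <stdint.h>
#
# #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
#
# /* 10 double rounds (20 rounds) over the 16-word state, then add the
#    original input words back in (the "x += j" additions above). */
# static void salsa20_core(uint32_t out[16], const uint32_t in[16])
# {
#     uint32_t x[16];
#     int i;
#     for (i = 0; i < 16; ++i) x[i] = in[i];
#     for (i = 0; i < 20; i += 2) {
#         /* column round */
#         x[ 4] ^= ROTL32(x[ 0] + x[12],  7);
#         x[ 8] ^= ROTL32(x[ 4] + x[ 0],  9);
#         x[12] ^= ROTL32(x[ 8] + x[ 4], 13);
#         x[ 0] ^= ROTL32(x[12] + x[ 8], 18);
#         x[ 9] ^= ROTL32(x[ 5] + x[ 1],  7);
#         x[13] ^= ROTL32(x[ 9] + x[ 5],  9);
#         x[ 1] ^= ROTL32(x[13] + x[ 9], 13);
#         x[ 5] ^= ROTL32(x[ 1] + x[13], 18);
#         x[14] ^= ROTL32(x[10] + x[ 6],  7);
#         x[ 2] ^= ROTL32(x[14] + x[10],  9);
#         x[ 6] ^= ROTL32(x[ 2] + x[14], 13);
#         x[10] ^= ROTL32(x[ 6] + x[ 2], 18);
#         x[ 3] ^= ROTL32(x[15] + x[11],  7);
#         x[ 7] ^= ROTL32(x[ 3] + x[15],  9);
#         x[11] ^= ROTL32(x[ 7] + x[ 3], 13);
#         x[15] ^= ROTL32(x[11] + x[ 7], 18);
#         /* row round */
#         x[ 1] ^= ROTL32(x[ 0] + x[ 3],  7);
#         x[ 2] ^= ROTL32(x[ 1] + x[ 0],  9);
#         x[ 3] ^= ROTL32(x[ 2] + x[ 1], 13);
#         x[ 0] ^= ROTL32(x[ 3] + x[ 2], 18);
#         x[ 6] ^= ROTL32(x[ 5] + x[ 4],  7);
#         x[ 7] ^= ROTL32(x[ 6] + x[ 5],  9);
#         x[ 4] ^= ROTL32(x[ 7] + x[ 6], 13);
#         x[ 5] ^= ROTL32(x[ 4] + x[ 7], 18);
#         x[11] ^= ROTL32(x[10] + x[ 9],  7);
#         x[ 8] ^= ROTL32(x[11] + x[10],  9);
#         x[ 9] ^= ROTL32(x[ 8] + x[11], 13);
#         x[10] ^= ROTL32(x[ 9] + x[ 8], 18);
#         x[12] ^= ROTL32(x[15] + x[14],  7);
#         x[13] ^= ROTL32(x[12] + x[15],  9);
#         x[14] ^= ROTL32(x[13] + x[12], 13);
#         x[15] ^= ROTL32(x[14] + x[13], 18);
#     }
#     for (i = 0; i < 16; ++i) out[i] = x[i] + in[i];
# }
#
# In the assembly, x5, x10 and x15 live in the stack slots x5_stack, x10_stack and
# x15_stack between rounds because the 16 state words do not all fit in the
# available integer registers.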
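#
# ECRYPT_keysetup and ECRYPT_ivsetup above fill the 16-word Salsa20 input block x:
# key words go to x[1..4] and x[11..14] (stores at byte offsets 4 and 44), the
# constants "expand 32-byte k" (1634760805, 857760878, 2036477234, 1797285236) or,
# for 128-bit keys, "expand 16-byte k" (824206446 and 2036477238 in place of the
# middle two) go to x[0], x[5], x[10], x[15], the 8-byte IV to x[6..7] (offset 24),
# and the 64-bit block counter to x[8..9] (offset 32), incremented once per 64-byte
# block by the "in8 += 1" step in the output code. A small C sketch of that layout,
# assuming a little-endian host as this x86-64 code does; the names keysetup_sketch
# and ivsetup_sketch and their signatures are illustrative, not the ECRYPT API.
#
# #include <stdint.h>
# #include <string.h>
#
# static const uint32_t sigma[4] = /* "expand 32-byte k" */
#     { 1634760805u, 857760878u, 2036477234u, 1797285236u };
# static const uint32_t tau[4]   = /* "expand 16-byte k" */
#     { 1634760805u, 824206446u, 2036477238u, 1797285236u };
#
# static void keysetup_sketch(uint32_t x[16], const uint8_t *k, unsigned kbits)
# {
#     const uint32_t *c = (kbits == 256) ? sigma : tau;
#     memcpy(&x[1],  k, 16);                           /* words 1..4   */
#     memcpy(&x[11], (kbits == 256) ? k + 16 : k, 16); /* words 11..14 */
#     x[0] = c[0]; x[5] = c[1]; x[10] = c[2]; x[15] = c[3];
# }
#
# static void ivsetup_sketch(uint32_t x[16], const uint8_t iv[8])
# {
#     memcpy(&x[6], iv, 8);  /* words 6..7: nonce */
#     x[8] = 0; x[9] = 0;    /* words 8..9: 64-bit block counter */
# }
#
# ECRYPT_keystream_bytes first zero-fills its output buffer (the rep stosb loop at
# the top of this file) and then falls into the same path as ECRYPT_encrypt_bytes,
# so the keystream is simply the encryption of zeros; the encryption path XORs each
# 64-byte keystream block into the message and routes any short final block through
# the stack512 tmp buffer before copying it back to ctarget.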