# qhasm: int64 r11_caller # qhasm: int64 r12_caller # qhasm: int64 r13_caller # qhasm: int64 r14_caller # qhasm: int64 r15_caller # qhasm: int64 rbx_caller # qhasm: int64 rbp_caller # qhasm: caller r11_caller # qhasm: caller r12_caller # qhasm: caller r13_caller # qhasm: caller r14_caller # qhasm: caller r15_caller # qhasm: caller rbx_caller # qhasm: caller rbp_caller # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: int64 a # qhasm: int64 b # qhasm: int64 c # qhasm: int64 d # qhasm: int64 loader # qhasm: int64 arith # qhasm: int64 ha # qhasm: int64 hb # qhasm: int64 hc # qhasm: int64 hd # qhasm: int64 state # qhasm: int64 in # qhasm: int64 inlen # qhasm: enter md5blocks_amd64_1 .text .p2align 5 .globl _md5blocks_amd64_1 .globl md5blocks_amd64_1 _md5blocks_amd64_1: md5blocks_amd64_1: mov %rsp,%r11 and $31,%r11 add $64,%r11 sub %r11,%rsp # qhasm: input state # qhasm: input in # qhasm: input inlen # qhasm: r11_stack = r11_caller # asm 1: movq r11_stack=stack64#1 # asm 2: movq r11_stack=0(%rsp) movq %r11,0(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=8(%rsp) movq %r12,8(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=16(%rsp) movq %r13,16(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=24(%rsp) movq %r14,24(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=32(%rsp) movq %r15,32(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=40(%rsp) movq %rbx,40(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=48(%rsp) movq %rbp,48(%rsp) # qhasm: a = *(uint32 *) (state + 0) # asm 1: movl 0(a=int64#4d # asm 2: movl 0(a=%ecx movl 0(%rdi),%ecx # qhasm: b = *(uint32 *) (state + 4) # asm 1: movl 4(b=int64#5d # asm 2: movl 4(b=%r8d movl 4(%rdi),%r8d # qhasm: c = *(uint32 *) (state + 8) # asm 1: movl 8(c=int64#6d # asm 2: movl 8(c=%r9d movl 8(%rdi),%r9d # qhasm: d = *(uint32 *) (state + 12) # asm 1: movl 12(d=int64#7d # asm 2: movl 12(d=%eax movl 12(%rdi),%eax # qhasm: unsignedloader=int64#8d # asm 2: movl 0(loader=%r10d movl 0(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= d # asm 1: xor ha=int64#10 # asm 2: mov ha=%r12 mov %rcx,%r12 # qhasm: a += loader + 0xd76aa478 # asm 1: lea 0xd76aa478(a=int64#4 # asm 2: lea 0xd76aa478(a=%rcx lea 0xd76aa478(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add arith=int64#8 # asm 2: mov arith=%r10 mov %r8,%r10 # qhasm: arith ^= c # asm 1: xor loader=int64#9d # asm 2: movl 4(loader=%r11d movl 4(%rsi),%r11d # qhasm: arith &= a # asm 1: and hd=int64#11 # asm 2: mov hd=%r13 mov %rax,%r13 # qhasm: d += loader + 0xe8c7b756 # asm 1: lea 0xe8c7b756(d=int64#7 # asm 2: lea 0xe8c7b756(d=%rax lea 0xe8c7b756(%rax,%r11),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 8(loader=%r10d movl 8(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= b # asm 1: xor hc=int64#12 # asm 2: mov hc=%r14 mov %r9,%r14 # qhasm: c += loader + 0x242070db # asm 1: lea 0x242070db(c=int64#6 # asm 2: lea 0x242070db(c=%r9 lea 0x242070db(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 12(loader=%r10d movl 12(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= a # asm 1: xor hb=int64#13 # asm 2: mov hb=%r15 mov %r8,%r15 # qhasm: b += loader + 0xc1bdceee # asm 1: lea 0xc1bdceee(b=int64#5 # asm 2: lea 0xc1bdceee(b=%r8 lea 0xc1bdceee(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 16(loader=%r10d movl 16(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0xf57c0faf(a=%rcx lea 0xf57c0faf(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 20(loader=%r10d movl 20(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0x4787c62a(d=%rax lea 0x4787c62a(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 24(loader=%r10d movl 24(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xa8304613(c=%r9 lea 0xa8304613(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 28(loader=%r10d movl 28(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0xfd469501(b=%r8 lea 0xfd469501(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 32(loader=%r10d movl 32(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0x698098d8(a=%rcx lea 0x698098d8(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 36(loader=%r10d movl 36(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0x8b44f7af(d=%rax lea 0x8b44f7af(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 40(loader=%r10d movl 40(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xffff5bb1(c=%r9 lea 0xffff5bb1(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 44(loader=%r10d movl 44(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0x895cd7be(b=%r8 lea 0x895cd7be(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 48(loader=%r10d movl 48(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0x6b901122(a=%rcx lea 0x6b901122(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 52(loader=%r10d movl 52(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0xfd987193(d=%rax lea 0xfd987193(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 56(loader=%r10d movl 56(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xa679438e(c=%r9 lea 0xa679438e(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 60(loader=%r10d movl 60(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0x49b40821(b=%r8 lea 0x49b40821(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 4(loader=%r10d movl 4(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor a=int64#4 # asm 2: lea 0xf61e2562(a=%rcx lea 0xf61e2562(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 24(loader=%r10d movl 24(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor d=int64#7 # asm 2: lea 0xc040b340(d=%rax lea 0xc040b340(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 44(loader=%r10d movl 44(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor c=int64#6 # asm 2: lea 0x265e5a51(c=%r9 lea 0x265e5a51(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 0(loader=%r10d movl 0(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor b=int64#5 # asm 2: lea 0xe9b6c7aa(b=%r8 lea 0xe9b6c7aa(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 20(loader=%r10d movl 20(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor a=int64#4 # asm 2: lea 0xd62f105d(a=%rcx lea 0xd62f105d(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 40(loader=%r10d movl 40(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor d=int64#7 # asm 2: lea 0x02441453(d=%rax lea 0x02441453(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 60(loader=%r10d movl 60(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor c=int64#6 # asm 2: lea 0xd8a1e681(c=%r9 lea 0xd8a1e681(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 16(loader=%r10d movl 16(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor b=int64#5 # asm 2: lea 0xe7d3fbc8(b=%r8 lea 0xe7d3fbc8(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 36(loader=%r10d movl 36(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor a=int64#4 # asm 2: lea 0x21e1cde6(a=%rcx lea 0x21e1cde6(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 56(loader=%r10d movl 56(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor d=int64#7 # asm 2: lea 0xc33707d6(d=%rax lea 0xc33707d6(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 12(loader=%r10d movl 12(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor c=int64#6 # asm 2: lea 0xf4d50d87(c=%r9 lea 0xf4d50d87(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 32(loader=%r10d movl 32(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor b=int64#5 # asm 2: lea 0x455a14ed(b=%r8 lea 0x455a14ed(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 52(loader=%r10d movl 52(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor a=int64#4 # asm 2: lea 0xa9e3e905(a=%rcx lea 0xa9e3e905(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 8(loader=%r10d movl 8(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor d=int64#7 # asm 2: lea 0xfcefa3f8(d=%rax lea 0xfcefa3f8(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 28(loader=%r10d movl 28(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor c=int64#6 # asm 2: lea 0x676f02d9(c=%r9 lea 0x676f02d9(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 48(loader=%r10d movl 48(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor b=int64#5 # asm 2: lea 0x8d2a4c8a(b=%r8 lea 0x8d2a4c8a(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 20(loader=%r10d movl 20(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor a=int64#4 # asm 2: lea 0xfffa3942(a=%rcx lea 0xfffa3942(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 32(loader=%r10d movl 32(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor d=int64#7 # asm 2: lea 0x8771f681(d=%rax lea 0x8771f681(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 44(loader=%r10d movl 44(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor c=int64#6 # asm 2: lea 0x6d9d6122(c=%r9 lea 0x6d9d6122(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 56(loader=%r10d movl 56(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor b=int64#5 # asm 2: lea 0xfde5380c(b=%r8 lea 0xfde5380c(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 4(loader=%r10d movl 4(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor a=int64#4 # asm 2: lea 0xa4beea44(a=%rcx lea 0xa4beea44(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 16(loader=%r10d movl 16(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor d=int64#7 # asm 2: lea 0x4bdecfa9(d=%rax lea 0x4bdecfa9(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 28(loader=%r10d movl 28(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor c=int64#6 # asm 2: lea 0xf6bb4b60(c=%r9 lea 0xf6bb4b60(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 40(loader=%r10d movl 40(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor b=int64#5 # asm 2: lea 0xbebfbc70(b=%r8 lea 0xbebfbc70(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 52(loader=%r10d movl 52(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor a=int64#4 # asm 2: lea 0x289b7ec6(a=%rcx lea 0x289b7ec6(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 0(loader=%r10d movl 0(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor d=int64#7 # asm 2: lea 0xeaa127fa(d=%rax lea 0xeaa127fa(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 12(loader=%r10d movl 12(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor c=int64#6 # asm 2: lea 0xd4ef3085(c=%r9 lea 0xd4ef3085(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 24(loader=%r10d movl 24(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor b=int64#5 # asm 2: lea 0x04881d05(b=%r8 lea 0x04881d05(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 36(loader=%r10d movl 36(%rsi),%r10d # qhasm: arith = d # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rax,%r11 # qhasm: arith ^= c # asm 1: xor a=int64#4 # asm 2: lea 0xd9d4d039(a=%rcx lea 0xd9d4d039(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 48(loader=%r10d movl 48(%rsi),%r10d # qhasm: arith = c # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r9,%r11 # qhasm: arith ^= b # asm 1: xor d=int64#7 # asm 2: lea 0xe6db99e5(d=%rax lea 0xe6db99e5(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 60(loader=%r10d movl 60(%rsi),%r10d # qhasm: arith = b # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %r8,%r11 # qhasm: arith ^= a # asm 1: xor c=int64#6 # asm 2: lea 0x1fa27cf8(c=%r9 lea 0x1fa27cf8(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 8(loader=%r10d movl 8(%rsi),%r10d # qhasm: arith = a # asm 1: mov arith=int64#9 # asm 2: mov arith=%r11 mov %rcx,%r11 # qhasm: arith ^= d # asm 1: xor b=int64#5 # asm 2: lea 0xc4ac5665(b=%r8 lea 0xc4ac5665(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 0(loader=%r10d movl 0(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0xf4292244(a=%rcx lea 0xf4292244(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 28(loader=%r10d movl 28(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0x432aff97(d=%rax lea 0x432aff97(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 56(loader=%r10d movl 56(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xab9423a7(c=%r9 lea 0xab9423a7(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 20(loader=%r10d movl 20(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0xfc93a039(b=%r8 lea 0xfc93a039(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 48(loader=%r10d movl 48(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0x655b59c3(a=%rcx lea 0x655b59c3(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 12(loader=%r10d movl 12(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0x8f0ccc92(d=%rax lea 0x8f0ccc92(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 40(loader=%r10d movl 40(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xffeff47d(c=%r9 lea 0xffeff47d(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 4(loader=%r10d movl 4(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0x85845dd1(b=%r8 lea 0x85845dd1(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 32(loader=%r10d movl 32(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0x6fa87e4f(a=%rcx lea 0x6fa87e4f(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 60(loader=%r10d movl 60(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0xfe2ce6e0(d=%rax lea 0xfe2ce6e0(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 24(loader=%r10d movl 24(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0xa3014314(c=%r9 lea 0xa3014314(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 52(loader=%r10d movl 52(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0x4e0811a1(b=%r8 lea 0x4e0811a1(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add loader=int64#8d # asm 2: movl 16(loader=%r10d movl 16(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= d # asm 1: xor a=int64#4 # asm 2: lea 0xf7537e82(a=%rcx lea 0xf7537e82(%rcx,%r10),%rcx # qhasm: a += arith # asm 1: add loader=int64#8d # asm 2: movl 44(loader=%r10d movl 44(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= c # asm 1: xor d=int64#7 # asm 2: lea 0xbd3af235(d=%rax lea 0xbd3af235(%rax,%r10),%rax # qhasm: d += arith # asm 1: add loader=int64#8d # asm 2: movl 8(loader=%r10d movl 8(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= b # asm 1: xor c=int64#6 # asm 2: lea 0x2ad7d2bb(c=%r9 lea 0x2ad7d2bb(%r9,%r10),%r9 # qhasm: c += arith # asm 1: add loader=int64#8d # asm 2: movl 36(loader=%r10d movl 36(%rsi),%r10d # qhasm: arith = -1 # asm 1: mov $-1,>arith=int64#9 # asm 2: mov $-1,>arith=%r11 mov $-1,%r11 # qhasm: arith ^= a # asm 1: xor b=int64#5 # asm 2: lea 0xeb86d391(b=%r8 lea 0xeb86d391(%r8,%r10),%r8 # qhasm: b += arith # asm 1: add r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 0(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 8(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 16(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 24(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 32(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 40(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 48(%rsp),%rbp # qhasm: leave add %r11,%rsp mov %rdi,%rax mov %rsi,%rdx ret