# qhasm: float64 scale
# qhasm: float64 alpha0
# qhasm: float64 alpha32
# qhasm: float64 alpha64
# qhasm: float64 alpha96
# qhasm: float64 alpha130
# qhasm: float64 h0
# qhasm: float64 h1
# qhasm: float64 h2
# qhasm: float64 h3
# qhasm: float64 h4
# qhasm: float64 h5
# qhasm: float64 h6
# qhasm: float64 h7
# qhasm: float64 y7
# qhasm: float64 y6
# qhasm: float64 y1
# qhasm: float64 y0
# qhasm: float64 y5
# qhasm: float64 y4
# qhasm: float64 x7
# qhasm: float64 x6
# qhasm: float64 x1
# qhasm: float64 x0
# qhasm: float64 y3
# qhasm: float64 y2
# qhasm: float64 r3low
# qhasm: float64 r0low
# qhasm: float64 r3high
# qhasm: float64 r0high
# qhasm: float64 sr1low
# qhasm: float64 x5
# qhasm: float64 r3lowx0
# qhasm: float64 sr1high
# qhasm: float64 x4
# qhasm: float64 r0lowx6
# qhasm: float64 r1low
# qhasm: float64 x3
# qhasm: float64 r3highx0
# qhasm: float64 r1high
# qhasm: float64 x2
# qhasm: float64 r0highx6
# qhasm: float64 sr2low
# qhasm: float64 r0lowx0
# qhasm: float64 sr2high
# qhasm: float64 sr1lowx6
# qhasm: float64 r2low
# qhasm: float64 r0highx0
# qhasm: float64 r2high
# qhasm: float64 sr1highx6
# qhasm: float64 sr3low
# qhasm: float64 r1lowx0
# qhasm: float64 sr3high
# qhasm: float64 sr2lowx6
# qhasm: float64 r1highx0
# qhasm: float64 sr2highx6
# qhasm: float64 r2lowx0
# qhasm: float64 sr3lowx6
# qhasm: float64 r2highx0
# qhasm: float64 sr3highx6
# qhasm: float64 r1highx4
# qhasm: float64 r1lowx4
# qhasm: float64 r0highx4
# qhasm: float64 r0lowx4
# qhasm: float64 sr3highx4
# qhasm: float64 sr3lowx4
# qhasm: float64 sr2highx4
# qhasm: float64 sr2lowx4
# qhasm: float64 r0lowx2
# qhasm: float64 r0highx2
# qhasm: float64 r1lowx2
# qhasm: float64 r1highx2
# qhasm: float64 r2lowx2
# qhasm: float64 r2highx2
# qhasm: float64 sr3lowx2
# qhasm: float64 sr3highx2
# qhasm: float64 z0
# qhasm: float64 z1
# qhasm: float64 z2
# qhasm: float64 z3
# qhasm: int64 out
# qhasm: int64 r
# qhasm: int64 s
# qhasm: int64 m
# qhasm: int64 l
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r00
# qhasm: int64 r01
# qhasm: int64 r02
# qhasm: int64 r03
# qhasm: int64 r10
# qhasm: int64 r11
# qhasm: int64 r12
# qhasm: int64 r13
# qhasm: int64 r20
# qhasm: int64 r21
# qhasm: int64 r22
# qhasm: int64 r23
# qhasm: int64 r30
# qhasm: int64 r31
# qhasm: int64 r32
# qhasm: int64 r33
# qhasm: int64 m0
# qhasm: int64 m1
# qhasm: int64 m2
# qhasm: int64 m3
# qhasm: int64 m00
# qhasm: int64 m01
# qhasm: int64 m02
# qhasm: int64 m03
# qhasm: int64 m10
# qhasm: int64 m11
# qhasm: int64 m12
# qhasm: int64 m13
# qhasm: int64 m20
# qhasm: int64 m21
# qhasm: int64 m22
# qhasm: int64 m23
# qhasm: int64 m30
# qhasm: int64 m31
# qhasm: int64 m32
# qhasm: int64 m33
# qhasm: int64 constants
# qhasm: int64 constants_low
# qhasm: int64 lbelow2
# qhasm: int64 lbelow3
# qhasm: int64 lbelow4
# qhasm: int64 lbelow5
# qhasm: int64 lbelow6
# qhasm: int64 lbelow7
# qhasm: int64 lbelow8
# qhasm: int64 lbelow9
# qhasm: int64 lbelow10
# qhasm: int64 lbelow11
# qhasm: int64 lbelow12
# qhasm: int64 lbelow13
# qhasm: int64 lbelow14
# qhasm: int64 lbelow15
# qhasm: float64 alpham80
# qhasm: float64 alpham48
# qhasm: float64 alpham16
# qhasm: float64 alpha18
# qhasm: float64 alpha50
# qhasm: float64 alpha82
# qhasm: float64 alpha112
# qhasm: float64 offset0
# qhasm: float64 offset1
# qhasm: float64 offset2
# qhasm: float64 offset3
# qhasm: stack64 d0
# qhasm: stack64 d1
# qhasm: stack64 d2
# qhasm: stack64 d3
# qhasm: stack64 sr1high_stack
# qhasm: stack64 sr1low_stack
# qhasm: stack64 sr2high_stack
# qhasm: stack64 sr2low_stack
# qhasm: stack64 sr3high_stack
# qhasm: stack64 sr3low_stack
# qhasm: enter poly1305_sparc
.section ".text"
.align 32
.global poly1305_sparc
poly1305_sparc:
save %sp,-256,%sp
# qhasm: input out
# qhasm: input r
# qhasm: input s
# qhasm: input m
# qhasm: input l
# qhasm: r00 = *(uint8 *) (r + 0)
# asm 1: ldub [r00=int64#6
# asm 2: ldub [r00=%i5
ldub [%i1+0],%i5
# qhasm: constants = (poly1305_sparc_constants & 0xfffffc0000000000) >> 32
# asm 1: sethi %hh(poly1305_sparc_constants),>constants=int64#7
# asm 2: sethi %hh(poly1305_sparc_constants),>constants=%g1
sethi %hh(poly1305_sparc_constants),%g1
# qhasm: constants_low = poly1305_sparc_constants & 0xfffffc00
# asm 1: sethi %lm(poly1305_sparc_constants),>constants_low=int64#8
# asm 2: sethi %lm(poly1305_sparc_constants),>constants_low=%g4
sethi %lm(poly1305_sparc_constants),%g4
# qhasm: r01 = *(uint8 *) (r + 1)
# asm 1: ldub [r01=int64#9
# asm 2: ldub [r01=%g5
ldub [%i1+1],%g5
# qhasm: constants |= (poly1305_sparc_constants & 0x3ff00000000) >> 32
# asm 1: or constants=int64#7
# asm 2: or constants=%g1
or %g1,%hm(poly1305_sparc_constants),%g1
# qhasm: constants_low |= poly1305_sparc_constants & 0x3ff
# asm 1: or constants_low=int64#8
# asm 2: or constants_low=%g4
or %g4,%lo(poly1305_sparc_constants),%g4
# qhasm: r02 = *(uint8 *) (r + 2)
# asm 1: ldub [r02=int64#10
# asm 2: ldub [r02=%o0
ldub [%i1+2],%o0
# qhasm: r0 = 2151
# asm 1: add %g0,2151,>r0=int64#11
# asm 2: add %g0,2151,>r0=%o1
add %g0,2151,%o1
# qhasm: constants <<= 32
# asm 1: sllx constants=int64#7
# asm 2: sllx constants=%g1
sllx %g1,32,%g1
# qhasm: r03 = *(uint8 *) (r + 3)
# asm 1: ldub [r03=int64#12
# asm 2: ldub [r03=%o2
ldub [%i1+3],%o2
# qhasm: r0 <<= 51
# asm 1: sllx r0=int64#11
# asm 2: sllx r0=%o1
sllx %o1,51,%o1
# qhasm: constants |= constants_low
# asm 1: or constants=int64#7
# asm 2: or constants=%g1
or %g1,%g4,%g1
# qhasm: r10 = *(uint8 *) (r + 4)
# asm 1: ldub [r10=int64#8
# asm 2: ldub [r10=%g4
ldub [%i1+4],%g4
# qhasm: r01 <<= 8
# asm 1: sllx r01=int64#9
# asm 2: sllx r01=%g5
sllx %g5,8,%g5
# qhasm: r0 += r00
# asm 1: add r0=int64#6
# asm 2: add r0=%i5
add %o1,%i5,%i5
# qhasm: r11 = *(uint8 *) (r + 5)
# asm 1: ldub [r11=int64#11
# asm 2: ldub [r11=%o1
ldub [%i1+5],%o1
# qhasm: r02 <<= 16
# asm 1: sllx r02=int64#10
# asm 2: sllx r02=%o0
sllx %o0,16,%o0
# qhasm: r0 += r01
# asm 1: add r0=int64#6
# asm 2: add r0=%i5
add %i5,%g5,%i5
# qhasm: r12 = *(uint8 *) (r + 6)
# asm 1: ldub [r12=int64#9
# asm 2: ldub [r12=%g5
ldub [%i1+6],%g5
# qhasm: r03 <<= 24
# asm 1: sllx r03=int64#12
# asm 2: sllx r03=%o2
sllx %o2,24,%o2
# qhasm: r0 += r02
# asm 1: add r0=int64#6
# asm 2: add r0=%i5
add %i5,%o0,%i5
# qhasm: r13 = *(uint8 *) (r + 7)
# asm 1: ldub [r13=int64#10
# asm 2: ldub [r13=%o0
ldub [%i1+7],%o0
# qhasm: r1 = 2215
# asm 1: add %g0,2215,>r1=int64#13
# asm 2: add %g0,2215,>r1=%o3
add %g0,2215,%o3
# qhasm: r0 += r03
# asm 1: add r0=int64#6
# asm 2: add r0=%i5
add %i5,%o2,%i5
# qhasm: d0 = r0
# asm 1: stx d0=stack64#1]
# asm 2: stx d0=0]
stx %i5,[%fp+2023-0]
# qhasm: r1 <<= 51
# asm 1: sllx r1=int64#6
# asm 2: sllx r1=%i5
sllx %o3,51,%i5
# qhasm: r2 = 2279
# asm 1: add %g0,2279,>r2=int64#12
# asm 2: add %g0,2279,>r2=%o2
add %g0,2279,%o2
# qhasm: r20 = *(uint8 *) (r + 8)
# asm 1: ldub [r20=int64#13
# asm 2: ldub [r20=%o3
ldub [%i1+8],%o3
# qhasm: r11 <<= 8
# asm 1: sllx r11=int64#11
# asm 2: sllx r11=%o1
sllx %o1,8,%o1
# qhasm: r1 += r10
# asm 1: add r1=int64#6
# asm 2: add r1=%i5
add %i5,%g4,%i5
# qhasm: r21 = *(uint8 *) (r + 9)
# asm 1: ldub [r21=int64#8
#
asm 2: ldub [r21=%g4 ldub [%i1+9],%g4 # qhasm: r12 <<= 16 # asm 1: sllx r12=int64#9 # asm 2: sllx r12=%g5 sllx %g5,16,%g5 # qhasm: r1 += r11 # asm 1: add r1=int64#6 # asm 2: add r1=%i5 add %i5,%o1,%i5 # qhasm: r22 = *(uint8 *) (r + 10) # asm 1: ldub [r22=int64#11 # asm 2: ldub [r22=%o1 ldub [%i1+10],%o1 # qhasm: r13 <<= 24 # asm 1: sllx r13=int64#10 # asm 2: sllx r13=%o0 sllx %o0,24,%o0 # qhasm: r1 += r12 # asm 1: add r1=int64#6 # asm 2: add r1=%i5 add %i5,%g5,%i5 # qhasm: r23 = *(uint8 *) (r + 11) # asm 1: ldub [r23=int64#9 # asm 2: ldub [r23=%g5 ldub [%i1+11],%g5 # qhasm: r2 <<= 51 # asm 1: sllx r2=int64#12 # asm 2: sllx r2=%o2 sllx %o2,51,%o2 # qhasm: r1 += r13 # asm 1: add r1=int64#6 # asm 2: add r1=%i5 add %i5,%o0,%i5 # qhasm: d1 = r1 # asm 1: stx d1=stack64#2] # asm 2: stx d1=8] stx %i5,[%fp+2023-8] # qhasm: r21 <<= 8 # asm 1: sllx r21=int64#6 # asm 2: sllx r21=%i5 sllx %g4,8,%i5 # qhasm: r2 += r20 # asm 1: add r2=int64#8 # asm 2: add r2=%g4 add %o2,%o3,%g4 # qhasm: r30 = *(uint8 *) (r + 12) # asm 1: ldub [r30=int64#10 # asm 2: ldub [r30=%o0 ldub [%i1+12],%o0 # qhasm: r22 <<= 16 # asm 1: sllx r22=int64#11 # asm 2: sllx r22=%o1 sllx %o1,16,%o1 # qhasm: r2 += r21 # asm 1: add r2=int64#6 # asm 2: add r2=%i5 add %g4,%i5,%i5 # qhasm: r31 = *(uint8 *) (r + 13) # asm 1: ldub [r31=int64#8 # asm 2: ldub [r31=%g4 ldub [%i1+13],%g4 # qhasm: r23 <<= 24 # asm 1: sllx r23=int64#9 # asm 2: sllx r23=%g5 sllx %g5,24,%g5 # qhasm: r2 += r22 # asm 1: add r2=int64#6 # asm 2: add r2=%i5 add %i5,%o1,%i5 # qhasm: r32 = *(uint8 *) (r + 14) # asm 1: ldub [r32=int64#11 # asm 2: ldub [r32=%o1 ldub [%i1+14],%o1 # qhasm: r2 += r23 # asm 1: add r2=int64#6 # asm 2: add r2=%i5 add %i5,%g5,%i5 # qhasm: r3 = 2343 # asm 1: add %g0,2343,>r3=int64#9 # asm 2: add %g0,2343,>r3=%g5 add %g0,2343,%g5 # qhasm: d2 = r2 # asm 1: stx d2=stack64#3] # asm 2: stx d2=16] stx %i5,[%fp+2023-16] # qhasm: r3 <<= 51 # asm 1: sllx r3=int64#6 # asm 2: sllx r3=%i5 sllx %g5,51,%i5 # qhasm: alpha32 = *(float64 *) (constants + 40) # asm 1: ldd [alpha32=float64#1 # asm 2: ldd [alpha32=%f0 ldd [%g1+40],%f0 # qhasm: r33 = *(uint8 *) (r + 15) # asm 1: ldub [r33=int64#2 # asm 2: ldub [r33=%i1 ldub [%i1+15],%i1 # qhasm: r31 <<= 8 # asm 1: sllx r31=int64#8 # asm 2: sllx r31=%g4 sllx %g4,8,%g4 # qhasm: r3 += r30 # asm 1: add r3=int64#6 # asm 2: add r3=%i5 add %i5,%o0,%i5 # qhasm: round *(uint32 *) (constants + 136) # asm 1: ld [r32=int64#9 # asm 2: sllx r32=%g5 sllx %o1,16,%g5 # qhasm: r3 += r31 # asm 1: add r3=int64#6 # asm 2: add r3=%i5 add %i5,%g4,%i5 # qhasm: r33 <<= 24 # asm 1: sllx r33=int64#2 # asm 2: sllx r33=%i1 sllx %i1,24,%i1 # qhasm: r3 += r32 # asm 1: add r3=int64#6 # asm 2: add r3=%i5 add %i5,%g5,%i5 # qhasm: r3 += r33 # asm 1: add r3=int64#2 # asm 2: add r3=%i1 add %i5,%i1,%i1 # qhasm: h0 = alpha32 - alpha32 # asm 1: fsubd h0=float64#2 # asm 2: fsubd h0=%f2 fsubd %f0,%f0,%f2 # qhasm: d3 = r3 # asm 1: stx d3=stack64#4] # asm 2: stx d3=24] stx %i1,[%fp+2023-24] # qhasm: h1 = alpha32 - alpha32 # asm 1: fsubd h1=float64#3 # asm 2: fsubd h1=%f4 fsubd %f0,%f0,%f4 # qhasm: alpha0 = *(float64 *) (constants + 24) # asm 1: ldd [alpha0=float64#4 # asm 2: ldd [alpha0=%f6 ldd [%g1+24],%f6 # qhasm: h2 = alpha32 - alpha32 # asm 1: fsubd h2=float64#5 # asm 2: fsubd h2=%f8 fsubd %f0,%f0,%f8 # qhasm: alpha64 = *(float64 *) (constants + 56) # asm 1: ldd [alpha64=float64#6 # asm 2: ldd [alpha64=%f10 ldd [%g1+56],%f10 # qhasm: h3 = alpha32 - alpha32 # asm 1: fsubd h3=float64#7 # asm 2: fsubd h3=%f12 fsubd %f0,%f0,%f12 # qhasm: alpha18 = *(float64 *) 
(constants + 32) # asm 1: ldd [alpha18=float64#8 # asm 2: ldd [alpha18=%f14 ldd [%g1+32],%f14 # qhasm: h4 = alpha32 - alpha32 # asm 1: fsubd h4=float64#9 # asm 2: fsubd h4=%f16 fsubd %f0,%f0,%f16 # qhasm: r0low = d0 # asm 1: ldd [%fp+2023-r0low=float64#10 # asm 2: ldd [%fp+2023-r0low=%f18 ldd [%fp+2023-0],%f18 # qhasm: h5 = alpha32 - alpha32 # asm 1: fsubd h5=float64#11 # asm 2: fsubd h5=%f20 fsubd %f0,%f0,%f20 # qhasm: r1low = d1 # asm 1: ldd [%fp+2023-r1low=float64#12 # asm 2: ldd [%fp+2023-r1low=%f22 ldd [%fp+2023-8],%f22 # qhasm: h6 = alpha32 - alpha32 # asm 1: fsubd h6=float64#13 # asm 2: fsubd h6=%f24 fsubd %f0,%f0,%f24 # qhasm: r2low = d2 # asm 1: ldd [%fp+2023-r2low=float64#14 # asm 2: ldd [%fp+2023-r2low=%f26 ldd [%fp+2023-16],%f26 # qhasm: h7 = alpha32 - alpha32 # asm 1: fsubd h7=float64#15 # asm 2: fsubd h7=%f28 fsubd %f0,%f0,%f28 # qhasm: alpha50 = *(float64 *) (constants + 48) # asm 1: ldd [alpha50=float64#16 # asm 2: ldd [alpha50=%f30 ldd [%g1+48],%f30 # qhasm: r0low -= alpha0 # asm 1: fsubd r0low=float64#10 # asm 2: fsubd r0low=%f18 fsubd %f18,%f6,%f18 # qhasm: alpha82 = *(float64 *) (constants + 64) # asm 1: ldd [alpha82=float64#17 # asm 2: ldd [alpha82=%f32 ldd [%g1+64],%f32 # qhasm: r1low -= alpha32 # asm 1: fsubd r1low=float64#12 # asm 2: fsubd r1low=%f22 fsubd %f22,%f0,%f22 # qhasm: scale = *(float64 *) (constants + 96) # asm 1: ldd [scale=float64#18 # asm 2: ldd [scale=%f34 ldd [%g1+96],%f34 # qhasm: r2low -= alpha64 # asm 1: fsubd r2low=float64#14 # asm 2: fsubd r2low=%f26 fsubd %f26,%f10,%f26 # qhasm: alpha96 = *(float64 *) (constants + 72) # asm 1: ldd [alpha96=float64#19 # asm 2: ldd [alpha96=%f36 ldd [%g1+72],%f36 # qhasm: r0high = r0low + alpha18 # asm 1: faddd r0high=float64#20 # asm 2: faddd r0high=%f38 faddd %f18,%f14,%f38 # qhasm: r3low = d3 # asm 1: ldd [%fp+2023-r3low=float64#21 # asm 2: ldd [%fp+2023-r3low=%f40 ldd [%fp+2023-24],%f40 # qhasm: alpham80 = *(float64 *) (constants + 0) # asm 1: ldd [alpham80=float64#22 # asm 2: ldd [alpham80=%f42 ldd [%g1+0],%f42 # qhasm: r1high = r1low + alpha50 # asm 1: faddd r1high=float64#23 # asm 2: faddd r1high=%f44 faddd %f22,%f30,%f44 # qhasm: sr1low = scale * r1low # asm 1: fmuld sr1low=float64#24 # asm 2: fmuld sr1low=%f46 fmuld %f34,%f22,%f46 # qhasm: alpham48 = *(float64 *) (constants + 8) # asm 1: ldd [alpham48=float64#25 # asm 2: ldd [alpham48=%f48 ldd [%g1+8],%f48 # qhasm: r2high = r2low + alpha82 # asm 1: faddd r2high=float64#26 # asm 2: faddd r2high=%f50 faddd %f26,%f32,%f50 # qhasm: sr2low = scale * r2low # asm 1: fmuld sr2low=float64#27 # asm 2: fmuld sr2low=%f52 fmuld %f34,%f26,%f52 # qhasm: r0high -= alpha18 # asm 1: fsubd r0high=float64#8 # asm 2: fsubd r0high=%f14 fsubd %f38,%f14,%f14 # qhasm: r3low -= alpha96 # asm 1: fsubd r3low=float64#20 # asm 2: fsubd r3low=%f38 fsubd %f40,%f36,%f38 # qhasm: r1high -= alpha50 # asm 1: fsubd r1high=float64#16 # asm 2: fsubd r1high=%f30 fsubd %f44,%f30,%f30 # qhasm: sr1high = sr1low + alpham80 # asm 1: faddd sr1high=float64#21 # asm 2: faddd sr1high=%f40 faddd %f46,%f42,%f40 # qhasm: alpha112 = *(float64 *) (constants + 80) # asm 1: ldd [alpha112=float64#23 # asm 2: ldd [alpha112=%f44 ldd [%g1+80],%f44 # qhasm: r0low -= r0high # asm 1: fsubd r0low=float64#10 # asm 2: fsubd r0low=%f18 fsubd %f18,%f14,%f18 # qhasm: alpham16 = *(float64 *) (constants + 16) # asm 1: ldd [alpham16=float64#28 # asm 2: ldd [alpham16=%f54 ldd [%g1+16],%f54 # qhasm: r2high -= alpha82 # asm 1: fsubd r2high=float64#17 # asm 2: fsubd r2high=%f32 fsubd %f50,%f32,%f32 # qhasm: sr3low = scale * 
r3low # asm 1: fmuld sr3low=float64#26 # asm 2: fmuld sr3low=%f50 fmuld %f34,%f38,%f50 # qhasm: alpha130 = *(float64 *) (constants + 88) # asm 1: ldd [alpha130=float64#29 # asm 2: ldd [alpha130=%f56 ldd [%g1+88],%f56 # qhasm: sr2high = sr2low + alpham48 # asm 1: faddd sr2high=float64#30 # asm 2: faddd sr2high=%f58 faddd %f52,%f48,%f58 # qhasm: r1low -= r1high # asm 1: fsubd r1low=float64#12 # asm 2: fsubd r1low=%f22 fsubd %f22,%f30,%f22 # qhasm: sr1high -= alpham80 # asm 1: fsubd sr1high=float64#21 # asm 2: fsubd sr1high=%f40 fsubd %f40,%f42,%f40 # qhasm: sr1high_stack = sr1high # asm 1: std sr1high_stack=stack64#1] # asm 2: std sr1high_stack=0] std %f40,[%fp+2023-0] # qhasm: r2low -= r2high # asm 1: fsubd r2low=float64#14 # asm 2: fsubd r2low=%f26 fsubd %f26,%f32,%f26 # qhasm: sr2high -= alpham48 # asm 1: fsubd sr2high=float64#22 # asm 2: fsubd sr2high=%f42 fsubd %f58,%f48,%f42 # qhasm: sr2high_stack = sr2high # asm 1: std sr2high_stack=stack64#2] # asm 2: std sr2high_stack=8] std %f42,[%fp+2023-8] # qhasm: r3high = r3low + alpha112 # asm 1: faddd r3high=float64#25 # asm 2: faddd r3high=%f48 faddd %f38,%f44,%f48 # qhasm: sr1low -= sr1high # asm 1: fsubd sr1low=float64#21 # asm 2: fsubd sr1low=%f40 fsubd %f46,%f40,%f40 # qhasm: sr1low_stack = sr1low # asm 1: std sr1low_stack=stack64#3] # asm 2: std sr1low_stack=16] std %f40,[%fp+2023-16] # qhasm: sr3high = sr3low + alpham16 # asm 1: faddd sr3high=float64#21 # asm 2: faddd sr3high=%f40 faddd %f50,%f54,%f40 # qhasm: sr2low -= sr2high # asm 1: fsubd sr2low=float64#22 # asm 2: fsubd sr2low=%f42 fsubd %f52,%f42,%f42 # qhasm: sr2low_stack = sr2low # asm 1: std sr2low_stack=stack64#4] # asm 2: std sr2low_stack=24] std %f42,[%fp+2023-24] # qhasm: r3high -= alpha112 # asm 1: fsubd r3high=float64#22 # asm 2: fsubd r3high=%f42 fsubd %f48,%f44,%f42 # qhasm: sr3high -= alpham16 # asm 1: fsubd sr3high=float64#21 # asm 2: fsubd sr3high=%f40 fsubd %f40,%f54,%f40 # qhasm: sr3high_stack = sr3high # asm 1: std sr3high_stack=stack64#5] # asm 2: std sr3high_stack=32] std %f40,[%fp+2023-32] # qhasm: unsignedr3low=float64#20 # asm 2: fsubd r3low=%f38 fsubd %f38,%f42,%f38 # qhasm: sr3low -= sr3high # asm 1: fsubd sr3low=float64#21 # asm 2: fsubd sr3low=%f40 fsubd %f50,%f40,%f40 # qhasm: sr3low_stack = sr3low # asm 1: std sr3low_stack=stack64#6] # asm 2: std sr3low_stack=40] std %f40,[%fp+2023-40] # qhasm: goto addatmost15bytes if unsigned< blu,pt %xcc,._addatmost15bytes nop # qhasm: m00 = *(uint8 *) (m + 0) # asm 1: ldub [m00=int64#2 # asm 2: ldub [m00=%i1 ldub [%i3+0],%i1 # qhasm: m0 = 2151 # asm 1: add %g0,2151,>m0=int64#6 # asm 2: add %g0,2151,>m0=%i5 add %g0,2151,%i5 # qhasm: m0 <<= 51 # asm 1: sllx m0=int64#6 # asm 2: sllx m0=%i5 sllx %i5,51,%i5 # qhasm: m1 = 2215 # asm 1: add %g0,2215,>m1=int64#8 # asm 2: add %g0,2215,>m1=%g4 add %g0,2215,%g4 # qhasm: m01 = *(uint8 *) (m + 1) # asm 1: ldub [m01=int64#9 # asm 2: ldub [m01=%g5 ldub [%i3+1],%g5 # qhasm: m1 <<= 51 # asm 1: sllx m1=int64#8 # asm 2: sllx m1=%g4 sllx %g4,51,%g4 # qhasm: m2 = 2279 # asm 1: add %g0,2279,>m2=int64#10 # asm 2: add %g0,2279,>m2=%o0 add %g0,2279,%o0 # qhasm: m02 = *(uint8 *) (m + 2) # asm 1: ldub [m02=int64#11 # asm 2: ldub [m02=%o1 ldub [%i3+2],%o1 # qhasm: m2 <<= 51 # asm 1: sllx m2=int64#10 # asm 2: sllx m2=%o0 sllx %o0,51,%o0 # qhasm: m3 = 2343 # asm 1: add %g0,2343,>m3=int64#12 # asm 2: add %g0,2343,>m3=%o2 add %g0,2343,%o2 # qhasm: m03 = *(uint8 *) (m + 3) # asm 1: ldub [m03=int64#13 # asm 2: ldub [m03=%o3 ldub [%i3+3],%o3 # qhasm: m10 = *(uint8 *) (m + 4) # asm 1: ldub 
[m10=int64#14 # asm 2: ldub [m10=%o4 ldub [%i3+4],%o4 # qhasm: m01 <<= 8 # asm 1: sllx m01=int64#9 # asm 2: sllx m01=%g5 sllx %g5,8,%g5 # qhasm: m0 += m00 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %i5,%i1,%i1 # qhasm: m11 = *(uint8 *) (m + 5) # asm 1: ldub [m11=int64#6 # asm 2: ldub [m11=%i5 ldub [%i3+5],%i5 # qhasm: m02 <<= 16 # asm 1: sllx m02=int64#11 # asm 2: sllx m02=%o1 sllx %o1,16,%o1 # qhasm: m0 += m01 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %i1,%g5,%i1 # qhasm: m12 = *(uint8 *) (m + 6) # asm 1: ldub [m12=int64#9 # asm 2: ldub [m12=%g5 ldub [%i3+6],%g5 # qhasm: m03 <<= 24 # asm 1: sllx m03=int64#13 # asm 2: sllx m03=%o3 sllx %o3,24,%o3 # qhasm: m0 += m02 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %i1,%o1,%i1 # qhasm: m13 = *(uint8 *) (m + 7) # asm 1: ldub [m13=int64#11 # asm 2: ldub [m13=%o1 ldub [%i3+7],%o1 # qhasm: m3 <<= 51 # asm 1: sllx m3=int64#12 # asm 2: sllx m3=%o2 sllx %o2,51,%o2 # qhasm: m0 += m03 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %i1,%o3,%i1 # qhasm: m20 = *(uint8 *) (m + 8) # asm 1: ldub [m20=int64#13 # asm 2: ldub [m20=%o3 ldub [%i3+8],%o3 # qhasm: m11 <<= 8 # asm 1: sllx m11=int64#6 # asm 2: sllx m11=%i5 sllx %i5,8,%i5 # qhasm: m1 += m10 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %g4,%o4,%g4 # qhasm: m21 = *(uint8 *) (m + 9) # asm 1: ldub [m21=int64#14 # asm 2: ldub [m21=%o4 ldub [%i3+9],%o4 # qhasm: m12 <<= 16 # asm 1: sllx m12=int64#9 # asm 2: sllx m12=%g5 sllx %g5,16,%g5 # qhasm: m1 += m11 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %g4,%i5,%i5 # qhasm: m22 = *(uint8 *) (m + 10) # asm 1: ldub [m22=int64#8 # asm 2: ldub [m22=%g4 ldub [%i3+10],%g4 # qhasm: m13 <<= 24 # asm 1: sllx m13=int64#11 # asm 2: sllx m13=%o1 sllx %o1,24,%o1 # qhasm: m1 += m12 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %i5,%g5,%i5 # qhasm: m23 = *(uint8 *) (m + 11) # asm 1: ldub [m23=int64#9 # asm 2: ldub [m23=%g5 ldub [%i3+11],%g5 # qhasm: m1 += m13 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %i5,%o1,%i5 # qhasm: m30 = *(uint8 *) (m + 12) # asm 1: ldub [m30=int64#11 # asm 2: ldub [m30=%o1 ldub [%i3+12],%o1 # qhasm: m21 <<= 8 # asm 1: sllx m21=int64#14 # asm 2: sllx m21=%o4 sllx %o4,8,%o4 # qhasm: m2 += m20 # asm 1: add m2=int64#10 # asm 2: add m2=%o0 add %o0,%o3,%o0 # qhasm: m31 = *(uint8 *) (m + 13) # asm 1: ldub [m31=int64#13 # asm 2: ldub [m31=%o3 ldub [%i3+13],%o3 # qhasm: m22 <<= 16 # asm 1: sllx m22=int64#8 # asm 2: sllx m22=%g4 sllx %g4,16,%g4 # qhasm: m2 += m21 # asm 1: add m2=int64#10 # asm 2: add m2=%o0 add %o0,%o4,%o0 # qhasm: m32 = *(uint8 *) (m + 14) # asm 1: ldub [m32=int64#14 # asm 2: ldub [m32=%o4 ldub [%i3+14],%o4 # qhasm: m23 <<= 24 # asm 1: sllx m23=int64#9 # asm 2: sllx m23=%g5 sllx %g5,24,%g5 # qhasm: m2 += m22 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %o0,%g4,%g4 # qhasm: m33 = *(uint8 *) (m + 15) # asm 1: ldub [m33=int64#10 # asm 2: ldub [m33=%o0 ldub [%i3+15],%o0 # qhasm: m2 += m23 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %g4,%g5,%g4 # qhasm: d0 = m0 # asm 1: stx d0=stack64#7] # asm 2: stx d0=48] stx %i1,[%fp+2023-48] # qhasm: m31 <<= 8 # asm 1: sllx m31=int64#2 # asm 2: sllx m31=%i1 sllx %o3,8,%i1 # qhasm: m3 += m30 # asm 1: add m3=int64#9 # asm 2: add m3=%g5 add %o2,%o1,%g5 # qhasm: d1 = m1 # asm 1: stx d1=stack64#8] # asm 2: stx d1=56] stx %i5,[%fp+2023-56] # qhasm: m32 <<= 16 # asm 1: sllx m32=int64#6 # asm 2: sllx m32=%i5 sllx %o4,16,%i5 # qhasm: m3 += m31 # asm 1: add m3=int64#2 # asm 2: add m3=%i1 add %g5,%i1,%i1 # qhasm: d2 = m2 # asm 1: stx d2=stack64#9] # asm 2: stx d2=64] stx %g4,[%fp+2023-64] # 
qhasm: m33 += 256 # asm 1: add m33=int64#8 # asm 2: add m33=%g4 add %o0,256,%g4 # qhasm: m33 <<= 24 # asm 1: sllx m33=int64#8 # asm 2: sllx m33=%g4 sllx %g4,24,%g4 # qhasm: m3 += m32 # asm 1: add m3=int64#2 # asm 2: add m3=%i1 add %i1,%i5,%i1 # qhasm: m3 += m33 # asm 1: add m3=int64#2 # asm 2: add m3=%i1 add %i1,%g4,%i1 # qhasm: d3 = m3 # asm 1: stx d3=stack64#10] # asm 2: stx d3=72] stx %i1,[%fp+2023-72] # qhasm: m += 16 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,16,%i3 # qhasm: l -= 16 # asm 1: sub l=int64#5 # asm 2: sub l=%i4 sub %i4,16,%i4 # qhasm: z0 = d0 # asm 1: ldd [%fp+2023-z0=float64#21 # asm 2: ldd [%fp+2023-z0=%f40 ldd [%fp+2023-48],%f40 # qhasm: z1 = d1 # asm 1: ldd [%fp+2023-z1=float64#23 # asm 2: ldd [%fp+2023-z1=%f44 ldd [%fp+2023-56],%f44 # qhasm: z2 = d2 # asm 1: ldd [%fp+2023-z2=float64#24 # asm 2: ldd [%fp+2023-z2=%f46 ldd [%fp+2023-64],%f46 # qhasm: z3 = d3 # asm 1: ldd [%fp+2023-z3=float64#25 # asm 2: ldd [%fp+2023-z3=%f48 ldd [%fp+2023-72],%f48 # qhasm: z0 -= alpha0 # asm 1: fsubd z0=float64#4 # asm 2: fsubd z0=%f6 fsubd %f40,%f6,%f6 # qhasm: z1 -= alpha32 # asm 1: fsubd z1=float64#21 # asm 2: fsubd z1=%f40 fsubd %f44,%f0,%f40 # qhasm: z2 -= alpha64 # asm 1: fsubd z2=float64#23 # asm 2: fsubd z2=%f44 fsubd %f46,%f10,%f44 # qhasm: z3 -= alpha96 # asm 1: fsubd z3=float64#24 # asm 2: fsubd z3=%f46 fsubd %f48,%f36,%f46 # qhasm: unsignedh0=float64#2 # asm 2: faddd h0=%f2 faddd %f2,%f6,%f2 # qhasm: h1 += z1 # asm 1: faddd h1=float64#3 # asm 2: faddd h1=%f4 faddd %f4,%f40,%f4 # qhasm: h3 += z2 # asm 1: faddd h3=float64#4 # asm 2: faddd h3=%f6 faddd %f12,%f44,%f6 # qhasm: h5 += z3 # asm 1: faddd h5=float64#7 # asm 2: faddd h5=%f12 faddd %f20,%f46,%f12 # qhasm: goto multiplyaddatmost15bytes if unsigned< blu,pt %xcc,._multiplyaddatmost15bytes nop # qhasm: multiplyaddatleast16bytes: ._multiplyaddatleast16bytes: # qhasm: m2 = 2279 # asm 1: add %g0,2279,>m2=int64#2 # asm 2: add %g0,2279,>m2=%i1 add %g0,2279,%i1 # qhasm: m20 = *(uint8 *) (m + 8) # asm 1: ldub [m20=int64#6 # asm 2: ldub [m20=%i5 ldub [%i3+8],%i5 # qhasm: y7 = h7 + alpha130 # asm 1: faddd y7=float64#11 # asm 2: faddd y7=%f20 faddd %f28,%f56,%f20 # qhasm: m2 <<= 51 # asm 1: sllx m2=int64#2 # asm 2: sllx m2=%i1 sllx %i1,51,%i1 # qhasm: m3 = 2343 # asm 1: add %g0,2343,>m3=int64#8 # asm 2: add %g0,2343,>m3=%g4 add %g0,2343,%g4 # qhasm: m21 = *(uint8 *) (m + 9) # asm 1: ldub [m21=int64#9 # asm 2: ldub [m21=%g5 ldub [%i3+9],%g5 # qhasm: y6 = h6 + alpha130 # asm 1: faddd y6=float64#21 # asm 2: faddd y6=%f40 faddd %f24,%f56,%f40 # qhasm: m3 <<= 51 # asm 1: sllx m3=int64#8 # asm 2: sllx m3=%g4 sllx %g4,51,%g4 # qhasm: m0 = 2151 # asm 1: add %g0,2151,>m0=int64#10 # asm 2: add %g0,2151,>m0=%o0 add %g0,2151,%o0 # qhasm: m22 = *(uint8 *) (m + 10) # asm 1: ldub [m22=int64#11 # asm 2: ldub [m22=%o1 ldub [%i3+10],%o1 # qhasm: y1 = h1 + alpha32 # asm 1: faddd y1=float64#23 # asm 2: faddd y1=%f44 faddd %f4,%f0,%f44 # qhasm: m0 <<= 51 # asm 1: sllx m0=int64#10 # asm 2: sllx m0=%o0 sllx %o0,51,%o0 # qhasm: m1 = 2215 # asm 1: add %g0,2215,>m1=int64#12 # asm 2: add %g0,2215,>m1=%o2 add %g0,2215,%o2 # qhasm: m23 = *(uint8 *) (m + 11) # asm 1: ldub [m23=int64#13 # asm 2: ldub [m23=%o3 ldub [%i3+11],%o3 # qhasm: y0 = h0 + alpha32 # asm 1: faddd y0=float64#24 # asm 2: faddd y0=%f46 faddd %f2,%f0,%f46 # qhasm: m1 <<= 51 # asm 1: sllx m1=int64#12 # asm 2: sllx m1=%o2 sllx %o2,51,%o2 # qhasm: m30 = *(uint8 *) (m + 12) # asm 1: ldub [m30=int64#14 # asm 2: ldub [m30=%o4 ldub [%i3+12],%o4 # qhasm: y7 -= alpha130 # asm 1: fsubd y7=float64#11 
# asm 2: fsubd y7=%f20 fsubd %f20,%f56,%f20 # qhasm: m21 <<= 8 # asm 1: sllx m21=int64#9 # asm 2: sllx m21=%g5 sllx %g5,8,%g5 # qhasm: m2 += m20 # asm 1: add m2=int64#2 # asm 2: add m2=%i1 add %i1,%i5,%i1 # qhasm: m31 = *(uint8 *) (m + 13) # asm 1: ldub [m31=int64#6 # asm 2: ldub [m31=%i5 ldub [%i3+13],%i5 # qhasm: y6 -= alpha130 # asm 1: fsubd y6=float64#21 # asm 2: fsubd y6=%f40 fsubd %f40,%f56,%f40 # qhasm: m22 <<= 16 # asm 1: sllx m22=int64#11 # asm 2: sllx m22=%o1 sllx %o1,16,%o1 # qhasm: m2 += m21 # asm 1: add m2=int64#2 # asm 2: add m2=%i1 add %i1,%g5,%i1 # qhasm: m32 = *(uint8 *) (m + 14) # asm 1: ldub [m32=int64#9 # asm 2: ldub [m32=%g5 ldub [%i3+14],%g5 # qhasm: y1 -= alpha32 # asm 1: fsubd y1=float64#23 # asm 2: fsubd y1=%f44 fsubd %f44,%f0,%f44 # qhasm: m23 <<= 24 # asm 1: sllx m23=int64#13 # asm 2: sllx m23=%o3 sllx %o3,24,%o3 # qhasm: m2 += m22 # asm 1: add m2=int64#2 # asm 2: add m2=%i1 add %i1,%o1,%i1 # qhasm: m33 = *(uint8 *) (m + 15) # asm 1: ldub [m33=int64#11 # asm 2: ldub [m33=%o1 ldub [%i3+15],%o1 # qhasm: y0 -= alpha32 # asm 1: fsubd y0=float64#1 # asm 2: fsubd y0=%f0 fsubd %f46,%f0,%f0 # qhasm: m2 += m23 # asm 1: add m2=int64#2 # asm 2: add m2=%i1 add %i1,%o3,%i1 # qhasm: m00 = *(uint8 *) (m + 0) # asm 1: ldub [m00=int64#13 # asm 2: ldub [m00=%o3 ldub [%i3+0],%o3 # qhasm: y5 = h5 + alpha96 # asm 1: faddd y5=float64#24 # asm 2: faddd y5=%f46 faddd %f12,%f36,%f46 # qhasm: m31 <<= 8 # asm 1: sllx m31=int64#6 # asm 2: sllx m31=%i5 sllx %i5,8,%i5 # qhasm: m3 += m30 # asm 1: add m3=int64#8 # asm 2: add m3=%g4 add %g4,%o4,%g4 # qhasm: m01 = *(uint8 *) (m + 1) # asm 1: ldub [m01=int64#14 # asm 2: ldub [m01=%o4 ldub [%i3+1],%o4 # qhasm: y4 = h4 + alpha96 # asm 1: faddd y4=float64#25 # asm 2: faddd y4=%f48 faddd %f16,%f36,%f48 # qhasm: m32 <<= 16 # asm 1: sllx m32=int64#9 # asm 2: sllx m32=%g5 sllx %g5,16,%g5 # qhasm: m02 = *(uint8 *) (m + 2) # asm 1: ldub [m02=int64#15 # asm 2: ldub [m02=%o5 ldub [%i3+2],%o5 # qhasm: x7 = h7 - y7 # asm 1: fsubd x7=float64#15 # asm 2: fsubd x7=%f28 fsubd %f28,%f20,%f28 # qhasm: y7 *= scale # asm 1: fmuld y7=float64#11 # asm 2: fmuld y7=%f20 fmuld %f20,%f34,%f20 # qhasm: m33 += 256 # asm 1: add m33=int64#11 # asm 2: add m33=%o1 add %o1,256,%o1 # qhasm: m03 = *(uint8 *) (m + 3) # asm 1: ldub [m03=int64#16 # asm 2: ldub [m03=%o7 ldub [%i3+3],%o7 # qhasm: x6 = h6 - y6 # asm 1: fsubd x6=float64#13 # asm 2: fsubd x6=%f24 fsubd %f24,%f40,%f24 # qhasm: y6 *= scale # asm 1: fmuld y6=float64#21 # asm 2: fmuld y6=%f40 fmuld %f40,%f34,%f40 # qhasm: m33 <<= 24 # asm 1: sllx m33=int64#11 # asm 2: sllx m33=%o1 sllx %o1,24,%o1 # qhasm: m3 += m31 # asm 1: add m3=int64#6 # asm 2: add m3=%i5 add %g4,%i5,%i5 # qhasm: m10 = *(uint8 *) (m + 4) # asm 1: ldub [m10=int64#8 # asm 2: ldub [m10=%g4 ldub [%i3+4],%g4 # qhasm: x1 = h1 - y1 # asm 1: fsubd x1=float64#3 # asm 2: fsubd x1=%f4 fsubd %f4,%f44,%f4 # qhasm: m01 <<= 8 # asm 1: sllx m01=int64#14 # asm 2: sllx m01=%o4 sllx %o4,8,%o4 # qhasm: m3 += m32 # asm 1: add m3=int64#6 # asm 2: add m3=%i5 add %i5,%g5,%i5 # qhasm: m11 = *(uint8 *) (m + 5) # asm 1: ldub [m11=int64#9 # asm 2: ldub [m11=%g5 ldub [%i3+5],%g5 # qhasm: x0 = h0 - y0 # asm 1: fsubd x0=float64#2 # asm 2: fsubd x0=%f2 fsubd %f2,%f0,%f2 # qhasm: m3 += m33 # asm 1: add m3=int64#6 # asm 2: add m3=%i5 add %i5,%o1,%i5 # qhasm: m0 += m00 # asm 1: add m0=int64#10 # asm 2: add m0=%o0 add %o0,%o3,%o0 # qhasm: m12 = *(uint8 *) (m + 6) # asm 1: ldub [m12=int64#11 # asm 2: ldub [m12=%o1 ldub [%i3+6],%o1 # qhasm: y5 -= alpha96 # asm 1: fsubd y5=float64#24 # asm 2: 
fsubd y5=%f46 fsubd %f46,%f36,%f46 # qhasm: m02 <<= 16 # asm 1: sllx m02=int64#13 # asm 2: sllx m02=%o3 sllx %o5,16,%o3 # qhasm: m0 += m01 # asm 1: add m0=int64#10 # asm 2: add m0=%o0 add %o0,%o4,%o0 # qhasm: m13 = *(uint8 *) (m + 7) # asm 1: ldub [m13=int64#14 # asm 2: ldub [m13=%o4 ldub [%i3+7],%o4 # qhasm: y4 -= alpha96 # asm 1: fsubd y4=float64#25 # asm 2: fsubd y4=%f48 fsubd %f48,%f36,%f48 # qhasm: m03 <<= 24 # asm 1: sllx m03=int64#15 # asm 2: sllx m03=%o5 sllx %o7,24,%o5 # qhasm: m0 += m02 # asm 1: add m0=int64#10 # asm 2: add m0=%o0 add %o0,%o3,%o0 # qhasm: x1 += y7 # asm 1: faddd x1=float64#3 # asm 2: faddd x1=%f4 faddd %f4,%f20,%f4 # qhasm: m0 += m03 # asm 1: add m0=int64#10 # asm 2: add m0=%o0 add %o0,%o5,%o0 # qhasm: x0 += y6 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f40,%f2 # qhasm: m11 <<= 8 # asm 1: sllx m11=int64#9 # asm 2: sllx m11=%g5 sllx %g5,8,%g5 # qhasm: m1 += m10 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %o2,%g4,%g4 # qhasm: x7 += y5 # asm 1: faddd x7=float64#11 # asm 2: faddd x7=%f20 faddd %f28,%f46,%f20 # qhasm: m12 <<= 16 # asm 1: sllx m12=int64#11 # asm 2: sllx m12=%o1 sllx %o1,16,%o1 # qhasm: m1 += m11 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %g4,%g5,%g4 # qhasm: x6 += y4 # asm 1: faddd x6=float64#13 # asm 2: faddd x6=%f24 faddd %f24,%f48,%f24 # qhasm: m13 <<= 24 # asm 1: sllx m13=int64#9 # asm 2: sllx m13=%g5 sllx %o4,24,%g5 # qhasm: m1 += m12 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %g4,%o1,%g4 # qhasm: y3 = h3 + alpha64 # asm 1: faddd y3=float64#15 # asm 2: faddd y3=%f28 faddd %f6,%f10,%f28 # qhasm: m1 += m13 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %g4,%g5,%g4 # qhasm: y2 = h2 + alpha64 # asm 1: faddd y2=float64#21 # asm 2: faddd y2=%f40 faddd %f8,%f10,%f40 # qhasm: d2 = m2 # asm 1: stx d2=stack64#7] # asm 2: stx d2=48] stx %i1,[%fp+2023-48] # qhasm: x0 += x1 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f4,%f2 # qhasm: d3 = m3 # asm 1: stx d3=stack64#8] # asm 2: stx d3=56] stx %i5,[%fp+2023-56] # qhasm: x6 += x7 # asm 1: faddd x6=float64#3 # asm 2: faddd x6=%f4 faddd %f24,%f20,%f4 # qhasm: d0 = m0 # asm 1: stx d0=stack64#9] # asm 2: stx d0=64] stx %o0,[%fp+2023-64] # qhasm: y3 -= alpha64 # asm 1: fsubd y3=float64#11 # asm 2: fsubd y3=%f20 fsubd %f28,%f10,%f20 # qhasm: d1 = m1 # asm 1: stx d1=stack64#10] # asm 2: stx d1=72] stx %g4,[%fp+2023-72] # qhasm: y2 -= alpha64 # asm 1: fsubd y2=float64#13 # asm 2: fsubd y2=%f24 fsubd %f40,%f10,%f24 # qhasm: x5 = h5 - y5 # asm 1: fsubd x5=float64#7 # asm 2: fsubd x5=%f12 fsubd %f12,%f46,%f12 # qhasm: r3lowx0 = r3low * x0 # asm 1: fmuld r3lowx0=float64#15 # asm 2: fmuld r3lowx0=%f28 fmuld %f38,%f2,%f28 # qhasm: x4 = h4 - y4 # asm 1: fsubd x4=float64#9 # asm 2: fsubd x4=%f16 fsubd %f16,%f48,%f16 # qhasm: r0lowx6 = r0low * x6 # asm 1: fmuld r0lowx6=float64#21 # asm 2: fmuld r0lowx6=%f40 fmuld %f18,%f4,%f40 # qhasm: x3 = h3 - y3 # asm 1: fsubd x3=float64#4 # asm 2: fsubd x3=%f6 fsubd %f6,%f20,%f6 # qhasm: r3highx0 = r3high * x0 # asm 1: fmuld r3highx0=float64#24 # asm 2: fmuld r3highx0=%f46 fmuld %f42,%f2,%f46 # qhasm: sr1low = sr1low_stack # asm 1: ldd [%fp+2023-sr1low=float64#25 # asm 2: ldd [%fp+2023-sr1low=%f48 ldd [%fp+2023-16],%f48 # qhasm: x2 = h2 - y2 # asm 1: fsubd x2=float64#5 # asm 2: fsubd x2=%f8 fsubd %f8,%f24,%f8 # qhasm: r0highx6 = r0high * x6 # asm 1: fmuld r0highx6=float64#26 # asm 2: fmuld r0highx6=%f50 fmuld %f14,%f4,%f50 # qhasm: sr1high = sr1high_stack # asm 1: ldd [%fp+2023-sr1high=float64#27 # asm 2: ldd [%fp+2023-sr1high=%f52 ldd 
[%fp+2023-0],%f52 # qhasm: x5 += y3 # asm 1: faddd x5=float64#7 # asm 2: faddd x5=%f12 faddd %f12,%f20,%f12 # qhasm: r0lowx0 = r0low * x0 # asm 1: fmuld r0lowx0=float64#11 # asm 2: fmuld r0lowx0=%f20 fmuld %f18,%f2,%f20 # qhasm: sr2low = sr2low_stack # asm 1: ldd [%fp+2023-sr2low=float64#28 # asm 2: ldd [%fp+2023-sr2low=%f54 ldd [%fp+2023-24],%f54 # qhasm: h6 = r3lowx0 + r0lowx6 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f40,%f28 # qhasm: sr1lowx6 = sr1low * x6 # asm 1: fmuld sr1lowx6=float64#21 # asm 2: fmuld sr1lowx6=%f40 fmuld %f48,%f4,%f40 # qhasm: sr2high = sr2high_stack # asm 1: ldd [%fp+2023-sr2high=float64#25 # asm 2: ldd [%fp+2023-sr2high=%f48 ldd [%fp+2023-8],%f48 # qhasm: x4 += y2 # asm 1: faddd x4=float64#9 # asm 2: faddd x4=%f16 faddd %f16,%f24,%f16 # qhasm: r0highx0 = r0high * x0 # asm 1: fmuld r0highx0=float64#13 # asm 2: fmuld r0highx0=%f24 fmuld %f14,%f2,%f24 # qhasm: sr3low = sr3low_stack # asm 1: ldd [%fp+2023-sr3low=float64#30 # asm 2: ldd [%fp+2023-sr3low=%f58 ldd [%fp+2023-40],%f58 # qhasm: h7 = r3highx0 + r0highx6 # asm 1: faddd h7=float64#24 # asm 2: faddd h7=%f46 faddd %f46,%f50,%f46 # qhasm: sr1highx6 = sr1high * x6 # asm 1: fmuld sr1highx6=float64#26 # asm 2: fmuld sr1highx6=%f50 fmuld %f52,%f4,%f50 # qhasm: sr3high = sr3high_stack # asm 1: ldd [%fp+2023-sr3high=float64#27 # asm 2: ldd [%fp+2023-sr3high=%f52 ldd [%fp+2023-32],%f52 # qhasm: x3 += y1 # asm 1: faddd x3=float64#4 # asm 2: faddd x3=%f6 faddd %f6,%f44,%f6 # qhasm: r1lowx0 = r1low * x0 # asm 1: fmuld r1lowx0=float64#23 # asm 2: fmuld r1lowx0=%f44 fmuld %f22,%f2,%f44 # qhasm: h0 = r0lowx0 + sr1lowx6 # asm 1: faddd h0=float64#11 # asm 2: faddd h0=%f20 faddd %f20,%f40,%f20 # qhasm: sr2lowx6 = sr2low * x6 # asm 1: fmuld sr2lowx6=float64#21 # asm 2: fmuld sr2lowx6=%f40 fmuld %f54,%f4,%f40 # qhasm: x2 += y0 # asm 1: faddd x2=float64#1 # asm 2: faddd x2=%f0 faddd %f8,%f0,%f0 # qhasm: r1highx0 = r1high * x0 # asm 1: fmuld r1highx0=float64#5 # asm 2: fmuld r1highx0=%f8 fmuld %f30,%f2,%f8 # qhasm: h1 = r0highx0 + sr1highx6 # asm 1: faddd h1=float64#13 # asm 2: faddd h1=%f24 faddd %f24,%f50,%f24 # qhasm: sr2highx6 = sr2high * x6 # asm 1: fmuld sr2highx6=float64#26 # asm 2: fmuld sr2highx6=%f50 fmuld %f48,%f4,%f50 # qhasm: x4 += x5 # asm 1: faddd x4=float64#7 # asm 2: faddd x4=%f12 faddd %f16,%f12,%f12 # qhasm: r2lowx0 = r2low * x0 # asm 1: fmuld r2lowx0=float64#9 # asm 2: fmuld r2lowx0=%f16 fmuld %f26,%f2,%f16 # qhasm: z2 = d2 # asm 1: ldd [%fp+2023-z2=float64#31 # asm 2: ldd [%fp+2023-z2=%f60 ldd [%fp+2023-48],%f60 # qhasm: h2 = r1lowx0 + sr2lowx6 # asm 1: faddd h2=float64#21 # asm 2: faddd h2=%f40 faddd %f44,%f40,%f40 # qhasm: sr3lowx6 = sr3low * x6 # asm 1: fmuld sr3lowx6=float64#23 # asm 2: fmuld sr3lowx6=%f44 fmuld %f58,%f4,%f44 # qhasm: z3 = d3 # asm 1: ldd [%fp+2023-z3=float64#32 # asm 2: ldd [%fp+2023-z3=%f62 ldd [%fp+2023-56],%f62 # qhasm: x2 += x3 # asm 1: faddd x2=float64#4 # asm 2: faddd x2=%f6 faddd %f0,%f6,%f6 # qhasm: r2highx0 = r2high * x0 # asm 1: fmuld r2highx0=float64#1 # asm 2: fmuld r2highx0=%f0 fmuld %f32,%f2,%f0 # qhasm: h3 = r1highx0 + sr2highx6 # asm 1: faddd h3=float64#2 # asm 2: faddd h3=%f2 faddd %f8,%f50,%f2 # qhasm: sr3highx6 = sr3high * x6 # asm 1: fmuld sr3highx6=float64#3 # asm 2: fmuld sr3highx6=%f4 fmuld %f52,%f4,%f4 # qhasm: z2 -= alpha64 # asm 1: fsubd z2=float64#26 # asm 2: fsubd z2=%f50 fsubd %f60,%f10,%f50 # qhasm: r1highx4 = r1high * x4 # asm 1: fmuld r1highx4=float64#5 # asm 2: fmuld r1highx4=%f8 fmuld %f30,%f12,%f8 # qhasm: h4 = r2lowx0 + sr3lowx6 # 
asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f16,%f44,%f16 # qhasm: r1lowx4 = r1low * x4 # asm 1: fmuld r1lowx4=float64#23 # asm 2: fmuld r1lowx4=%f44 fmuld %f22,%f12,%f44 # qhasm: z3 -= alpha96 # asm 1: fsubd z3=float64#31 # asm 2: fsubd z3=%f60 fsubd %f62,%f36,%f60 # qhasm: r0lowx4 = r0low * x4 # asm 1: fmuld r0lowx4=float64#32 # asm 2: fmuld r0lowx4=%f62 fmuld %f18,%f12,%f62 # qhasm: h5 = r2highx0 + sr3highx6 # asm 1: faddd h5=float64#1 # asm 2: faddd h5=%f0 faddd %f0,%f4,%f0 # qhasm: r0highx4 = r0high * x4 # asm 1: fmuld r0highx4=float64#3 # asm 2: fmuld r0highx4=%f4 fmuld %f14,%f12,%f4 # qhasm: h7 += r1highx4 # asm 1: faddd h7=float64#24 # asm 2: faddd h7=%f46 faddd %f46,%f8,%f46 # qhasm: sr3highx4 = sr3high * x4 # asm 1: fmuld sr3highx4=float64#5 # asm 2: fmuld sr3highx4=%f8 fmuld %f52,%f12,%f8 # qhasm: h6 += r1lowx4 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f44,%f28 # qhasm: sr3lowx4 = sr3low * x4 # asm 1: fmuld sr3lowx4=float64#23 # asm 2: fmuld sr3lowx4=%f44 fmuld %f58,%f12,%f44 # qhasm: h4 += r0lowx4 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f16,%f62,%f16 # qhasm: sr2highx4 = sr2high * x4 # asm 1: fmuld sr2highx4=float64#25 # asm 2: fmuld sr2highx4=%f48 fmuld %f48,%f12,%f48 # qhasm: h5 += r0highx4 # asm 1: faddd h5=float64#3 # asm 2: faddd h5=%f4 faddd %f0,%f4,%f4 # qhasm: sr2lowx4 = sr2low * x4 # asm 1: fmuld sr2lowx4=float64#1 # asm 2: fmuld sr2lowx4=%f0 fmuld %f54,%f12,%f0 # qhasm: h3 += sr3highx4 # asm 1: faddd h3=float64#2 # asm 2: faddd h3=%f2 faddd %f2,%f8,%f2 # qhasm: r0highx2 = r0high * x2 # asm 1: fmuld r0highx2=float64#5 # asm 2: fmuld r0highx2=%f8 fmuld %f14,%f6,%f8 # qhasm: h2 += sr3lowx4 # asm 1: faddd h2=float64#7 # asm 2: faddd h2=%f12 faddd %f40,%f44,%f12 # qhasm: r0lowx2 = r0low * x2 # asm 1: fmuld r0lowx2=float64#21 # asm 2: fmuld r0lowx2=%f40 fmuld %f18,%f6,%f40 # qhasm: h1 += sr2highx4 # asm 1: faddd h1=float64#23 # asm 2: faddd h1=%f44 faddd %f24,%f48,%f44 # qhasm: r1lowx2 = r1low * x2 # asm 1: fmuld r1lowx2=float64#13 # asm 2: fmuld r1lowx2=%f24 fmuld %f22,%f6,%f24 # qhasm: h0 += sr2lowx4 # asm 1: faddd h0=float64#11 # asm 2: faddd h0=%f20 faddd %f20,%f0,%f20 # qhasm: r1highx2 = r1high * x2 # asm 1: fmuld r1highx2=float64#25 # asm 2: fmuld r1highx2=%f48 fmuld %f30,%f6,%f48 # qhasm: h3 += r0highx2 # asm 1: faddd h3=float64#2 # asm 2: faddd h3=%f2 faddd %f2,%f8,%f2 # qhasm: r2lowx2 = r2low * x2 # asm 1: fmuld r2lowx2=float64#28 # asm 2: fmuld r2lowx2=%f54 fmuld %f26,%f6,%f54 # qhasm: alpha32 = *(float64 *) (constants + 40) # asm 1: ldd [alpha32=float64#1 # asm 2: ldd [alpha32=%f0 ldd [%g1+40],%f0 # qhasm: h2 += r0lowx2 # asm 1: faddd h2=float64#5 # asm 2: faddd h2=%f8 faddd %f12,%f40,%f8 # qhasm: r2highx2 = r2high * x2 # asm 1: fmuld r2highx2=float64#7 # asm 2: fmuld r2highx2=%f12 fmuld %f32,%f6,%f12 # qhasm: alpha0 = *(float64 *) (constants + 24) # asm 1: ldd [alpha0=float64#21 # asm 2: ldd [alpha0=%f40 ldd [%g1+24],%f40 # qhasm: h4 += r1lowx2 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f16,%f24,%f16 # qhasm: sr3lowx2 = sr3low * x2 # asm 1: fmuld sr3lowx2=float64#30 # asm 2: fmuld sr3lowx2=%f58 fmuld %f58,%f6,%f58 # qhasm: z1 = d1 # asm 1: ldd [%fp+2023-z1=float64#32 # asm 2: ldd [%fp+2023-z1=%f62 ldd [%fp+2023-72],%f62 # qhasm: h5 += r1highx2 # asm 1: faddd h5=float64#3 # asm 2: faddd h5=%f4 faddd %f4,%f48,%f4 # qhasm: sr3highx2 = sr3high * x2 # asm 1: fmuld sr3highx2=float64#4 # asm 2: fmuld sr3highx2=%f6 fmuld %f52,%f6,%f6 # qhasm: z0 = d0 # asm 1: ldd [%fp+2023-z0=float64#25 # asm 2: ldd 
[%fp+2023-z0=%f48 ldd [%fp+2023-64],%f48 # qhasm: m += 16 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,16,%i3 # qhasm: h6 += r2lowx2 # asm 1: faddd h6=float64#13 # asm 2: faddd h6=%f24 faddd %f28,%f54,%f24 # qhasm: l -= 16 # asm 1: sub l=int64#5 # asm 2: sub l=%i4 sub %i4,16,%i4 # qhasm: h7 += r2highx2 # asm 1: faddd h7=float64#15 # asm 2: faddd h7=%f28 faddd %f46,%f12,%f28 # qhasm: h0 += sr3lowx2 # asm 1: faddd h0=float64#11 # asm 2: faddd h0=%f20 faddd %f20,%f58,%f20 # qhasm: h1 += sr3highx2 # asm 1: faddd h1=float64#23 # asm 2: faddd h1=%f44 faddd %f44,%f6,%f44 # qhasm: z1 -= alpha32 # asm 1: fsubd z1=float64#24 # asm 2: fsubd z1=%f46 fsubd %f62,%f0,%f46 # qhasm: z0 -= alpha0 # asm 1: fsubd z0=float64#21 # asm 2: fsubd z0=%f40 fsubd %f48,%f40,%f40 # qhasm: unsignedh5=float64#7 # asm 2: faddd h5=%f12 faddd %f4,%f60,%f12 # qhasm: h3 += z2 # asm 1: faddd h3=float64#4 # asm 2: faddd h3=%f6 faddd %f2,%f50,%f6 # qhasm: h1 += z1 # asm 1: faddd h1=float64#3 # asm 2: faddd h1=%f4 faddd %f44,%f46,%f4 # qhasm: h0 += z0 # asm 1: faddd h0=float64#2 # asm 2: faddd h0=%f2 faddd %f20,%f40,%f2 # qhasm: goto multiplyaddatleast16bytes if !unsigned< bgeu,pt %xcc,._multiplyaddatleast16bytes nop # qhasm: multiplyaddatmost15bytes: ._multiplyaddatmost15bytes: # qhasm: y7 = h7 + alpha130 # asm 1: faddd y7=float64#11 # asm 2: faddd y7=%f20 faddd %f28,%f56,%f20 # qhasm: y6 = h6 + alpha130 # asm 1: faddd y6=float64#21 # asm 2: faddd y6=%f40 faddd %f24,%f56,%f40 # qhasm: y1 = h1 + alpha32 # asm 1: faddd y1=float64#23 # asm 2: faddd y1=%f44 faddd %f4,%f0,%f44 # qhasm: y0 = h0 + alpha32 # asm 1: faddd y0=float64#24 # asm 2: faddd y0=%f46 faddd %f2,%f0,%f46 # qhasm: y7 -= alpha130 # asm 1: fsubd y7=float64#11 # asm 2: fsubd y7=%f20 fsubd %f20,%f56,%f20 # qhasm: y6 -= alpha130 # asm 1: fsubd y6=float64#21 # asm 2: fsubd y6=%f40 fsubd %f40,%f56,%f40 # qhasm: y1 -= alpha32 # asm 1: fsubd y1=float64#23 # asm 2: fsubd y1=%f44 fsubd %f44,%f0,%f44 # qhasm: y0 -= alpha32 # asm 1: fsubd y0=float64#24 # asm 2: fsubd y0=%f46 fsubd %f46,%f0,%f46 # qhasm: y5 = h5 + alpha96 # asm 1: faddd y5=float64#25 # asm 2: faddd y5=%f48 faddd %f12,%f36,%f48 # qhasm: y4 = h4 + alpha96 # asm 1: faddd y4=float64#26 # asm 2: faddd y4=%f50 faddd %f16,%f36,%f50 # qhasm: x7 = h7 - y7 # asm 1: fsubd x7=float64#15 # asm 2: fsubd x7=%f28 fsubd %f28,%f20,%f28 # qhasm: y7 *= scale # asm 1: fmuld y7=float64#11 # asm 2: fmuld y7=%f20 fmuld %f20,%f34,%f20 # qhasm: x6 = h6 - y6 # asm 1: fsubd x6=float64#13 # asm 2: fsubd x6=%f24 fsubd %f24,%f40,%f24 # qhasm: y6 *= scale # asm 1: fmuld y6=float64#21 # asm 2: fmuld y6=%f40 fmuld %f40,%f34,%f40 # qhasm: x1 = h1 - y1 # asm 1: fsubd x1=float64#3 # asm 2: fsubd x1=%f4 fsubd %f4,%f44,%f4 # qhasm: x0 = h0 - y0 # asm 1: fsubd x0=float64#2 # asm 2: fsubd x0=%f2 fsubd %f2,%f46,%f2 # qhasm: y5 -= alpha96 # asm 1: fsubd y5=float64#25 # asm 2: fsubd y5=%f48 fsubd %f48,%f36,%f48 # qhasm: y4 -= alpha96 # asm 1: fsubd y4=float64#26 # asm 2: fsubd y4=%f50 fsubd %f50,%f36,%f50 # qhasm: x1 += y7 # asm 1: faddd x1=float64#3 # asm 2: faddd x1=%f4 faddd %f4,%f20,%f4 # qhasm: x0 += y6 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f40,%f2 # qhasm: x7 += y5 # asm 1: faddd x7=float64#11 # asm 2: faddd x7=%f20 faddd %f28,%f48,%f20 # qhasm: x6 += y4 # asm 1: faddd x6=float64#13 # asm 2: faddd x6=%f24 faddd %f24,%f50,%f24 # qhasm: y3 = h3 + alpha64 # asm 1: faddd y3=float64#15 # asm 2: faddd y3=%f28 faddd %f6,%f10,%f28 # qhasm: y2 = h2 + alpha64 # asm 1: faddd y2=float64#21 # asm 2: faddd y2=%f40 faddd %f8,%f10,%f40 # 
qhasm: x0 += x1 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f4,%f2 # qhasm: x6 += x7 # asm 1: faddd x6=float64#3 # asm 2: faddd x6=%f4 faddd %f24,%f20,%f4 # qhasm: y3 -= alpha64 # asm 1: fsubd y3=float64#11 # asm 2: fsubd y3=%f20 fsubd %f28,%f10,%f20 # qhasm: y2 -= alpha64 # asm 1: fsubd y2=float64#13 # asm 2: fsubd y2=%f24 fsubd %f40,%f10,%f24 # qhasm: x5 = h5 - y5 # asm 1: fsubd x5=float64#7 # asm 2: fsubd x5=%f12 fsubd %f12,%f48,%f12 # qhasm: r3lowx0 = r3low * x0 # asm 1: fmuld r3lowx0=float64#15 # asm 2: fmuld r3lowx0=%f28 fmuld %f38,%f2,%f28 # qhasm: x4 = h4 - y4 # asm 1: fsubd x4=float64#9 # asm 2: fsubd x4=%f16 fsubd %f16,%f50,%f16 # qhasm: r0lowx6 = r0low * x6 # asm 1: fmuld r0lowx6=float64#21 # asm 2: fmuld r0lowx6=%f40 fmuld %f18,%f4,%f40 # qhasm: x3 = h3 - y3 # asm 1: fsubd x3=float64#4 # asm 2: fsubd x3=%f6 fsubd %f6,%f20,%f6 # qhasm: r3highx0 = r3high * x0 # asm 1: fmuld r3highx0=float64#25 # asm 2: fmuld r3highx0=%f48 fmuld %f42,%f2,%f48 # qhasm: sr1low = sr1low_stack # asm 1: ldd [%fp+2023-sr1low=float64#26 # asm 2: ldd [%fp+2023-sr1low=%f50 ldd [%fp+2023-16],%f50 # qhasm: x2 = h2 - y2 # asm 1: fsubd x2=float64#5 # asm 2: fsubd x2=%f8 fsubd %f8,%f24,%f8 # qhasm: r0highx6 = r0high * x6 # asm 1: fmuld r0highx6=float64#27 # asm 2: fmuld r0highx6=%f52 fmuld %f14,%f4,%f52 # qhasm: sr1high = sr1high_stack # asm 1: ldd [%fp+2023-sr1high=float64#28 # asm 2: ldd [%fp+2023-sr1high=%f54 ldd [%fp+2023-0],%f54 # qhasm: x5 += y3 # asm 1: faddd x5=float64#7 # asm 2: faddd x5=%f12 faddd %f12,%f20,%f12 # qhasm: r0lowx0 = r0low * x0 # asm 1: fmuld r0lowx0=float64#11 # asm 2: fmuld r0lowx0=%f20 fmuld %f18,%f2,%f20 # qhasm: h6 = r3lowx0 + r0lowx6 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f40,%f28 # qhasm: sr1lowx6 = sr1low * x6 # asm 1: fmuld sr1lowx6=float64#21 # asm 2: fmuld sr1lowx6=%f40 fmuld %f50,%f4,%f40 # qhasm: x4 += y2 # asm 1: faddd x4=float64#9 # asm 2: faddd x4=%f16 faddd %f16,%f24,%f16 # qhasm: r0highx0 = r0high * x0 # asm 1: fmuld r0highx0=float64#13 # asm 2: fmuld r0highx0=%f24 fmuld %f14,%f2,%f24 # qhasm: sr2low = sr2low_stack # asm 1: ldd [%fp+2023-sr2low=float64#26 # asm 2: ldd [%fp+2023-sr2low=%f50 ldd [%fp+2023-24],%f50 # qhasm: h7 = r3highx0 + r0highx6 # asm 1: faddd h7=float64#25 # asm 2: faddd h7=%f48 faddd %f48,%f52,%f48 # qhasm: sr1highx6 = sr1high * x6 # asm 1: fmuld sr1highx6=float64#27 # asm 2: fmuld sr1highx6=%f52 fmuld %f54,%f4,%f52 # qhasm: sr2high = sr2high_stack # asm 1: ldd [%fp+2023-sr2high=float64#28 # asm 2: ldd [%fp+2023-sr2high=%f54 ldd [%fp+2023-8],%f54 # qhasm: x3 += y1 # asm 1: faddd x3=float64#4 # asm 2: faddd x3=%f6 faddd %f6,%f44,%f6 # qhasm: r1lowx0 = r1low * x0 # asm 1: fmuld r1lowx0=float64#23 # asm 2: fmuld r1lowx0=%f44 fmuld %f22,%f2,%f44 # qhasm: h0 = r0lowx0 + sr1lowx6 # asm 1: faddd h0=float64#11 # asm 2: faddd h0=%f20 faddd %f20,%f40,%f20 # qhasm: sr2lowx6 = sr2low * x6 # asm 1: fmuld sr2lowx6=float64#21 # asm 2: fmuld sr2lowx6=%f40 fmuld %f50,%f4,%f40 # qhasm: x2 += y0 # asm 1: faddd x2=float64#5 # asm 2: faddd x2=%f8 faddd %f8,%f46,%f8 # qhasm: r1highx0 = r1high * x0 # asm 1: fmuld r1highx0=float64#24 # asm 2: fmuld r1highx0=%f46 fmuld %f30,%f2,%f46 # qhasm: sr3low = sr3low_stack # asm 1: ldd [%fp+2023-sr3low=float64#30 # asm 2: ldd [%fp+2023-sr3low=%f58 ldd [%fp+2023-40],%f58 # qhasm: h1 = r0highx0 + sr1highx6 # asm 1: faddd h1=float64#13 # asm 2: faddd h1=%f24 faddd %f24,%f52,%f24 # qhasm: sr2highx6 = sr2high * x6 # asm 1: fmuld sr2highx6=float64#27 # asm 2: fmuld sr2highx6=%f52 fmuld %f54,%f4,%f52 
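#
# Editorial note (a summary inferred from the surrounding code, not emitted
# by qhasm): h0..h7 hold the poly1305 accumulator as four 32-bit limbs,
# each kept as a low/high pair of doubles.  Adding and then subtracting the
# alpha* constants rounds a limb, splitting it into a high part y* and a
# low part x*; x0, x2, x4 and x6 are the recombined limbs that enter the
# multiplication.  Limb products that would land at or above 2^130 are
# taken against sr1..sr3 = scale*r1..r3 rather than r1..r3; assuming the
# usual poly1305 constant table, scale holds 5*2^-130, so those products
# fold back to the bottom, which is the reduction modulo 2^130 - 5.
# Writing c0..c3 for x0, x2, x4, x6, the accumulated products follow
#
#     h[k] = sum_{i+j=k} r[i]*c[j]  +  sum_{i+j=k+4} sr[i]*c[j]
#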
# qhasm: sr3high = sr3high_stack # asm 1: ldd [%fp+2023-sr3high=float64#31 # asm 2: ldd [%fp+2023-sr3high=%f60 ldd [%fp+2023-32],%f60 # qhasm: x4 += x5 # asm 1: faddd x4=float64#7 # asm 2: faddd x4=%f12 faddd %f16,%f12,%f12 # qhasm: r2lowx0 = r2low * x0 # asm 1: fmuld r2lowx0=float64#9 # asm 2: fmuld r2lowx0=%f16 fmuld %f26,%f2,%f16 # qhasm: h2 = r1lowx0 + sr2lowx6 # asm 1: faddd h2=float64#21 # asm 2: faddd h2=%f40 faddd %f44,%f40,%f40 # qhasm: sr3lowx6 = sr3low * x6 # asm 1: fmuld sr3lowx6=float64#23 # asm 2: fmuld sr3lowx6=%f44 fmuld %f58,%f4,%f44 # qhasm: x2 += x3 # asm 1: faddd x2=float64#4 # asm 2: faddd x2=%f6 faddd %f8,%f6,%f6 # qhasm: r2highx0 = r2high * x0 # asm 1: fmuld r2highx0=float64#2 # asm 2: fmuld r2highx0=%f2 fmuld %f32,%f2,%f2 # qhasm: h3 = r1highx0 + sr2highx6 # asm 1: faddd h3=float64#5 # asm 2: faddd h3=%f8 faddd %f46,%f52,%f8 # qhasm: sr3highx6 = sr3high * x6 # asm 1: fmuld sr3highx6=float64#3 # asm 2: fmuld sr3highx6=%f4 fmuld %f60,%f4,%f4 # qhasm: r1highx4 = r1high * x4 # asm 1: fmuld r1highx4=float64#24 # asm 2: fmuld r1highx4=%f46 fmuld %f30,%f12,%f46 # qhasm: h4 = r2lowx0 + sr3lowx6 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f16,%f44,%f16 # qhasm: r1lowx4 = r1low * x4 # asm 1: fmuld r1lowx4=float64#23 # asm 2: fmuld r1lowx4=%f44 fmuld %f22,%f12,%f44 # qhasm: r0highx4 = r0high * x4 # asm 1: fmuld r0highx4=float64#27 # asm 2: fmuld r0highx4=%f52 fmuld %f14,%f12,%f52 # qhasm: h5 = r2highx0 + sr3highx6 # asm 1: faddd h5=float64#2 # asm 2: faddd h5=%f2 faddd %f2,%f4,%f2 # qhasm: r0lowx4 = r0low * x4 # asm 1: fmuld r0lowx4=float64#3 # asm 2: fmuld r0lowx4=%f4 fmuld %f18,%f12,%f4 # qhasm: h7 += r1highx4 # asm 1: faddd h7=float64#24 # asm 2: faddd h7=%f46 faddd %f48,%f46,%f46 # qhasm: sr3highx4 = sr3high * x4 # asm 1: fmuld sr3highx4=float64#25 # asm 2: fmuld sr3highx4=%f48 fmuld %f60,%f12,%f48 # qhasm: h6 += r1lowx4 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f44,%f28 # qhasm: sr3lowx4 = sr3low * x4 # asm 1: fmuld sr3lowx4=float64#23 # asm 2: fmuld sr3lowx4=%f44 fmuld %f58,%f12,%f44 # qhasm: h5 += r0highx4 # asm 1: faddd h5=float64#2 # asm 2: faddd h5=%f2 faddd %f2,%f52,%f2 # qhasm: sr2highx4 = sr2high * x4 # asm 1: fmuld sr2highx4=float64#27 # asm 2: fmuld sr2highx4=%f52 fmuld %f54,%f12,%f52 # qhasm: h4 += r0lowx4 # asm 1: faddd h4=float64#3 # asm 2: faddd h4=%f4 faddd %f16,%f4,%f4 # qhasm: sr2lowx4 = sr2low * x4 # asm 1: fmuld sr2lowx4=float64#7 # asm 2: fmuld sr2lowx4=%f12 fmuld %f50,%f12,%f12 # qhasm: h3 += sr3highx4 # asm 1: faddd h3=float64#9 # asm 2: faddd h3=%f16 faddd %f8,%f48,%f16 # qhasm: r0lowx2 = r0low * x2 # asm 1: fmuld r0lowx2=float64#5 # asm 2: fmuld r0lowx2=%f8 fmuld %f18,%f6,%f8 # qhasm: h2 += sr3lowx4 # asm 1: faddd h2=float64#21 # asm 2: faddd h2=%f40 faddd %f40,%f44,%f40 # qhasm: r0highx2 = r0high * x2 # asm 1: fmuld r0highx2=float64#23 # asm 2: fmuld r0highx2=%f44 fmuld %f14,%f6,%f44 # qhasm: h1 += sr2highx4 # asm 1: faddd h1=float64#25 # asm 2: faddd h1=%f48 faddd %f24,%f52,%f48 # qhasm: r1lowx2 = r1low * x2 # asm 1: fmuld r1lowx2=float64#13 # asm 2: fmuld r1lowx2=%f24 fmuld %f22,%f6,%f24 # qhasm: h0 += sr2lowx4 # asm 1: faddd h0=float64#26 # asm 2: faddd h0=%f50 faddd %f20,%f12,%f50 # qhasm: r1highx2 = r1high * x2 # asm 1: fmuld r1highx2=float64#11 # asm 2: fmuld r1highx2=%f20 fmuld %f30,%f6,%f20 # qhasm: h2 += r0lowx2 # asm 1: faddd h2=float64#5 # asm 2: faddd h2=%f8 faddd %f40,%f8,%f8 # qhasm: r2lowx2 = r2low * x2 # asm 1: fmuld r2lowx2=float64#21 # asm 2: fmuld r2lowx2=%f40 fmuld %f26,%f6,%f40 # qhasm: h3 
+= r0highx2 # asm 1: faddd h3=float64#7 # asm 2: faddd h3=%f12 faddd %f16,%f44,%f12 # qhasm: r2highx2 = r2high * x2 # asm 1: fmuld r2highx2=float64#23 # asm 2: fmuld r2highx2=%f44 fmuld %f32,%f6,%f44 # qhasm: h4 += r1lowx2 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f4,%f24,%f16 # qhasm: sr3lowx2 = sr3low * x2 # asm 1: fmuld sr3lowx2=float64#3 # asm 2: fmuld sr3lowx2=%f4 fmuld %f58,%f6,%f4 # qhasm: h5 += r1highx2 # asm 1: faddd h5=float64#11 # asm 2: faddd h5=%f20 faddd %f2,%f20,%f20 # qhasm: sr3highx2 = sr3high * x2 # asm 1: fmuld sr3highx2=float64#4 # asm 2: fmuld sr3highx2=%f6 fmuld %f60,%f6,%f6 # qhasm: h6 += r2lowx2 # asm 1: faddd h6=float64#13 # asm 2: faddd h6=%f24 faddd %f28,%f40,%f24 # qhasm: h7 += r2highx2 # asm 1: faddd h7=float64#15 # asm 2: faddd h7=%f28 faddd %f46,%f44,%f28 # qhasm: h0 += sr3lowx2 # asm 1: faddd h0=float64#2 # asm 2: faddd h0=%f2 faddd %f50,%f4,%f2 # qhasm: h1 += sr3highx2 # asm 1: faddd h1=float64#3 # asm 2: faddd h1=%f4 faddd %f48,%f6,%f4 # qhasm: addatmost15bytes: ._addatmost15bytes: # qhasm: lbelow2 = l - 2 # asm 1: sub lbelow2=int64#2 # asm 2: sub lbelow2=%i1 sub %i4,2,%i1 # qhasm: =? l - 0 # asm 1: subcc lbelow3=int64#6 # asm 2: sub lbelow3=%i5 sub %i4,3,%i5 # qhasm: goto nomorebytes if = be,pt %xcc,._nomorebytes nop # qhasm: (int64) lbelow2 >>= 63 # asm 1: srax lbelow2=int64#2 # asm 2: srax lbelow2=%i1 srax %i1,63,%i1 # qhasm: lbelow4 = l - 4 # asm 1: sub lbelow4=int64#8 # asm 2: sub lbelow4=%g4 sub %i4,4,%g4 # qhasm: m00 = *(uint8 *) (m + 0) # asm 1: ldub [m00=int64#9 # asm 2: ldub [m00=%g5 ldub [%i3+0],%g5 # qhasm: (int64) lbelow3 >>= 63 # asm 1: srax lbelow3=int64#6 # asm 2: srax lbelow3=%i5 srax %i5,63,%i5 # qhasm: m += lbelow2 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%i1,%i3 # qhasm: m01 = *(uint8 *) (m + 1) # asm 1: ldub [m01=int64#10 # asm 2: ldub [m01=%o0 ldub [%i3+1],%o0 # qhasm: (int64) lbelow4 >>= 63 # asm 1: srax lbelow4=int64#8 # asm 2: srax lbelow4=%g4 srax %g4,63,%g4 # qhasm: m += lbelow3 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%i5,%i3 # qhasm: m02 = *(uint8 *) (m + 2) # asm 1: ldub [m02=int64#11 # asm 2: ldub [m02=%o1 ldub [%i3+2],%o1 # qhasm: m += lbelow4 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%g4,%i3 # qhasm: m0 = 2151 # asm 1: add %g0,2151,>m0=int64#12 # asm 2: add %g0,2151,>m0=%o2 add %g0,2151,%o2 # qhasm: m03 = *(uint8 *) (m + 3) # asm 1: ldub [m03=int64#13 # asm 2: ldub [m03=%o3 ldub [%i3+3],%o3 # qhasm: m0 <<= 51 # asm 1: sllx m0=int64#12 # asm 2: sllx m0=%o2 sllx %o2,51,%o2 # qhasm: m1 = 2215 # asm 1: add %g0,2215,>m1=int64#14 # asm 2: add %g0,2215,>m1=%o4 add %g0,2215,%o4 # qhasm: m0 += m00 # asm 1: add m0=int64#9 # asm 2: add m0=%g5 add %o2,%g5,%g5 # qhasm: m01 &= ~lbelow2 # asm 1: andn m01=int64#10 # asm 2: andn m01=%o0 andn %o0,%i1,%o0 # qhasm: m02 &= ~lbelow3 # asm 1: andn m02=int64#11 # asm 2: andn m02=%o1 andn %o1,%i5,%o1 # qhasm: m01 -= lbelow2 # asm 1: sub m01=int64#10 # asm 2: sub m01=%o0 sub %o0,%i1,%o0 # qhasm: m01 <<= 8 # asm 1: sllx m01=int64#10 # asm 2: sllx m01=%o0 sllx %o0,8,%o0 # qhasm: m03 &= ~lbelow4 # asm 1: andn m03=int64#12 # asm 2: andn m03=%o2 andn %o3,%g4,%o2 # qhasm: m0 += m01 # asm 1: add m0=int64#9 # asm 2: add m0=%g5 add %g5,%o0,%g5 # qhasm: lbelow2 -= lbelow3 # asm 1: sub lbelow2=int64#2 # asm 2: sub lbelow2=%i1 sub %i1,%i5,%i1 # qhasm: m02 += lbelow2 # asm 1: add m02=int64#2 # asm 2: add m02=%i1 add %o1,%i1,%i1 # qhasm: lbelow3 -= lbelow4 # asm 1: sub lbelow3=int64#6 # asm 2: sub lbelow3=%i5 sub %i5,%g4,%i5 # qhasm: m02 <<= 16 # asm 1: sllx m02=int64#2 # 
asm 2: sllx m02=%i1 sllx %i1,16,%i1 # qhasm: m03 += lbelow3 # asm 1: add m03=int64#6 # asm 2: add m03=%i5 add %o2,%i5,%i5 # qhasm: m03 <<= 24 # asm 1: sllx m03=int64#6 # asm 2: sllx m03=%i5 sllx %i5,24,%i5 # qhasm: m0 += m02 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %g5,%i1,%i1 # qhasm: m0 += m03 # asm 1: add m0=int64#2 # asm 2: add m0=%i1 add %i1,%i5,%i1 # qhasm: lbelow5 = l - 5 # asm 1: sub lbelow5=int64#6 # asm 2: sub lbelow5=%i5 sub %i4,5,%i5 # qhasm: lbelow6 = l - 6 # asm 1: sub lbelow6=int64#9 # asm 2: sub lbelow6=%g5 sub %i4,6,%g5 # qhasm: lbelow7 = l - 7 # asm 1: sub lbelow7=int64#10 # asm 2: sub lbelow7=%o0 sub %i4,7,%o0 # qhasm: (int64) lbelow5 >>= 63 # asm 1: srax lbelow5=int64#6 # asm 2: srax lbelow5=%i5 srax %i5,63,%i5 # qhasm: lbelow8 = l - 8 # asm 1: sub lbelow8=int64#11 # asm 2: sub lbelow8=%o1 sub %i4,8,%o1 # qhasm: (int64) lbelow6 >>= 63 # asm 1: srax lbelow6=int64#9 # asm 2: srax lbelow6=%g5 srax %g5,63,%g5 # qhasm: m += lbelow5 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%i5,%i3 # qhasm: m10 = *(uint8 *) (m + 4) # asm 1: ldub [m10=int64#12 # asm 2: ldub [m10=%o2 ldub [%i3+4],%o2 # qhasm: (int64) lbelow7 >>= 63 # asm 1: srax lbelow7=int64#10 # asm 2: srax lbelow7=%o0 srax %o0,63,%o0 # qhasm: m += lbelow6 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%g5,%i3 # qhasm: m11 = *(uint8 *) (m + 5) # asm 1: ldub [m11=int64#13 # asm 2: ldub [m11=%o3 ldub [%i3+5],%o3 # qhasm: (int64) lbelow8 >>= 63 # asm 1: srax lbelow8=int64#11 # asm 2: srax lbelow8=%o1 srax %o1,63,%o1 # qhasm: m += lbelow7 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o0,%i3 # qhasm: m12 = *(uint8 *) (m + 6) # asm 1: ldub [m12=int64#15 # asm 2: ldub [m12=%o5 ldub [%i3+6],%o5 # qhasm: m1 <<= 51 # asm 1: sllx m1=int64#14 # asm 2: sllx m1=%o4 sllx %o4,51,%o4 # qhasm: m += lbelow8 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o1,%i3 # qhasm: m13 = *(uint8 *) (m + 7) # asm 1: ldub [m13=int64#16 # asm 2: ldub [m13=%o7 ldub [%i3+7],%o7 # qhasm: m10 &= ~lbelow5 # asm 1: andn m10=int64#12 # asm 2: andn m10=%o2 andn %o2,%i5,%o2 # qhasm: lbelow4 -= lbelow5 # asm 1: sub lbelow4=int64#8 # asm 2: sub lbelow4=%g4 sub %g4,%i5,%g4 # qhasm: m10 += lbelow4 # asm 1: add m10=int64#8 # asm 2: add m10=%g4 add %o2,%g4,%g4 # qhasm: lbelow5 -= lbelow6 # asm 1: sub lbelow5=int64#6 # asm 2: sub lbelow5=%i5 sub %i5,%g5,%i5 # qhasm: m11 &= ~lbelow6 # asm 1: andn m11=int64#12 # asm 2: andn m11=%o2 andn %o3,%g5,%o2 # qhasm: m11 += lbelow5 # asm 1: add m11=int64#6 # asm 2: add m11=%i5 add %o2,%i5,%i5 # qhasm: m11 <<= 8 # asm 1: sllx m11=int64#6 # asm 2: sllx m11=%i5 sllx %i5,8,%i5 # qhasm: m1 += m10 # asm 1: add m1=int64#8 # asm 2: add m1=%g4 add %o4,%g4,%g4 # qhasm: m1 += m11 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %g4,%i5,%i5 # qhasm: m12 &= ~lbelow7 # asm 1: andn m12=int64#8 # asm 2: andn m12=%g4 andn %o5,%o0,%g4 # qhasm: lbelow6 -= lbelow7 # asm 1: sub lbelow6=int64#9 # asm 2: sub lbelow6=%g5 sub %g5,%o0,%g5 # qhasm: m13 &= ~lbelow8 # asm 1: andn m13=int64#12 # asm 2: andn m13=%o2 andn %o7,%o1,%o2 # qhasm: m12 += lbelow6 # asm 1: add m12=int64#8 # asm 2: add m12=%g4 add %g4,%g5,%g4 # qhasm: lbelow7 -= lbelow8 # asm 1: sub lbelow7=int64#9 # asm 2: sub lbelow7=%g5 sub %o0,%o1,%g5 # qhasm: m12 <<= 16 # asm 1: sllx m12=int64#8 # asm 2: sllx m12=%g4 sllx %g4,16,%g4 # qhasm: m13 += lbelow7 # asm 1: add m13=int64#9 # asm 2: add m13=%g5 add %o2,%g5,%g5 # qhasm: m13 <<= 24 # asm 1: sllx m13=int64#9 # asm 2: sllx m13=%g5 sllx %g5,24,%g5 # qhasm: m1 += m12 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %i5,%g4,%i5 # 
qhasm: m1 += m13 # asm 1: add m1=int64#6 # asm 2: add m1=%i5 add %i5,%g5,%i5 # qhasm: m2 = 2279 # asm 1: add %g0,2279,>m2=int64#8 # asm 2: add %g0,2279,>m2=%g4 add %g0,2279,%g4 # qhasm: lbelow9 = l - 9 # asm 1: sub lbelow9=int64#9 # asm 2: sub lbelow9=%g5 sub %i4,9,%g5 # qhasm: m3 = 2343 # asm 1: add %g0,2343,>m3=int64#10 # asm 2: add %g0,2343,>m3=%o0 add %g0,2343,%o0 # qhasm: lbelow10 = l - 10 # asm 1: sub lbelow10=int64#12 # asm 2: sub lbelow10=%o2 sub %i4,10,%o2 # qhasm: lbelow11 = l - 11 # asm 1: sub lbelow11=int64#13 # asm 2: sub lbelow11=%o3 sub %i4,11,%o3 # qhasm: (int64) lbelow9 >>= 63 # asm 1: srax lbelow9=int64#9 # asm 2: srax lbelow9=%g5 srax %g5,63,%g5 # qhasm: lbelow12 = l - 12 # asm 1: sub lbelow12=int64#14 # asm 2: sub lbelow12=%o4 sub %i4,12,%o4 # qhasm: (int64) lbelow10 >>= 63 # asm 1: srax lbelow10=int64#12 # asm 2: srax lbelow10=%o2 srax %o2,63,%o2 # qhasm: m += lbelow9 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%g5,%i3 # qhasm: m20 = *(uint8 *) (m + 8) # asm 1: ldub [m20=int64#15 # asm 2: ldub [m20=%o5 ldub [%i3+8],%o5 # qhasm: (int64) lbelow11 >>= 63 # asm 1: srax lbelow11=int64#13 # asm 2: srax lbelow11=%o3 srax %o3,63,%o3 # qhasm: m += lbelow10 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o2,%i3 # qhasm: m21 = *(uint8 *) (m + 9) # asm 1: ldub [m21=int64#16 # asm 2: ldub [m21=%o7 ldub [%i3+9],%o7 # qhasm: (int64) lbelow12 >>= 63 # asm 1: srax lbelow12=int64#14 # asm 2: srax lbelow12=%o4 srax %o4,63,%o4 # qhasm: m += lbelow11 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o3,%i3 # qhasm: m22 = *(uint8 *) (m + 10) # asm 1: ldub [m22=int64#17 # asm 2: ldub [m22=%l0 ldub [%i3+10],%l0 # qhasm: m2 <<= 51 # asm 1: sllx m2=int64#8 # asm 2: sllx m2=%g4 sllx %g4,51,%g4 # qhasm: m += lbelow12 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o4,%i3 # qhasm: m23 = *(uint8 *) (m + 11) # asm 1: ldub [m23=int64#18 # asm 2: ldub [m23=%l1 ldub [%i3+11],%l1 # qhasm: m20 &= ~lbelow9 # asm 1: andn m20=int64#15 # asm 2: andn m20=%o5 andn %o5,%g5,%o5 # qhasm: lbelow8 -= lbelow9 # asm 1: sub lbelow8=int64#11 # asm 2: sub lbelow8=%o1 sub %o1,%g5,%o1 # qhasm: m20 += lbelow8 # asm 1: add m20=int64#11 # asm 2: add m20=%o1 add %o5,%o1,%o1 # qhasm: lbelow9 -= lbelow10 # asm 1: sub lbelow9=int64#9 # asm 2: sub lbelow9=%g5 sub %g5,%o2,%g5 # qhasm: m21 &= ~lbelow10 # asm 1: andn m21=int64#15 # asm 2: andn m21=%o5 andn %o7,%o2,%o5 # qhasm: m21 += lbelow9 # asm 1: add m21=int64#9 # asm 2: add m21=%g5 add %o5,%g5,%g5 # qhasm: m21 <<= 8 # asm 1: sllx m21=int64#9 # asm 2: sllx m21=%g5 sllx %g5,8,%g5 # qhasm: m2 += m20 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %g4,%o1,%g4 # qhasm: m2 += m21 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %g4,%g5,%g4 # qhasm: m22 &= ~lbelow11 # asm 1: andn m22=int64#9 # asm 2: andn m22=%g5 andn %l0,%o3,%g5 # qhasm: lbelow10 -= lbelow11 # asm 1: sub lbelow10=int64#11 # asm 2: sub lbelow10=%o1 sub %o2,%o3,%o1 # qhasm: m23 &= ~lbelow12 # asm 1: andn m23=int64#12 # asm 2: andn m23=%o2 andn %l1,%o4,%o2 # qhasm: m22 += lbelow10 # asm 1: add m22=int64#9 # asm 2: add m22=%g5 add %g5,%o1,%g5 # qhasm: lbelow11 -= lbelow12 # asm 1: sub lbelow11=int64#11 # asm 2: sub lbelow11=%o1 sub %o3,%o4,%o1 # qhasm: m22 <<= 16 # asm 1: sllx m22=int64#9 # asm 2: sllx m22=%g5 sllx %g5,16,%g5 # qhasm: m23 += lbelow11 # asm 1: add m23=int64#11 # asm 2: add m23=%o1 add %o2,%o1,%o1 # qhasm: m23 <<= 24 # asm 1: sllx m23=int64#11 # asm 2: sllx m23=%o1 sllx %o1,24,%o1 # qhasm: m2 += m22 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %g4,%g5,%g4 # qhasm: m3 <<= 51 # asm 1: sllx 
m3=int64#9 # asm 2: sllx m3=%g5 sllx %o0,51,%g5 # qhasm: lbelow13 = l - 13 # asm 1: sub lbelow13=int64#10 # asm 2: sub lbelow13=%o0 sub %i4,13,%o0 # qhasm: (int64) lbelow13 >>= 63 # asm 1: srax lbelow13=int64#10 # asm 2: srax lbelow13=%o0 srax %o0,63,%o0 # qhasm: lbelow14 = l - 14 # asm 1: sub lbelow14=int64#12 # asm 2: sub lbelow14=%o2 sub %i4,14,%o2 # qhasm: (int64) lbelow14 >>= 63 # asm 1: srax lbelow14=int64#12 # asm 2: srax lbelow14=%o2 srax %o2,63,%o2 # qhasm: m += lbelow13 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o0,%i3 # qhasm: lbelow15 = l - 15 # asm 1: sub lbelow15=int64#5 # asm 2: sub lbelow15=%i4 sub %i4,15,%i4 # qhasm: m30 = *(uint8 *) (m + 12) # asm 1: ldub [m30=int64#13 # asm 2: ldub [m30=%o3 ldub [%i3+12],%o3 # qhasm: (int64) lbelow15 >>= 63 # asm 1: srax lbelow15=int64#5 # asm 2: srax lbelow15=%i4 srax %i4,63,%i4 # qhasm: m += lbelow14 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%o2,%i3 # qhasm: m31 = *(uint8 *) (m + 13) # asm 1: ldub [m31=int64#15 # asm 2: ldub [m31=%o5 ldub [%i3+13],%o5 # qhasm: m += lbelow15 # asm 1: add m=int64#4 # asm 2: add m=%i3 add %i3,%i4,%i3 # qhasm: m2 += m23 # asm 1: add m2=int64#8 # asm 2: add m2=%g4 add %g4,%o1,%g4 # qhasm: m32 = *(uint8 *) (m + 14) # asm 1: ldub [m32=int64#4 # asm 2: ldub [m32=%i3 ldub [%i3+14],%i3 # qhasm: m30 &= ~lbelow13 # asm 1: andn m30=int64#11 # asm 2: andn m30=%o1 andn %o3,%o0,%o1 # qhasm: lbelow12 -= lbelow13 # asm 1: sub lbelow12=int64#13 # asm 2: sub lbelow12=%o3 sub %o4,%o0,%o3 # qhasm: m30 += lbelow12 # asm 1: add m30=int64#11 # asm 2: add m30=%o1 add %o1,%o3,%o1 # qhasm: lbelow13 -= lbelow14 # asm 1: sub lbelow13=int64#10 # asm 2: sub lbelow13=%o0 sub %o0,%o2,%o0 # qhasm: m3 += m30 # asm 1: add m3=int64#9 # asm 2: add m3=%g5 add %g5,%o1,%g5 # qhasm: m31 &= ~lbelow14 # asm 1: andn m31=int64#11 # asm 2: andn m31=%o1 andn %o5,%o2,%o1 # qhasm: m31 += lbelow13 # asm 1: add m31=int64#10 # asm 2: add m31=%o0 add %o1,%o0,%o0 # qhasm: m32 &= ~lbelow15 # asm 1: andn m32=int64#4 # asm 2: andn m32=%i3 andn %i3,%i4,%i3 # qhasm: m31 <<= 8 # asm 1: sllx m31=int64#10 # asm 2: sllx m31=%o0 sllx %o0,8,%o0 # qhasm: lbelow14 -= lbelow15 # asm 1: sub lbelow14=int64#11 # asm 2: sub lbelow14=%o1 sub %o2,%i4,%o1 # qhasm: m3 += m31 # asm 1: add m3=int64#9 # asm 2: add m3=%g5 add %g5,%o0,%g5 # qhasm: m32 += lbelow14 # asm 1: add m32=int64#4 # asm 2: add m32=%i3 add %i3,%o1,%i3 # qhasm: d0 = m0 # asm 1: stx d0=stack64#7] # asm 2: stx d0=48] stx %i1,[%fp+2023-48] # qhasm: m32 <<= 16 # asm 1: sllx m32=int64#2 # asm 2: sllx m32=%i1 sllx %i3,16,%i1 # qhasm: m33 = lbelow15 + 1 # asm 1: add m33=int64#4 # asm 2: add m33=%i3 add %i4,1,%i3 # qhasm: d1 = m1 # asm 1: stx d1=stack64#8] # asm 2: stx d1=56] stx %i5,[%fp+2023-56] # qhasm: m33 <<= 24 # asm 1: sllx m33=int64#4 # asm 2: sllx m33=%i3 sllx %i3,24,%i3 # qhasm: m3 += m32 # asm 1: add m3=int64#2 # asm 2: add m3=%i1 add %g5,%i1,%i1 # qhasm: d2 = m2 # asm 1: stx d2=stack64#9] # asm 2: stx d2=64] stx %g4,[%fp+2023-64] # qhasm: m3 += m33 # asm 1: add m3=int64#2 # asm 2: add m3=%i1 add %i1,%i3,%i1 # qhasm: d3 = m3 # asm 1: stx d3=stack64#10] # asm 2: stx d3=72] stx %i1,[%fp+2023-72] # qhasm: alpha0 = *(float64 *) (constants + 24) # asm 1: ldd [alpha0=float64#4 # asm 2: ldd [alpha0=%f6 ldd [%g1+24],%f6 # qhasm: z3 = d3 # asm 1: ldd [%fp+2023-z3=float64#21 # asm 2: ldd [%fp+2023-z3=%f40 ldd [%fp+2023-72],%f40 # qhasm: z2 = d2 # asm 1: ldd [%fp+2023-z2=float64#23 # asm 2: ldd [%fp+2023-z2=%f44 ldd [%fp+2023-64],%f44 # qhasm: z1 = d1 # asm 1: ldd [%fp+2023-z1=float64#24 # asm 2: ldd 
[%fp+2023-z1=%f46 ldd [%fp+2023-56],%f46 # qhasm: z0 = d0 # asm 1: ldd [%fp+2023-z0=float64#25 # asm 2: ldd [%fp+2023-z0=%f48 ldd [%fp+2023-48],%f48 # qhasm: z3 -= alpha96 # asm 1: fsubd z3=float64#21 # asm 2: fsubd z3=%f40 fsubd %f40,%f36,%f40 # qhasm: z2 -= alpha64 # asm 1: fsubd z2=float64#23 # asm 2: fsubd z2=%f44 fsubd %f44,%f10,%f44 # qhasm: z1 -= alpha32 # asm 1: fsubd z1=float64#24 # asm 2: fsubd z1=%f46 fsubd %f46,%f0,%f46 # qhasm: z0 -= alpha0 # asm 1: fsubd z0=float64#4 # asm 2: fsubd z0=%f6 fsubd %f48,%f6,%f6 # qhasm: h5 += z3 # asm 1: faddd h5=float64#11 # asm 2: faddd h5=%f20 faddd %f20,%f40,%f20 # qhasm: h3 += z2 # asm 1: faddd h3=float64#7 # asm 2: faddd h3=%f12 faddd %f12,%f44,%f12 # qhasm: h1 += z1 # asm 1: faddd h1=float64#3 # asm 2: faddd h1=%f4 faddd %f4,%f46,%f4 # qhasm: h0 += z0 # asm 1: faddd h0=float64#2 # asm 2: faddd h0=%f2 faddd %f2,%f6,%f2 # qhasm: y7 = h7 + alpha130 # asm 1: faddd y7=float64#4 # asm 2: faddd y7=%f6 faddd %f28,%f56,%f6 # qhasm: y6 = h6 + alpha130 # asm 1: faddd y6=float64#21 # asm 2: faddd y6=%f40 faddd %f24,%f56,%f40 # qhasm: y1 = h1 + alpha32 # asm 1: faddd y1=float64#23 # asm 2: faddd y1=%f44 faddd %f4,%f0,%f44 # qhasm: y0 = h0 + alpha32 # asm 1: faddd y0=float64#24 # asm 2: faddd y0=%f46 faddd %f2,%f0,%f46 # qhasm: y7 -= alpha130 # asm 1: fsubd y7=float64#4 # asm 2: fsubd y7=%f6 fsubd %f6,%f56,%f6 # qhasm: y6 -= alpha130 # asm 1: fsubd y6=float64#21 # asm 2: fsubd y6=%f40 fsubd %f40,%f56,%f40 # qhasm: y1 -= alpha32 # asm 1: fsubd y1=float64#23 # asm 2: fsubd y1=%f44 fsubd %f44,%f0,%f44 # qhasm: y0 -= alpha32 # asm 1: fsubd y0=float64#24 # asm 2: fsubd y0=%f46 fsubd %f46,%f0,%f46 # qhasm: y5 = h5 + alpha96 # asm 1: faddd y5=float64#25 # asm 2: faddd y5=%f48 faddd %f20,%f36,%f48 # qhasm: y4 = h4 + alpha96 # asm 1: faddd y4=float64#26 # asm 2: faddd y4=%f50 faddd %f16,%f36,%f50 # qhasm: x7 = h7 - y7 # asm 1: fsubd x7=float64#15 # asm 2: fsubd x7=%f28 fsubd %f28,%f6,%f28 # qhasm: y7 *= scale # asm 1: fmuld y7=float64#4 # asm 2: fmuld y7=%f6 fmuld %f6,%f34,%f6 # qhasm: x6 = h6 - y6 # asm 1: fsubd x6=float64#13 # asm 2: fsubd x6=%f24 fsubd %f24,%f40,%f24 # qhasm: y6 *= scale # asm 1: fmuld y6=float64#21 # asm 2: fmuld y6=%f40 fmuld %f40,%f34,%f40 # qhasm: x1 = h1 - y1 # asm 1: fsubd x1=float64#3 # asm 2: fsubd x1=%f4 fsubd %f4,%f44,%f4 # qhasm: x0 = h0 - y0 # asm 1: fsubd x0=float64#2 # asm 2: fsubd x0=%f2 fsubd %f2,%f46,%f2 # qhasm: y5 -= alpha96 # asm 1: fsubd y5=float64#25 # asm 2: fsubd y5=%f48 fsubd %f48,%f36,%f48 # qhasm: y4 -= alpha96 # asm 1: fsubd y4=float64#26 # asm 2: fsubd y4=%f50 fsubd %f50,%f36,%f50 # qhasm: x1 += y7 # asm 1: faddd x1=float64#3 # asm 2: faddd x1=%f4 faddd %f4,%f6,%f4 # qhasm: x0 += y6 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f40,%f2 # qhasm: x7 += y5 # asm 1: faddd x7=float64#4 # asm 2: faddd x7=%f6 faddd %f28,%f48,%f6 # qhasm: x6 += y4 # asm 1: faddd x6=float64#13 # asm 2: faddd x6=%f24 faddd %f24,%f50,%f24 # qhasm: y3 = h3 + alpha64 # asm 1: faddd y3=float64#15 # asm 2: faddd y3=%f28 faddd %f12,%f10,%f28 # qhasm: y2 = h2 + alpha64 # asm 1: faddd y2=float64#21 # asm 2: faddd y2=%f40 faddd %f8,%f10,%f40 # qhasm: x0 += x1 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f4,%f2 # qhasm: x6 += x7 # asm 1: faddd x6=float64#3 # asm 2: faddd x6=%f4 faddd %f24,%f6,%f4 # qhasm: y3 -= alpha64 # asm 1: fsubd y3=float64#4 # asm 2: fsubd y3=%f6 fsubd %f28,%f10,%f6 # qhasm: y2 -= alpha64 # asm 1: fsubd y2=float64#13 # asm 2: fsubd y2=%f24 fsubd %f40,%f10,%f24 # qhasm: x5 = h5 - y5 # asm 1: fsubd 
x5=float64#11 # asm 2: fsubd x5=%f20 fsubd %f20,%f48,%f20 # qhasm: r3lowx0 = r3low * x0 # asm 1: fmuld r3lowx0=float64#15 # asm 2: fmuld r3lowx0=%f28 fmuld %f38,%f2,%f28 # qhasm: x4 = h4 - y4 # asm 1: fsubd x4=float64#9 # asm 2: fsubd x4=%f16 fsubd %f16,%f50,%f16 # qhasm: r0lowx6 = r0low * x6 # asm 1: fmuld r0lowx6=float64#20 # asm 2: fmuld r0lowx6=%f38 fmuld %f18,%f4,%f38 # qhasm: x3 = h3 - y3 # asm 1: fsubd x3=float64#7 # asm 2: fsubd x3=%f12 fsubd %f12,%f6,%f12 # qhasm: r3highx0 = r3high * x0 # asm 1: fmuld r3highx0=float64#21 # asm 2: fmuld r3highx0=%f40 fmuld %f42,%f2,%f40 # qhasm: sr1low = sr1low_stack # asm 1: ldd [%fp+2023-sr1low=float64#22 # asm 2: ldd [%fp+2023-sr1low=%f42 ldd [%fp+2023-16],%f42 # qhasm: x2 = h2 - y2 # asm 1: fsubd x2=float64#5 # asm 2: fsubd x2=%f8 fsubd %f8,%f24,%f8 # qhasm: r0highx6 = r0high * x6 # asm 1: fmuld r0highx6=float64#25 # asm 2: fmuld r0highx6=%f48 fmuld %f14,%f4,%f48 # qhasm: sr1high = sr1high_stack # asm 1: ldd [%fp+2023-sr1high=float64#26 # asm 2: ldd [%fp+2023-sr1high=%f50 ldd [%fp+2023-0],%f50 # qhasm: x5 += y3 # asm 1: faddd x5=float64#4 # asm 2: faddd x5=%f6 faddd %f20,%f6,%f6 # qhasm: r0lowx0 = r0low * x0 # asm 1: fmuld r0lowx0=float64#11 # asm 2: fmuld r0lowx0=%f20 fmuld %f18,%f2,%f20 # qhasm: h6 = r3lowx0 + r0lowx6 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f38,%f28 # qhasm: sr1lowx6 = sr1low * x6 # asm 1: fmuld sr1lowx6=float64#20 # asm 2: fmuld sr1lowx6=%f38 fmuld %f42,%f4,%f38 # qhasm: x4 += y2 # asm 1: faddd x4=float64#9 # asm 2: faddd x4=%f16 faddd %f16,%f24,%f16 # qhasm: r0highx0 = r0high * x0 # asm 1: fmuld r0highx0=float64#13 # asm 2: fmuld r0highx0=%f24 fmuld %f14,%f2,%f24 # qhasm: sr2low = sr2low_stack # asm 1: ldd [%fp+2023-sr2low=float64#22 # asm 2: ldd [%fp+2023-sr2low=%f42 ldd [%fp+2023-24],%f42 # qhasm: h7 = r3highx0 + r0highx6 # asm 1: faddd h7=float64#21 # asm 2: faddd h7=%f40 faddd %f40,%f48,%f40 # qhasm: sr1highx6 = sr1high * x6 # asm 1: fmuld sr1highx6=float64#25 # asm 2: fmuld sr1highx6=%f48 fmuld %f50,%f4,%f48 # qhasm: sr2high = sr2high_stack # asm 1: ldd [%fp+2023-sr2high=float64#26 # asm 2: ldd [%fp+2023-sr2high=%f50 ldd [%fp+2023-8],%f50 # qhasm: x3 += y1 # asm 1: faddd x3=float64#7 # asm 2: faddd x3=%f12 faddd %f12,%f44,%f12 # qhasm: r1lowx0 = r1low * x0 # asm 1: fmuld r1lowx0=float64#23 # asm 2: fmuld r1lowx0=%f44 fmuld %f22,%f2,%f44 # qhasm: h0 = r0lowx0 + sr1lowx6 # asm 1: faddd h0=float64#11 # asm 2: faddd h0=%f20 faddd %f20,%f38,%f20 # qhasm: sr2lowx6 = sr2low * x6 # asm 1: fmuld sr2lowx6=float64#20 # asm 2: fmuld sr2lowx6=%f38 fmuld %f42,%f4,%f38 # qhasm: x2 += y0 # asm 1: faddd x2=float64#5 # asm 2: faddd x2=%f8 faddd %f8,%f46,%f8 # qhasm: r1highx0 = r1high * x0 # asm 1: fmuld r1highx0=float64#24 # asm 2: fmuld r1highx0=%f46 fmuld %f30,%f2,%f46 # qhasm: sr3low = sr3low_stack # asm 1: ldd [%fp+2023-sr3low=float64#27 # asm 2: ldd [%fp+2023-sr3low=%f52 ldd [%fp+2023-40],%f52 # qhasm: h1 = r0highx0 + sr1highx6 # asm 1: faddd h1=float64#13 # asm 2: faddd h1=%f24 faddd %f24,%f48,%f24 # qhasm: sr2highx6 = sr2high * x6 # asm 1: fmuld sr2highx6=float64#25 # asm 2: fmuld sr2highx6=%f48 fmuld %f50,%f4,%f48 # qhasm: sr3high = sr3high_stack # asm 1: ldd [%fp+2023-sr3high=float64#28 # asm 2: ldd [%fp+2023-sr3high=%f54 ldd [%fp+2023-32],%f54 # qhasm: x4 += x5 # asm 1: faddd x4=float64#4 # asm 2: faddd x4=%f6 faddd %f16,%f6,%f6 # qhasm: r2lowx0 = r2low * x0 # asm 1: fmuld r2lowx0=float64#9 # asm 2: fmuld r2lowx0=%f16 fmuld %f26,%f2,%f16 # qhasm: h2 = r1lowx0 + sr2lowx6 # asm 1: faddd h2=float64#20 # 
asm 2: faddd h2=%f38 faddd %f44,%f38,%f38 # qhasm: sr3lowx6 = sr3low * x6 # asm 1: fmuld sr3lowx6=float64#23 # asm 2: fmuld sr3lowx6=%f44 fmuld %f52,%f4,%f44 # qhasm: x2 += x3 # asm 1: faddd x2=float64#30 # asm 2: faddd x2=%f58 faddd %f8,%f12,%f58 # qhasm: r2highx0 = r2high * x0 # asm 1: fmuld r2highx0=float64#2 # asm 2: fmuld r2highx0=%f2 fmuld %f32,%f2,%f2 # qhasm: h3 = r1highx0 + sr2highx6 # asm 1: faddd h3=float64#5 # asm 2: faddd h3=%f8 faddd %f46,%f48,%f8 # qhasm: sr3highx6 = sr3high * x6 # asm 1: fmuld sr3highx6=float64#3 # asm 2: fmuld sr3highx6=%f4 fmuld %f54,%f4,%f4 # qhasm: r1highx4 = r1high * x4 # asm 1: fmuld r1highx4=float64#7 # asm 2: fmuld r1highx4=%f12 fmuld %f30,%f6,%f12 # qhasm: h4 = r2lowx0 + sr3lowx6 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f16,%f44,%f16 # qhasm: r1lowx4 = r1low * x4 # asm 1: fmuld r1lowx4=float64#23 # asm 2: fmuld r1lowx4=%f44 fmuld %f22,%f6,%f44 # qhasm: r0highx4 = r0high * x4 # asm 1: fmuld r0highx4=float64#24 # asm 2: fmuld r0highx4=%f46 fmuld %f14,%f6,%f46 # qhasm: h5 = r2highx0 + sr3highx6 # asm 1: faddd h5=float64#2 # asm 2: faddd h5=%f2 faddd %f2,%f4,%f2 # qhasm: r0lowx4 = r0low * x4 # asm 1: fmuld r0lowx4=float64#3 # asm 2: fmuld r0lowx4=%f4 fmuld %f18,%f6,%f4 # qhasm: h7 += r1highx4 # asm 1: faddd h7=float64#21 # asm 2: faddd h7=%f40 faddd %f40,%f12,%f40 # qhasm: sr3highx4 = sr3high * x4 # asm 1: fmuld sr3highx4=float64#7 # asm 2: fmuld sr3highx4=%f12 fmuld %f54,%f6,%f12 # qhasm: h6 += r1lowx4 # asm 1: faddd h6=float64#15 # asm 2: faddd h6=%f28 faddd %f28,%f44,%f28 # qhasm: sr3lowx4 = sr3low * x4 # asm 1: fmuld sr3lowx4=float64#23 # asm 2: fmuld sr3lowx4=%f44 fmuld %f52,%f6,%f44 # qhasm: h5 += r0highx4 # asm 1: faddd h5=float64#2 # asm 2: faddd h5=%f2 faddd %f2,%f46,%f2 # qhasm: sr2highx4 = sr2high * x4 # asm 1: fmuld sr2highx4=float64#24 # asm 2: fmuld sr2highx4=%f46 fmuld %f50,%f6,%f46 # qhasm: h4 += r0lowx4 # asm 1: faddd h4=float64#3 # asm 2: faddd h4=%f4 faddd %f16,%f4,%f4 # qhasm: sr2lowx4 = sr2low * x4 # asm 1: fmuld sr2lowx4=float64#4 # asm 2: fmuld sr2lowx4=%f6 fmuld %f42,%f6,%f6 # qhasm: h3 += sr3highx4 # asm 1: faddd h3=float64#7 # asm 2: faddd h3=%f12 faddd %f8,%f12,%f12 # qhasm: r0lowx2 = r0low * x2 # asm 1: fmuld r0lowx2=float64#5 # asm 2: fmuld r0lowx2=%f8 fmuld %f18,%f58,%f8 # qhasm: h2 += sr3lowx4 # asm 1: faddd h2=float64#9 # asm 2: faddd h2=%f16 faddd %f38,%f44,%f16 # qhasm: r0highx2 = r0high * x2 # asm 1: fmuld r0highx2=float64#8 # asm 2: fmuld r0highx2=%f14 fmuld %f14,%f58,%f14 # qhasm: h1 += sr2highx4 # asm 1: faddd h1=float64#10 # asm 2: faddd h1=%f18 faddd %f24,%f46,%f18 # qhasm: r1lowx2 = r1low * x2 # asm 1: fmuld r1lowx2=float64#12 # asm 2: fmuld r1lowx2=%f22 fmuld %f22,%f58,%f22 # qhasm: h0 += sr2lowx4 # asm 1: faddd h0=float64#4 # asm 2: faddd h0=%f6 faddd %f20,%f6,%f6 # qhasm: r1highx2 = r1high * x2 # asm 1: fmuld r1highx2=float64#11 # asm 2: fmuld r1highx2=%f20 fmuld %f30,%f58,%f20 # qhasm: h2 += r0lowx2 # asm 1: faddd h2=float64#5 # asm 2: faddd h2=%f8 faddd %f16,%f8,%f8 # qhasm: r2lowx2 = r2low * x2 # asm 1: fmuld r2lowx2=float64#13 # asm 2: fmuld r2lowx2=%f24 fmuld %f26,%f58,%f24 # qhasm: h3 += r0highx2 # asm 1: faddd h3=float64#7 # asm 2: faddd h3=%f12 faddd %f12,%f14,%f12 # qhasm: r2highx2 = r2high * x2 # asm 1: fmuld r2highx2=float64#8 # asm 2: fmuld r2highx2=%f14 fmuld %f32,%f58,%f14 # qhasm: h4 += r1lowx2 # asm 1: faddd h4=float64#9 # asm 2: faddd h4=%f16 faddd %f4,%f22,%f16 # qhasm: sr3lowx2 = sr3low * x2 # asm 1: fmuld sr3lowx2=float64#3 # asm 2: fmuld sr3lowx2=%f4 fmuld %f52,%f58,%f4 # 
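
# Note (added commentary, not part of the qhasm-generated output): the
# surrounding fmuld/faddd block multiplies the accumulator limbs x0..x7
# (low/high double pairs of four 32-bit limbs) by r, split into the
# r0..r3 low/high doubles.  Partial products that would land at or above
# 2^130 use the pre-scaled copies sr1..sr3 from sr*_stack instead, which
# folds the reduction modulo 2^130 - 5 into the multiplication.  Two
# representative rows, written as plain arithmetic (illustrative only):
#
#   h0 = x0*r0low + x6*sr1low + x4*sr2low + x2*sr3low
#   h6 = x0*r3low + x6*r0low  + x4*r1low  + x2*r2low
#
# (That each sr_i is r_i scaled by 5/2^130 is inferred from this structure;
# the sr values themselves are prepared earlier and only reloaded here.)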
qhasm: h5 += r1highx2 # asm 1: faddd h5=float64#11 # asm 2: faddd h5=%f20 faddd %f2,%f20,%f20 # qhasm: sr3highx2 = sr3high * x2 # asm 1: fmuld sr3highx2=float64#12 # asm 2: fmuld sr3highx2=%f22 fmuld %f54,%f58,%f22 # qhasm: h6 += r2lowx2 # asm 1: faddd h6=float64#13 # asm 2: faddd h6=%f24 faddd %f28,%f24,%f24 # qhasm: h7 += r2highx2 # asm 1: faddd h7=float64#15 # asm 2: faddd h7=%f28 faddd %f40,%f14,%f28 # qhasm: h0 += sr3lowx2 # asm 1: faddd h0=float64#2 # asm 2: faddd h0=%f2 faddd %f6,%f4,%f2 # qhasm: h1 += sr3highx2 # asm 1: faddd h1=float64#3 # asm 2: faddd h1=%f4 faddd %f18,%f22,%f4 # qhasm: nomorebytes: ._nomorebytes: # qhasm: offset0 = *(float64 *) (constants + 104) # asm 1: ldd [offset0=float64#4 # asm 2: ldd [offset0=%f6 ldd [%g1+104],%f6 # qhasm: y7 = h7 + alpha130 # asm 1: faddd y7=float64#8 # asm 2: faddd y7=%f14 faddd %f28,%f56,%f14 # qhasm: offset1 = *(float64 *) (constants + 112) # asm 1: ldd [offset1=float64#10 # asm 2: ldd [offset1=%f18 ldd [%g1+112],%f18 # qhasm: y0 = h0 + alpha32 # asm 1: faddd y0=float64#12 # asm 2: faddd y0=%f22 faddd %f2,%f0,%f22 # qhasm: offset2 = *(float64 *) (constants + 120) # asm 1: ldd [offset2=float64#14 # asm 2: ldd [offset2=%f26 ldd [%g1+120],%f26 # qhasm: y1 = h1 + alpha32 # asm 1: faddd y1=float64#16 # asm 2: faddd y1=%f30 faddd %f4,%f0,%f30 # qhasm: offset3 = *(float64 *) (constants + 128) # asm 1: ldd [offset3=float64#17 # asm 2: ldd [offset3=%f32 ldd [%g1+128],%f32 # qhasm: y2 = h2 + alpha64 # asm 1: faddd y2=float64#20 # asm 2: faddd y2=%f38 faddd %f8,%f10,%f38 # qhasm: y7 -= alpha130 # asm 1: fsubd y7=float64#8 # asm 2: fsubd y7=%f14 fsubd %f14,%f56,%f14 # qhasm: y3 = h3 + alpha64 # asm 1: faddd y3=float64#21 # asm 2: faddd y3=%f40 faddd %f12,%f10,%f40 # qhasm: y4 = h4 + alpha96 # asm 1: faddd y4=float64#22 # asm 2: faddd y4=%f42 faddd %f16,%f36,%f42 # qhasm: y5 = h5 + alpha96 # asm 1: faddd y5=float64#23 # asm 2: faddd y5=%f44 faddd %f20,%f36,%f44 # qhasm: x7 = h7 - y7 # asm 1: fsubd x7=float64#15 # asm 2: fsubd x7=%f28 fsubd %f28,%f14,%f28 # qhasm: y7 *= scale # asm 1: fmuld y7=float64#8 # asm 2: fmuld y7=%f14 fmuld %f14,%f34,%f14 # qhasm: y0 -= alpha32 # asm 1: fsubd y0=float64#12 # asm 2: fsubd y0=%f22 fsubd %f22,%f0,%f22 # qhasm: y1 -= alpha32 # asm 1: fsubd y1=float64#1 # asm 2: fsubd y1=%f0 fsubd %f30,%f0,%f0 # qhasm: y2 -= alpha64 # asm 1: fsubd y2=float64#16 # asm 2: fsubd y2=%f30 fsubd %f38,%f10,%f30 # qhasm: h6 += x7 # asm 1: faddd h6=float64#13 # asm 2: faddd h6=%f24 faddd %f24,%f28,%f24 # qhasm: y3 -= alpha64 # asm 1: fsubd y3=float64#6 # asm 2: fsubd y3=%f10 fsubd %f40,%f10,%f10 # qhasm: y4 -= alpha96 # asm 1: fsubd y4=float64#15 # asm 2: fsubd y4=%f28 fsubd %f42,%f36,%f28 # qhasm: y5 -= alpha96 # asm 1: fsubd y5=float64#19 # asm 2: fsubd y5=%f36 fsubd %f44,%f36,%f36 # qhasm: y6 = h6 + alpha130 # asm 1: faddd y6=float64#20 # asm 2: faddd y6=%f38 faddd %f24,%f56,%f38 # qhasm: x0 = h0 - y0 # asm 1: fsubd x0=float64#2 # asm 2: fsubd x0=%f2 fsubd %f2,%f22,%f2 # qhasm: x1 = h1 - y1 # asm 1: fsubd x1=float64#3 # asm 2: fsubd x1=%f4 fsubd %f4,%f0,%f4 # qhasm: x2 = h2 - y2 # asm 1: fsubd x2=float64#5 # asm 2: fsubd x2=%f8 fsubd %f8,%f30,%f8 # qhasm: y6 -= alpha130 # asm 1: fsubd y6=float64#20 # asm 2: fsubd y6=%f38 fsubd %f38,%f56,%f38 # qhasm: x0 += y7 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f14,%f2 # qhasm: x3 = h3 - y3 # asm 1: fsubd x3=float64#7 # asm 2: fsubd x3=%f12 fsubd %f12,%f10,%f12 # qhasm: x4 = h4 - y4 # asm 1: fsubd x4=float64#8 # asm 2: fsubd x4=%f14 fsubd %f16,%f28,%f14 # qhasm: x5 = h5 - y5 # 
asm 1: fsubd x5=float64#9 # asm 2: fsubd x5=%f16 fsubd %f20,%f36,%f16 # qhasm: x6 = h6 - y6 # asm 1: fsubd x6=float64#11 # asm 2: fsubd x6=%f20 fsubd %f24,%f38,%f20 # qhasm: y6 *= scale # asm 1: fmuld y6=float64#13 # asm 2: fmuld y6=%f24 fmuld %f38,%f34,%f24 # qhasm: x2 += y0 # asm 1: faddd x2=float64#5 # asm 2: faddd x2=%f8 faddd %f8,%f22,%f8 # qhasm: x3 += y1 # asm 1: faddd x3=float64#1 # asm 2: faddd x3=%f0 faddd %f12,%f0,%f0 # qhasm: x4 += y2 # asm 1: faddd x4=float64#7 # asm 2: faddd x4=%f12 faddd %f14,%f30,%f12 # qhasm: x0 += y6 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f24,%f2 # qhasm: x5 += y3 # asm 1: faddd x5=float64#6 # asm 2: faddd x5=%f10 faddd %f16,%f10,%f10 # qhasm: x6 += y4 # asm 1: faddd x6=float64#8 # asm 2: faddd x6=%f14 faddd %f20,%f28,%f14 # qhasm: x2 += x3 # asm 1: faddd x2=float64#1 # asm 2: faddd x2=%f0 faddd %f8,%f0,%f0 # qhasm: x0 += x1 # asm 1: faddd x0=float64#2 # asm 2: faddd x0=%f2 faddd %f2,%f4,%f2 # qhasm: x4 += x5 # asm 1: faddd x4=float64#3 # asm 2: faddd x4=%f4 faddd %f12,%f10,%f4 # qhasm: x6 += y5 # asm 1: faddd x6=float64#5 # asm 2: faddd x6=%f8 faddd %f14,%f36,%f8 # qhasm: x2 += offset1 # asm 1: faddd x2=float64#1 # asm 2: faddd x2=%f0 faddd %f0,%f18,%f0 # qhasm: d1 = x2 # asm 1: std d1=stack64#1] # asm 2: std d1=0] std %f0,[%fp+2023-0] # qhasm: x0 += offset0 # asm 1: faddd x0=float64#1 # asm 2: faddd x0=%f0 faddd %f2,%f6,%f0 # qhasm: d0 = x0 # asm 1: std d0=stack64#2] # asm 2: std d0=8] std %f0,[%fp+2023-8] # qhasm: x4 += offset2 # asm 1: faddd x4=float64#1 # asm 2: faddd x4=%f0 faddd %f4,%f26,%f0 # qhasm: d2 = x4 # asm 1: std d2=stack64#3] # asm 2: std d2=16] std %f0,[%fp+2023-16] # qhasm: x6 += offset3 # asm 1: faddd x6=float64#1 # asm 2: faddd x6=%f0 faddd %f8,%f32,%f0 # qhasm: d3 = x6 # asm 1: std d3=stack64#4] # asm 2: std d3=24] std %f0,[%fp+2023-24] # qhasm: int64 s00 # qhasm: int64 s01 # qhasm: int64 s02 # qhasm: int64 s03 # qhasm: int64 s10 # qhasm: int64 s11 # qhasm: int64 s12 # qhasm: int64 s13 # qhasm: int64 s20 # qhasm: int64 s21 # qhasm: int64 s22 # qhasm: int64 s23 # qhasm: int64 s30 # qhasm: int64 s31 # qhasm: int64 s32 # qhasm: int64 s33 # qhasm: int64 bits32 # qhasm: int64 f # qhasm: int64 f0 # qhasm: int64 f1 # qhasm: int64 f2 # qhasm: int64 f3 # qhasm: int64 f4 # qhasm: int64 g # qhasm: int64 g0 # qhasm: int64 g1 # qhasm: int64 g2 # qhasm: int64 g3 # qhasm: int64 g4 # qhasm: f0 = d0 # asm 1: ldx [%fp+2023-f0=int64#2 # asm 2: ldx [%fp+2023-f0=%i1 ldx [%fp+2023-8],%i1 # qhasm: f1 = d1 # asm 1: ldx [%fp+2023-f1=int64#4 # asm 2: ldx [%fp+2023-f1=%i3 ldx [%fp+2023-0],%i3 # qhasm: bits32 = -1 # asm 1: sub %g0,1,>bits32=int64#5 # asm 2: sub %g0,1,>bits32=%i4 sub %g0,1,%i4 # qhasm: f2 = d2 # asm 1: ldx [%fp+2023-f2=int64#6 # asm 2: ldx [%fp+2023-f2=%i5 ldx [%fp+2023-16],%i5 # qhasm: (uint64) bits32 >>= 32 # asm 1: srlx bits32=int64#5 # asm 2: srlx bits32=%i4 srlx %i4,32,%i4 # qhasm: f3 = d3 # asm 1: ldx [%fp+2023-f3=int64#7 # asm 2: ldx [%fp+2023-f3=%g1 ldx [%fp+2023-24],%g1 # qhasm: f = (uint64) f0 >> 32 # asm 1: srlx f=int64#8 # asm 2: srlx f=%g4 srlx %i1,32,%g4 # qhasm: f0 &= bits32 # asm 1: and f0=int64#2 # asm 2: and f0=%i1 and %i1,%i4,%i1 # qhasm: f &= 255 # asm 1: and f=int64#8 # asm 2: and f=%g4 and %g4,255,%g4 # qhasm: f1 += f # asm 1: add f1=int64#4 # asm 2: add f1=%i3 add %i3,%g4,%i3 # qhasm: g0 = f0 + 5 # asm 1: add g0=int64#8 # asm 2: add g0=%g4 add %i1,5,%g4 # qhasm: g = (uint64) g0 >> 32 # asm 1: srlx g=int64#9 # asm 2: srlx g=%g5 srlx %g4,32,%g5 # qhasm: g0 &= bits32 # asm 1: and g0=int64#8 # asm 2: and 
g0=%g4 and %g4,%i4,%g4 # qhasm: f = (uint64) f1 >> 32 # asm 1: srlx f=int64#10 # asm 2: srlx f=%o0 srlx %i3,32,%o0 # qhasm: f1 &= bits32 # asm 1: and f1=int64#4 # asm 2: and f1=%i3 and %i3,%i4,%i3 # qhasm: f &= 255 # asm 1: and f=int64#10 # asm 2: and f=%o0 and %o0,255,%o0 # qhasm: g1 = f1 + g # asm 1: add g1=int64#9 # asm 2: add g1=%g5 add %i3,%g5,%g5 # qhasm: g = (uint64) g1 >> 32 # asm 1: srlx g=int64#11 # asm 2: srlx g=%o1 srlx %g5,32,%o1 # qhasm: f2 += f # asm 1: add f2=int64#6 # asm 2: add f2=%i5 add %i5,%o0,%i5 # qhasm: f = (uint64) f2 >> 32 # asm 1: srlx f=int64#10 # asm 2: srlx f=%o0 srlx %i5,32,%o0 # qhasm: g1 &= bits32 # asm 1: and g1=int64#9 # asm 2: and g1=%g5 and %g5,%i4,%g5 # qhasm: f2 &= bits32 # asm 1: and f2=int64#6 # asm 2: and f2=%i5 and %i5,%i4,%i5 # qhasm: f &= 255 # asm 1: and f=int64#10 # asm 2: and f=%o0 and %o0,255,%o0 # qhasm: f3 += f # asm 1: add f3=int64#7 # asm 2: add f3=%g1 add %g1,%o0,%g1 # qhasm: g2 = f2 + g # asm 1: add g2=int64#10 # asm 2: add g2=%o0 add %i5,%o1,%o0 # qhasm: g = (uint64) g2 >> 32 # asm 1: srlx g=int64#11 # asm 2: srlx g=%o1 srlx %o0,32,%o1 # qhasm: g2 &= bits32 # asm 1: and g2=int64#10 # asm 2: and g2=%o0 and %o0,%i4,%o0 # qhasm: f4 = (uint64) f3 >> 32 # asm 1: srlx f4=int64#12 # asm 2: srlx f4=%o2 srlx %g1,32,%o2 # qhasm: f3 &= bits32 # asm 1: and f3=int64#7 # asm 2: and f3=%g1 and %g1,%i4,%g1 # qhasm: f4 &= 255 # asm 1: and f4=int64#12 # asm 2: and f4=%o2 and %o2,255,%o2 # qhasm: g3 = f3 + g # asm 1: add g3=int64#11 # asm 2: add g3=%o1 add %g1,%o1,%o1 # qhasm: g = (uint64) g3 >> 32 # asm 1: srlx g=int64#13 # asm 2: srlx g=%o3 srlx %o1,32,%o3 # qhasm: g3 &= bits32 # asm 1: and g3=int64#5 # asm 2: and g3=%i4 and %o1,%i4,%i4 # qhasm: g4 = f4 + g # asm 1: add g4=int64#11 # asm 2: add g4=%o1 add %o2,%o3,%o1 # qhasm: g4 = g4 - 4 # asm 1: sub g4=int64#11 # asm 2: sub g4=%o1 sub %o1,4,%o1 # qhasm: s00 = *(uint8 *) (s + 0) # asm 1: ldub [s00=int64#12 # asm 2: ldub [s00=%o2 ldub [%i2+0],%o2 # qhasm: f = (int64) g4 >> 63 # asm 1: srax f=int64#11 # asm 2: srax f=%o1 srax %o1,63,%o1 # qhasm: s01 = *(uint8 *) (s + 1) # asm 1: ldub [s01=int64#13 # asm 2: ldub [s01=%o3 ldub [%i2+1],%o3 # qhasm: f0 &= f # asm 1: and f0=int64#2 # asm 2: and f0=%i1 and %i1,%o1,%i1 # qhasm: g0 &= ~f # asm 1: andn g0=int64#8 # asm 2: andn g0=%g4 andn %g4,%o1,%g4 # qhasm: s02 = *(uint8 *) (s + 2) # asm 1: ldub [s02=int64#14 # asm 2: ldub [s02=%o4 ldub [%i2+2],%o4 # qhasm: f1 &= f # asm 1: and f1=int64#4 # asm 2: and f1=%i3 and %i3,%o1,%i3 # qhasm: f0 |= g0 # asm 1: or f0=int64#2 # asm 2: or f0=%i1 or %i1,%g4,%i1 # qhasm: s03 = *(uint8 *) (s + 3) # asm 1: ldub [s03=int64#8 # asm 2: ldub [s03=%g4 ldub [%i2+3],%g4 # qhasm: g1 &= ~f # asm 1: andn g1=int64#9 # asm 2: andn g1=%g5 andn %g5,%o1,%g5 # qhasm: f2 &= f # asm 1: and f2=int64#6 # asm 2: and f2=%i5 and %i5,%o1,%i5 # qhasm: s10 = *(uint8 *) (s + 4) # asm 1: ldub [s10=int64#15 # asm 2: ldub [s10=%o5 ldub [%i2+4],%o5 # qhasm: f3 &= f # asm 1: and f3=int64#7 # asm 2: and f3=%g1 and %g1,%o1,%g1 # qhasm: g2 &= ~f # asm 1: andn g2=int64#10 # asm 2: andn g2=%o0 andn %o0,%o1,%o0 # qhasm: s11 = *(uint8 *) (s + 5) # asm 1: ldub [s11=int64#16 # asm 2: ldub [s11=%o7 ldub [%i2+5],%o7 # qhasm: g3 &= ~f # asm 1: andn g3=int64#5 # asm 2: andn g3=%i4 andn %i4,%o1,%i4 # qhasm: f1 |= g1 # asm 1: or f1=int64#4 # asm 2: or f1=%i3 or %i3,%g5,%i3 # qhasm: s12 = *(uint8 *) (s + 6) # asm 1: ldub [s12=int64#9 # asm 2: ldub [s12=%g5 ldub [%i2+6],%g5 # qhasm: f2 |= g2 # asm 1: or f2=int64#6 # asm 2: or f2=%i5 or %i5,%o0,%i5 # qhasm: f3 |= g3 # asm 1: 
or f3=int64#5 # asm 2: or f3=%i4 or %g1,%i4,%i4 # qhasm: s13 = *(uint8 *) (s + 7) # asm 1: ldub [s13=int64#7 # asm 2: ldub [s13=%g1 ldub [%i2+7],%g1 # qhasm: s01 <<= 8 # asm 1: sllx s01=int64#10 # asm 2: sllx s01=%o0 sllx %o3,8,%o0 # qhasm: f0 += s00 # asm 1: add f0=int64#2 # asm 2: add f0=%i1 add %i1,%o2,%i1 # qhasm: s20 = *(uint8 *) (s + 8) # asm 1: ldub [s20=int64#11 # asm 2: ldub [s20=%o1 ldub [%i2+8],%o1 # qhasm: s02 <<= 16 # asm 1: sllx s02=int64#12 # asm 2: sllx s02=%o2 sllx %o4,16,%o2 # qhasm: f0 += s01 # asm 1: add f0=int64#2 # asm 2: add f0=%i1 add %i1,%o0,%i1 # qhasm: s21 = *(uint8 *) (s + 9) # asm 1: ldub [s21=int64#10 # asm 2: ldub [s21=%o0 ldub [%i2+9],%o0 # qhasm: s03 <<= 24 # asm 1: sllx s03=int64#8 # asm 2: sllx s03=%g4 sllx %g4,24,%g4 # qhasm: f0 += s02 # asm 1: add f0=int64#2 # asm 2: add f0=%i1 add %i1,%o2,%i1 # qhasm: s22 = *(uint8 *) (s + 10) # asm 1: ldub [s22=int64#12 # asm 2: ldub [s22=%o2 ldub [%i2+10],%o2 # qhasm: s11 <<= 8 # asm 1: sllx s11=int64#13 # asm 2: sllx s11=%o3 sllx %o7,8,%o3 # qhasm: f1 += s10 # asm 1: add f1=int64#4 # asm 2: add f1=%i3 add %i3,%o5,%i3 # qhasm: s23 = *(uint8 *) (s + 11) # asm 1: ldub [s23=int64#14 # asm 2: ldub [s23=%o4 ldub [%i2+11],%o4 # qhasm: s12 <<= 16 # asm 1: sllx s12=int64#9 # asm 2: sllx s12=%g5 sllx %g5,16,%g5 # qhasm: f1 += s11 # asm 1: add f1=int64#4 # asm 2: add f1=%i3 add %i3,%o3,%i3 # qhasm: s30 = *(uint8 *) (s + 12) # asm 1: ldub [s30=int64#13 # asm 2: ldub [s30=%o3 ldub [%i2+12],%o3 # qhasm: s13 <<= 24 # asm 1: sllx s13=int64#7 # asm 2: sllx s13=%g1 sllx %g1,24,%g1 # qhasm: f1 += s12 # asm 1: add f1=int64#4 # asm 2: add f1=%i3 add %i3,%g5,%i3 # qhasm: s31 = *(uint8 *) (s + 13) # asm 1: ldub [s31=int64#9 # asm 2: ldub [s31=%g5 ldub [%i2+13],%g5 # qhasm: f0 += s03 # asm 1: add f0=int64#2 # asm 2: add f0=%i1 add %i1,%g4,%i1 # qhasm: f1 += s13 # asm 1: add f1=int64#4 # asm 2: add f1=%i3 add %i3,%g1,%i3 # qhasm: s32 = *(uint8 *) (s + 14) # asm 1: ldub [s32=int64#7 # asm 2: ldub [s32=%g1 ldub [%i2+14],%g1 # qhasm: s21 <<= 8 # asm 1: sllx s21=int64#8 # asm 2: sllx s21=%g4 sllx %o0,8,%g4 # qhasm: f2 += s20 # asm 1: add f2=int64#6 # asm 2: add f2=%i5 add %i5,%o1,%i5 # qhasm: s33 = *(uint8 *) (s + 15) # asm 1: ldub [s33=int64#3 # asm 2: ldub [s33=%i2 ldub [%i2+15],%i2 # qhasm: s22 <<= 16 # asm 1: sllx s22=int64#10 # asm 2: sllx s22=%o0 sllx %o2,16,%o0 # qhasm: f2 += s21 # asm 1: add f2=int64#6 # asm 2: add f2=%i5 add %i5,%g4,%i5 # qhasm: s23 <<= 24 # asm 1: sllx s23=int64#8 # asm 2: sllx s23=%g4 sllx %o4,24,%g4 # qhasm: f2 += s22 # asm 1: add f2=int64#6 # asm 2: add f2=%i5 add %i5,%o0,%i5 # qhasm: s31 <<= 8 # asm 1: sllx s31=int64#9 # asm 2: sllx s31=%g5 sllx %g5,8,%g5 # qhasm: f3 += s30 # asm 1: add f3=int64#5 # asm 2: add f3=%i4 add %i4,%o3,%i4 # qhasm: s32 <<= 16 # asm 1: sllx s32=int64#7 # asm 2: sllx s32=%g1 sllx %g1,16,%g1 # qhasm: f3 += s31 # asm 1: add f3=int64#5 # asm 2: add f3=%i4 add %i4,%g5,%i4 # qhasm: s33 <<= 24 # asm 1: sllx s33=int64#3 # asm 2: sllx s33=%i2 sllx %i2,24,%i2 # qhasm: f3 += s32 # asm 1: add f3=int64#5 # asm 2: add f3=%i4 add %i4,%g1,%i4 # qhasm: f2 += s23 # asm 1: add f2=int64#6 # asm 2: add f2=%i5 add %i5,%g4,%i5 # qhasm: f3 += s33 # asm 1: add f3=int64#3 # asm 2: add f3=%i2 add %i4,%i2,%i2 # qhasm: *(uint8 *) (out + 0) = f0 # asm 1: stb >= 8 # asm 1: srlx f0=int64#2 # asm 2: srlx f0=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 1) = f0 # asm 1: stb >= 8 # asm 1: srlx f0=int64#2 # asm 2: srlx f0=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 2) = f0 # asm 1: stb >= 8 # asm 1: srlx f0=int64#2 # asm 
2: srlx f0=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 3) = f0 # asm 1: stb >= 8 # asm 1: srlx f0=int64#2 # asm 2: srlx f0=%i1 srlx %i1,8,%i1 # qhasm: f1 += f0 # asm 1: add f1=int64#2 # asm 2: add f1=%i1 add %i3,%i1,%i1 # qhasm: *(uint8 *) (out + 4) = f1 # asm 1: stb >= 8 # asm 1: srlx f1=int64#2 # asm 2: srlx f1=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 5) = f1 # asm 1: stb >= 8 # asm 1: srlx f1=int64#2 # asm 2: srlx f1=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 6) = f1 # asm 1: stb >= 8 # asm 1: srlx f1=int64#2 # asm 2: srlx f1=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 7) = f1 # asm 1: stb >= 8 # asm 1: srlx f1=int64#2 # asm 2: srlx f1=%i1 srlx %i1,8,%i1 # qhasm: f2 += f1 # asm 1: add f2=int64#2 # asm 2: add f2=%i1 add %i5,%i1,%i1 # qhasm: *(uint8 *) (out + 8) = f2 # asm 1: stb >= 8 # asm 1: srlx f2=int64#2 # asm 2: srlx f2=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 9) = f2 # asm 1: stb >= 8 # asm 1: srlx f2=int64#2 # asm 2: srlx f2=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 10) = f2 # asm 1: stb >= 8 # asm 1: srlx f2=int64#2 # asm 2: srlx f2=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 11) = f2 # asm 1: stb >= 8 # asm 1: srlx f2=int64#2 # asm 2: srlx f2=%i1 srlx %i1,8,%i1 # qhasm: f3 += f2 # asm 1: add f3=int64#2 # asm 2: add f3=%i1 add %i2,%i1,%i1 # qhasm: *(uint8 *) (out + 12) = f3 # asm 1: stb >= 8 # asm 1: srlx f3=int64#2 # asm 2: srlx f3=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 13) = f3 # asm 1: stb >= 8 # asm 1: srlx f3=int64#2 # asm 2: srlx f3=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 14) = f3 # asm 1: stb >= 8 # asm 1: srlx f3=int64#2 # asm 2: srlx f3=%i1 srlx %i1,8,%i1 # qhasm: *(uint8 *) (out + 15) = f3 # asm 1: stb
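
# Note (added commentary, not part of the qhasm-generated output): the code
# following the ._nomorebytes label above performs the final floating-point
# carry.  Each y_k = (h_k + alpha_k) - alpha_k rounds h_k at the boundary
# named by alpha_k (2^32, 2^64, 2^96 or 2^130), relying on double-precision
# round-to-nearest; x_k = h_k - y_k is the exact part below that boundary.
# The rounded parts are pushed into the higher limbs, while the parts at or
# above 2^130 (y6, y7) are multiplied by scale and wrapped back into the
# lowest limb (x0 += y7, x0 += y6), consistent with scale being 5/2^130 for
# arithmetic modulo 2^130 - 5 (inferred, not stated here).  The
# offset0..offset3 constants added just before the std stores appear to
# re-bias each limb so that the stored double's low bits can be read back
# directly as the integer words f0..f3.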
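
# Note (added commentary, not part of the qhasm-generated output): the
# integer tail reloads the four 32-bit words as f0..f3, computes
# g = h + 5 word by word, and uses the sign of g4 - 4 to select either h or
# h - (2^130 - 5), i.e. the fully reduced accumulator.  The s input is then
# added and the 16-byte tag is stored little-endian, with the high bits of
# each word carried into the next before it is emitted.  A rough C sketch
# of these steps (illustrative only; the names are ours):
#
#   uint64_t mask = (uint64_t)((int64_t)(g4 - 4) >> 63); /* ones iff h < 2^130 - 5 */
#   f0 = (f0 & mask) | (g0 & ~mask);                     /* likewise f1, f2, f3    */
#   f0 += s0;                                            /* s0..s3: words of s     */
#   out[0] = (uint8_t)f0;         out[1] = (uint8_t)(f0 >> 8);
#   out[2] = (uint8_t)(f0 >> 16); out[3] = (uint8_t)(f0 >> 24);
#   f1 += s1 + (f0 >> 32);                               /* carry into next word   */
#   /* ...and likewise out[4..15] from f1, f2, f3 */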