# ---------------------------------------------------------------------------
# poly1305_sparc -- qhasm source for the Poly1305 one-time message
# authentication code, SPARC floating-point implementation.  Inputs
# (see "input" directives below): out = 16-byte tag destination,
# r and s = the two 16-byte key halves, m = message pointer,
# l = message length in bytes.
#
# NOTE(review): this copy has had its newlines collapsed.  qhasm is a
# line-oriented language, and the "# range ..." annotations embedded below
# would comment out every statement that follows them on the same physical
# line, so this text is not runnable as-is.  The text also appears
# truncated mid-file (see the note further down): the loop that absorbs
# full 16-byte message blocks is not visible here.  Per review policy the
# statements themselves are left byte-identical; only these standalone
# comment lines were added, and only at points where no statement is split
# across the surviving physical lines.
#
# The first two physical lines are the register/variable declarations:
# float64 h0..h7 = the 130-bit accumulator spread across eight float limbs;
# r*/sr* = key limbs and their scaled copies; x*/y* = carry-extraction
# temporaries; alpha*/offset* = rounding constants loaded from the
# poly1305_sparc_constants table; the int64 names are byte-assembly
# temporaries, and lbelow2..lbelow15 are sign-extended masks used to pad
# the final partial block.
float64 scale float64 alpha0 float64 alpha32 float64 alpha64 float64 alpha96 float64 alpha130 float64 h0 float64 h1 float64 h2 float64 h3 float64 h4 float64 h5 float64 h6 float64 h7 float64 y7 float64 y6 float64 y1 float64 y0 float64 y5 float64 y4 float64 x7 float64 x6 float64 x1 float64 x0 float64 y3 float64 y2 float64 r3low float64 r0low float64 r3high float64 r0high float64 sr1low float64 x5 float64 r3lowx0 float64 sr1high float64 x4 float64 r0lowx6 float64 r1low float64 x3 float64 r3highx0 float64 r1high float64 x2 float64 r0highx6 float64 sr2low float64 r0lowx0 float64 sr2high float64 sr1lowx6 float64 r2low float64 r0highx0 float64 r2high float64 sr1highx6 float64 sr3low float64 r1lowx0 float64 sr3high float64 sr2lowx6 float64 r1highx0 float64 sr2highx6 float64 r2lowx0 float64 sr3lowx6 float64 r2highx0 float64 sr3highx6 float64 r1highx4 float64 r1lowx4 float64 r0highx4 float64 r0lowx4 float64 sr3highx4 float64 sr3lowx4 float64 sr2highx4 float64 sr2lowx4 float64 r0lowx2 float64 r0highx2 float64 r1lowx2 float64 r1highx2 float64 r2lowx2 float64 r2highx2 float64 sr3lowx2 float64 sr3highx2 float64 z0 float64 z1 float64 z2 float64 z3 int64 out int64 r int64 s int64 m int64 l int64 r0 int64 r1 int64 r2 int64 r3 int64 r00 int64 r01 int64 r02 int64 r03 int64 r10 int64 r11 int64 r12 int64 r13 int64 r20 int64 r21 int64 r22 int64 r23 int64 r30 int64 r31 int64 r32 int64 r33 int64 m0 int64 m1 int64 m2 int64 m3 int64 m00 int64 m01 int64 m02 int64 m03 int64 m10 int64 m11 int64 m12 int64 m13 int64 m20 int64 m21 int64 m22 int64 m23 int64 m30 int64 m31 int64 m32 int64 m33 int64 constants int64 constants_low int64 lbelow2 int64 lbelow3 int64 lbelow4 int64 lbelow5 int64 lbelow6 int64 lbelow7 int64 lbelow8 int64 lbelow9 int64 lbelow10 int64 lbelow11 int64 lbelow12 int64 lbelow13 int64 lbelow14 int64 lbelow15 float64 alpham80 float64 alpham48 float64 alpham16 float64 alpha18 float64 alpha50 float64 alpha82 float64 alpha112 float64 offset0 float64 offset1 float64 offset2 float64 
offset3 stack64 d0 stack64 d1 stack64 d2 stack64 d3 stack64 sr1high_stack stack64 sr1low_stack stack64 sr2high_stack stack64 sr2low_stack stack64 sr3high_stack stack64 sr3low_stack enter poly1305_sparc input out input r input s input m input l # block 1: prologue r00 = *(uint8 *) (r + 0) constants = (poly1305_sparc_constants & 0xfffffc0000000000) >> 32 constants_low = poly1305_sparc_constants & 0xfffffc00 r01 = *(uint8 *) (r + 1) constants |= (poly1305_sparc_constants & 0x3ff00000000) >> 32 constants_low |= poly1305_sparc_constants & 0x3ff r02 = *(uint8 *) (r + 2) r0 = 2151 constants <<= 32 r03 = *(uint8 *) (r + 3) r0 <<= 51 constants |= constants_low r10 = *(uint8 *) (r + 4) r01 <<= 8 r0 += r00 r11 = *(uint8 *) (r + 5) r02 <<= 16 r0 += r01 r12 = *(uint8 *) (r + 6) r03 <<= 24 r0 += r02 r13 = *(uint8 *) (r + 7) r1 = 2215 r0 += r03 d0 = r0 r1 <<= 51 r2 = 2279 r20 = *(uint8 *) (r + 8) r11 <<= 8 r1 += r10 r21 = *(uint8 *) (r + 9) r12 <<= 16 r1 += r11 r22 = *(uint8 *) (r + 10) r13 <<= 24 r1 += r12 r23 = *(uint8 *) (r + 11) r2 <<= 51 r1 += r13 d1 = r1 r21 <<= 8 r2 += r20 r30 = *(uint8 *) (r + 12) r22 <<= 16 r2 += r21 r31 = *(uint8 *) (r + 13) r23 <<= 24 r2 += r22 r32 = *(uint8 *) (r + 14) r2 += r23 r3 = 2343 d2 = r2 r3 <<= 51 alpha32 = *(float64 *) (constants + 40) # range alpha32 83 3 3 r33 = *(uint8 *) (r + 15) r31 <<= 8 r3 += r30 round *(uint32 *) (constants + 136) r32 <<= 16 r3 += r31 r33 <<= 24 r3 += r32 r3 += r33 h0 = alpha32 - alpha32 # range h0 0 0 0 d3 = r3 h1 = alpha32 - alpha32 # range h1 16 0 0 alpha0 = *(float64 *) (constants + 24) # range alpha0 51 3 3 h2 = alpha32 - alpha32 # range h2 32 0 0 alpha64 = *(float64 *) (constants + 56) # range alpha64 115 3 3 h3 = alpha32 - alpha32 # range h3 48 0 0 alpha18 = *(float64 *) (constants + 32) # range alpha18 69 3 3 h4 = alpha32 - alpha32 # range h4 64 0 0 r0low = d0 h5 = alpha32 - alpha32 # range h5 80 0 0 r1low = d1 h6 = alpha32 - alpha32 # range h6 96 0 0 r2low = d2 h7 = alpha32 - alpha32 # range h7 112 0 0 
# Prologue (above): the 16 key bytes at r+0..r+15 are loaded one byte at a
# time and packed little-endian into four 32-bit groups r0..r3.  Each group
# is combined (by addition) with a constant of the form 2151/2215/2279/2343
# shifted left 51 -- presumably an IEEE-754 exponent pattern so that
# storing the integer to the d0..d3 stack slots and reloading as float64
# yields a biased floating-point limb (TODO confirm against the original
# poly1305 qhasm sources).  The "round" instruction sets the FPU rounding
# mode from the constants table, and h0..h7 are zeroed via alpha32-alpha32.
#
# Key setup in floating point (below): r0low..r3low are de-biased by
# subtracting alpha0/alpha32/alpha64/alpha96, split into high/low halves
# using the alpha18/alpha50/alpha82/alpha112 rounding constants, and the
# scaled copies sr1..sr3 = scale * r1..r3 are formed (scale's range note
# '"-130" 5 5' suggests 5 * 2^-130, the Poly1305 modular-reduction
# multiplier -- TODO confirm).  The sr* halves are spilled to stack slots
# for use in the multiplication.
#
# NOTE(review): the text is garbled/truncated right after
# "sr3high_stack = sr3high": "unsigned>= 63" is a statement fragment, and
# the main loop that absorbs full 16-byte message blocks (plus the length
# test that branches to it) appears to be missing from this copy.  What
# survives below handles the final, possibly partial, block: each
# lbelow<k> = l - k is arithmetic-shifted right 63 to form an all-ones
# mask when l < k, and those masks select, per byte position, either the
# message byte, the 0x01 terminator, or zero padding, while walking the
# pointer m backward so no out-of-range address is dereferenced.
alpha50 = *(float64 *) (constants + 48) # range alpha50 101 3 3 r0low -= alpha0 # range r0low 0 0 0x0fffffff alpha82 = *(float64 *) (constants + 64) # range alpha82 133 3 3 r1low -= alpha32 # range r1low 34 0 0x3ffffff scale = *(float64 *) (constants + 96) # range scale "-130" 5 5 r2low -= alpha64 # range r2low 66 0 0x3ffffff alpha96 = *(float64 *) (constants + 72) # range alpha96 147 3 3 r0high = r0low + alpha18 r3low = d3 alpham80 = *(float64 *) (constants + 0) # range alpham80 "-29" 3 3 r1high = r1low + alpha50 sr1low = scale * r1low alpham48 = *(float64 *) (constants + 8) # range alpham48 "3" 3 3 r2high = r2low + alpha82 sr2low = scale * r2low r0high -= alpha18 r3low -= alpha96 # range r3low 98 0 0x3ffffff r1high -= alpha50 sr1high = sr1low + alpham80 alpha112 = *(float64 *) (constants + 80) # range alpha112 163 3 3 r0low -= r0high alpham16 = *(float64 *) (constants + 16) # range alpham16 35 3 3 r2high -= alpha82 sr3low = scale * r3low alpha130 = *(float64 *) (constants + 88) # range alpha130 181 3 3 sr2high = sr2low + alpham48 r1low -= r1high sr1high -= alpham80 sr1high_stack = sr1high r2low -= r2high sr2high -= alpham48 sr2high_stack = sr2high r3high = r3low + alpha112 sr1low -= sr1high sr1low_stack = sr1low sr3high = sr3low + alpham16 sr2low -= sr2high sr2low_stack = sr2low r3high -= alpha112 sr3high -= alpham16 sr3high_stack = sr3high unsigned>= 63 lbelow4 = l - 4 m00 = *(uint8 *) (m + 0) (int64) lbelow3 >>= 63 m += lbelow2 m01 = *(uint8 *) (m + 1) (int64) lbelow4 >>= 63 m += lbelow3 m02 = *(uint8 *) (m + 2) m += lbelow4 m0 = 2151 m03 = *(uint8 *) (m + 3) m0 <<= 51 m1 = 2215 m0 += m00 m01 &= ~lbelow2 m02 &= ~lbelow3 m01 -= lbelow2 m01 <<= 8 m03 &= ~lbelow4 m0 += m01 lbelow2 -= lbelow3 m02 += lbelow2 lbelow3 -= lbelow4 m02 <<= 16 m03 += lbelow3 m03 <<= 24 m0 += m02 m0 += m03 lbelow5 = l - 5 lbelow6 = l - 6 lbelow7 = l - 7 (int64) lbelow5 >>= 63 lbelow8 = l - 8 (int64) lbelow6 >>= 63 m += lbelow5 m10 = *(uint8 *) (m + 4) (int64) lbelow7 >>= 63 m += lbelow6 
# Partial-block assembly continues (below): bytes 5..15 of the block are
# loaded under the lbelow5..lbelow15 masks and packed into m1..m3, with
# m33 = lbelow15 + 1 supplying the 0x01 block terminator.  The packed
# groups m0..m3 pass through the d0..d3 stack slots into floats z0..z3,
# are de-biased (z* -= alpha*), and are added into the accumulator limbs
# h0/h1/h3/h5.  The long multiply-accumulate sequence that follows
# computes h = (h + block) * r using the r*low/r*high limbs, their
# scaled sr* copies, and the x*/y* carry-extraction temporaries, then the
# "nomorebytes:" epilogue performs the final carry propagation with the
# offset0..offset3 constants and converts the limbs back to integers via
# d0..d3.  The closing integer code (f0..f4/g0..g4) does the conditional
# subtraction of the modulus (hence "g0 = f0 + 5" and "g4 = g4 - 4" --
# presumably the standard Poly1305 freeze mod 2^130-5; verify against the
# reference implementation), adds the 16 bytes of s little-endian with
# carry propagation, and stores the 16-byte tag to out+0..out+15 one byte
# at a time before "leave".
m11 = *(uint8 *) (m + 5) (int64) lbelow8 >>= 63 m += lbelow7 m12 = *(uint8 *) (m + 6) m1 <<= 51 m += lbelow8 m13 = *(uint8 *) (m + 7) m10 &= ~lbelow5 lbelow4 -= lbelow5 m10 += lbelow4 lbelow5 -= lbelow6 m11 &= ~lbelow6 m11 += lbelow5 m11 <<= 8 m1 += m10 m1 += m11 m12 &= ~lbelow7 lbelow6 -= lbelow7 m13 &= ~lbelow8 m12 += lbelow6 lbelow7 -= lbelow8 m12 <<= 16 m13 += lbelow7 m13 <<= 24 m1 += m12 m1 += m13 m2 = 2279 lbelow9 = l - 9 m3 = 2343 lbelow10 = l - 10 lbelow11 = l - 11 (int64) lbelow9 >>= 63 lbelow12 = l - 12 (int64) lbelow10 >>= 63 m += lbelow9 m20 = *(uint8 *) (m + 8) (int64) lbelow11 >>= 63 m += lbelow10 m21 = *(uint8 *) (m + 9) (int64) lbelow12 >>= 63 m += lbelow11 m22 = *(uint8 *) (m + 10) m2 <<= 51 m += lbelow12 m23 = *(uint8 *) (m + 11) m20 &= ~lbelow9 lbelow8 -= lbelow9 m20 += lbelow8 lbelow9 -= lbelow10 m21 &= ~lbelow10 m21 += lbelow9 m21 <<= 8 m2 += m20 m2 += m21 m22 &= ~lbelow11 lbelow10 -= lbelow11 m23 &= ~lbelow12 m22 += lbelow10 lbelow11 -= lbelow12 m22 <<= 16 m23 += lbelow11 m23 <<= 24 m2 += m22 m3 <<= 51 lbelow13 = l - 13 (int64) lbelow13 >>= 63 lbelow14 = l - 14 (int64) lbelow14 >>= 63 m += lbelow13 lbelow15 = l - 15 m30 = *(uint8 *) (m + 12) (int64) lbelow15 >>= 63 m += lbelow14 m31 = *(uint8 *) (m + 13) m += lbelow15 m2 += m23 m32 = *(uint8 *) (m + 14) m30 &= ~lbelow13 lbelow12 -= lbelow13 m30 += lbelow12 lbelow13 -= lbelow14 m3 += m30 m31 &= ~lbelow14 m31 += lbelow13 m32 &= ~lbelow15 m31 <<= 8 lbelow14 -= lbelow15 m3 += m31 m32 += lbelow14 d0 = m0 m32 <<= 16 m33 = lbelow15 + 1 d1 = m1 m33 <<= 24 m3 += m32 d2 = m2 m3 += m33 d3 = m3 alpha0 = *(float64 *) (constants + 24) # range alpha0 51 3 3 z3 = d3 z2 = d2 z1 = d1 z0 = d0 z3 -= alpha96 z2 -= alpha64 z1 -= alpha32 z0 -= alpha0 # range z0 0 0 0xffffffff # range z1 32 0 0xffffffff # range z2 64 0 0xffffffff # range z3 96 0 0x1ffffffff h5 += z3 h3 += z2 h1 += z1 h0 += z0 y7 = h7 + alpha130 y6 = h6 + alpha130 y1 = h1 + alpha32 y0 = h0 + alpha32 y7 -= alpha130 y6 -= alpha130 y1 -= alpha32 y0 -= 
alpha32 y5 = h5 + alpha96 y4 = h4 + alpha96 x7 = h7 - y7 y7 *= scale x6 = h6 - y6 y6 *= scale x1 = h1 - y1 x0 = h0 - y0 y5 -= alpha96 y4 -= alpha96 x1 += y7 x0 += y6 x7 += y5 x6 += y4 y3 = h3 + alpha64 y2 = h2 + alpha64 x0 += x1 x6 += x7 y3 -= alpha64 y2 -= alpha64 x5 = h5 - y5 r3lowx0 = r3low * x0 x4 = h4 - y4 r0lowx6 = r0low * x6 x3 = h3 - y3 r3highx0 = r3high * x0 sr1low = sr1low_stack x2 = h2 - y2 r0highx6 = r0high * x6 sr1high = sr1high_stack x5 += y3 r0lowx0 = r0low * x0 h6 = r3lowx0 + r0lowx6 sr1lowx6 = sr1low * x6 x4 += y2 r0highx0 = r0high * x0 sr2low = sr2low_stack h7 = r3highx0 + r0highx6 sr1highx6 = sr1high * x6 sr2high = sr2high_stack x3 += y1 r1lowx0 = r1low * x0 h0 = r0lowx0 + sr1lowx6 sr2lowx6 = sr2low * x6 x2 += y0 r1highx0 = r1high * x0 sr3low = sr3low_stack h1 = r0highx0 + sr1highx6 sr2highx6 = sr2high * x6 sr3high = sr3high_stack x4 += x5 r2lowx0 = r2low * x0 h2 = r1lowx0 + sr2lowx6 sr3lowx6 = sr3low * x6 x2 += x3 r2highx0 = r2high * x0 h3 = r1highx0 + sr2highx6 sr3highx6 = sr3high * x6 r1highx4 = r1high * x4 h4 = r2lowx0 + sr3lowx6 r1lowx4 = r1low * x4 r0highx4 = r0high * x4 h5 = r2highx0 + sr3highx6 r0lowx4 = r0low * x4 h7 += r1highx4 sr3highx4 = sr3high * x4 h6 += r1lowx4 sr3lowx4 = sr3low * x4 h5 += r0highx4 sr2highx4 = sr2high * x4 h4 += r0lowx4 sr2lowx4 = sr2low * x4 h3 += sr3highx4 r0lowx2 = r0low * x2 h2 += sr3lowx4 r0highx2 = r0high * x2 h1 += sr2highx4 r1lowx2 = r1low * x2 h0 += sr2lowx4 r1highx2 = r1high * x2 h2 += r0lowx2 r2lowx2 = r2low * x2 h3 += r0highx2 r2highx2 = r2high * x2 h4 += r1lowx2 sr3lowx2 = sr3low * x2 h5 += r1highx2 sr3highx2 = sr3high * x2 h6 += r2lowx2 h7 += r2highx2 h0 += sr3lowx2 h1 += sr3highx2 nomorebytes: # block 6: epilogue offset0 = *(float64 *) (constants + 104) # range offset0 0 0x180001fffffffb 0x180001fffffffb y7 = h7 + alpha130 offset1 = *(float64 *) (constants + 112) # range offset1 32 0x180001fffffffe 0x180001fffffffe y0 = h0 + alpha32 offset2 = *(float64 *) (constants + 120) # range offset2 64 
0x180001fffffffe 0x180001fffffffe y1 = h1 + alpha32 offset3 = *(float64 *) (constants + 128) # range offset3 96 0x180003fffffffe 0x180003fffffffe y2 = h2 + alpha64 y7 -= alpha130 y3 = h3 + alpha64 y4 = h4 + alpha96 y5 = h5 + alpha96 x7 = h7 - y7 y7 *= scale y0 -= alpha32 y1 -= alpha32 y2 -= alpha64 h6 += x7 y3 -= alpha64 y4 -= alpha96 y5 -= alpha96 y6 = h6 + alpha130 x0 = h0 - y0 x1 = h1 - y1 x2 = h2 - y2 y6 -= alpha130 x0 += y7 x3 = h3 - y3 x4 = h4 - y4 x5 = h5 - y5 x6 = h6 - y6 y6 *= scale x2 += y0 x3 += y1 x4 += y2 x0 += y6 x5 += y3 x6 += y4 x2 += x3 x0 += x1 x4 += x5 x6 += y5 x2 += offset1 d1 = x2 x0 += offset0 d0 = x0 x4 += offset2 d2 = x4 x6 += offset3 d3 = x6 int64 s00 int64 s01 int64 s02 int64 s03 int64 s10 int64 s11 int64 s12 int64 s13 int64 s20 int64 s21 int64 s22 int64 s23 int64 s30 int64 s31 int64 s32 int64 s33 int64 bits32 int64 f int64 f0 int64 f1 int64 f2 int64 f3 int64 f4 int64 g int64 g0 int64 g1 int64 g2 int64 g3 int64 g4 f0 = d0 f1 = d1 bits32 = -1 f2 = d2 (uint64) bits32 >>= 32 f3 = d3 f = (uint64) f0 >> 32 f0 &= bits32 f &= 255 f1 += f g0 = f0 + 5 g = (uint64) g0 >> 32 g0 &= bits32 f = (uint64) f1 >> 32 f1 &= bits32 f &= 255 g1 = f1 + g g = (uint64) g1 >> 32 f2 += f f = (uint64) f2 >> 32 g1 &= bits32 f2 &= bits32 f &= 255 f3 += f g2 = f2 + g g = (uint64) g2 >> 32 g2 &= bits32 f4 = (uint64) f3 >> 32 f3 &= bits32 f4 &= 255 g3 = f3 + g g = (uint64) g3 >> 32 g3 &= bits32 g4 = f4 + g g4 = g4 - 4 s00 = *(uint8 *) (s + 0) f = (int64) g4 >> 63 s01 = *(uint8 *) (s + 1) f0 &= f g0 &= ~f s02 = *(uint8 *) (s + 2) f1 &= f f0 |= g0 s03 = *(uint8 *) (s + 3) g1 &= ~f f2 &= f s10 = *(uint8 *) (s + 4) f3 &= f g2 &= ~f s11 = *(uint8 *) (s + 5) g3 &= ~f f1 |= g1 s12 = *(uint8 *) (s + 6) f2 |= g2 f3 |= g3 s13 = *(uint8 *) (s + 7) s01 <<= 8 f0 += s00 s20 = *(uint8 *) (s + 8) s02 <<= 16 f0 += s01 s21 = *(uint8 *) (s + 9) s03 <<= 24 f0 += s02 s22 = *(uint8 *) (s + 10) s11 <<= 8 f1 += s10 s23 = *(uint8 *) (s + 11) s12 <<= 16 f1 += s11 s30 = *(uint8 *) (s + 12) s13 <<= 
24 f1 += s12 s31 = *(uint8 *) (s + 13) f0 += s03 f1 += s13 s32 = *(uint8 *) (s + 14) s21 <<= 8 f2 += s20 s33 = *(uint8 *) (s + 15) s22 <<= 16 f2 += s21 s23 <<= 24 f2 += s22 s31 <<= 8 f3 += s30 s32 <<= 16 f3 += s31 s33 <<= 24 f3 += s32 f2 += s23 f3 += s33 *(uint8 *) (out + 0) = f0 (uint64) f0 >>= 8 *(uint8 *) (out + 1) = f0 (uint64) f0 >>= 8 *(uint8 *) (out + 2) = f0 (uint64) f0 >>= 8 *(uint8 *) (out + 3) = f0 (uint64) f0 >>= 8 f1 += f0 *(uint8 *) (out + 4) = f1 (uint64) f1 >>= 8 *(uint8 *) (out + 5) = f1 (uint64) f1 >>= 8 *(uint8 *) (out + 6) = f1 (uint64) f1 >>= 8 *(uint8 *) (out + 7) = f1 (uint64) f1 >>= 8 f2 += f1 *(uint8 *) (out + 8) = f2 (uint64) f2 >>= 8 *(uint8 *) (out + 9) = f2 (uint64) f2 >>= 8 *(uint8 *) (out + 10) = f2 (uint64) f2 >>= 8 *(uint8 *) (out + 11) = f2 (uint64) f2 >>= 8 f3 += f2 *(uint8 *) (out + 12) = f3 (uint64) f3 >>= 8 *(uint8 *) (out + 13) = f3 (uint64) f3 >>= 8 *(uint8 *) (out + 14) = f3 (uint64) f3 >>= 8 *(uint8 *) (out + 15) = f3 leave
# End of poly1305_sparc.  NOTE(review): before relying on this file,
# recover an intact copy from the upstream poly1305 distribution -- the
# collapsed newlines and the missing main message loop noted above make
# this text non-authoritative.