/**
- * Author......: Jens Steube <jens.steube@gmail.com>
+ * Authors.....: Jens Steube <jens.steube@gmail.com>
+ *               magnum <john.magnum@hushmail.com>
+ *
* License.....: MIT
*/
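+// device type bits, matching OpenCL's CL_DEVICE_TYPE_CPU (1 << 1) and CL_DEVICE_TYPE_GPU (1 << 2)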
+#define DEVICE_TYPE_CPU 2
+#define DEVICE_TYPE_GPU 4
+
typedef uchar u8;
typedef ushort u16;
typedef uint u32;
#define VECT_SIZE 1
#endif
-#if VECT_SIZE == 1
-typedef uint u32x;
-typedef ulong u64x;
-#endif
-
-#if VECT_SIZE == 2
-typedef uint2 u32x;
-typedef ulong2 u64x;
-#endif
-
-#if VECT_SIZE == 4
-typedef uint4 u32x;
-typedef ulong4 u64x;
-#endif
+#define CONCAT(a, b) a##b
+#define VTYPE(type, width) CONCAT(type, width)
-#if VECT_SIZE == 8
-typedef uint8 u32x;
-typedef ulong8 u64x;
+#if VECT_SIZE == 1
+typedef uchar u8x;
+typedef ushort u16x;
+typedef uint u32x;
+typedef ulong u64x;
+#else
+typedef VTYPE(uchar, VECT_SIZE) u8x;
+typedef VTYPE(ushort, VECT_SIZE) u16x;
+typedef VTYPE(uint, VECT_SIZE) u32x;
+typedef VTYPE(ulong, VECT_SIZE) u64x;
#endif
-// this one needs to die
-#define allx(r) r
-
-static inline u32 l32_from_64 (u64 a)
+inline u32 l32_from_64_S (u64 a)
{
- const u32 r = (uint) (a);
+ const u32 r = (u32) (a);
return r;
}
-static inline u32 h32_from_64 (u64 a)
+inline u32 h32_from_64_S (u64 a)
{
a >>= 32;
- const u32 r = (uint) (a);
+ const u32 r = (u32) (a);
return r;
}
-static inline u64 hl32_to_64 (const u32 a, const u32 b)
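+// fuse two 32-bit words into a u64: a becomes the high half, b the low half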
+inline u64 hl32_to_64_S (const u32 a, const u32 b)
{
return as_ulong ((uint2) (b, a));
}
-#ifdef IS_AMD
-static inline u32 swap32 (const u32 v)
-{
- return (as_uint (as_uchar4 (v).s3210));
-}
-
-static inline u64 swap64 (const u64 v)
-{
- return (as_ulong (as_uchar8 (v).s76543210));
-}
-#endif
-
-#ifdef IS_NV
-static inline u32 swap32 (const u32 v)
-{
- u32 r;
-
- asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
-
- return r;
-}
-
-static inline u64 swap64 (const u64 v)
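+// lane-wise truncation: OpenCL C has no casts between vector types, so each component is narrowed separately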
+inline u32x l32_from_64 (u64x a)
{
- u32 il;
- u32 ir;
+ u32x r;
- asm ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
+ #if VECT_SIZE == 1
+ r = (u32) a;
+ #endif
- u32 tl;
- u32 tr;
+ #if VECT_SIZE >= 2
+ r.s0 = (u32) a.s0;
+ r.s1 = (u32) a.s1;
+ #endif
- asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
- asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
+ #if VECT_SIZE >= 4
+ r.s2 = (u32) a.s2;
+ r.s3 = (u32) a.s3;
+ #endif
- u64 r;
+ #if VECT_SIZE >= 8
+ r.s4 = (u32) a.s4;
+ r.s5 = (u32) a.s5;
+ r.s6 = (u32) a.s6;
+ r.s7 = (u32) a.s7;
+ #endif
- asm ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
+ #if VECT_SIZE >= 16
+ r.s8 = (u32) a.s8;
+ r.s9 = (u32) a.s9;
+ r.sa = (u32) a.sa;
+ r.sb = (u32) a.sb;
+ r.sc = (u32) a.sc;
+ r.sd = (u32) a.sd;
+ r.se = (u32) a.se;
+ r.sf = (u32) a.sf;
+ #endif
return r;
}
-#endif
-#ifdef IS_GENERIC
-static inline u32 swap32 (const u32 v)
+inline u32x h32_from_64 (u64x a)
{
- return (as_uint (as_uchar4 (v).s3210));
-}
+ a >>= 32;
-static inline u64 swap64 (const u64 v)
-{
- return (as_ulong (as_uchar8 (v).s76543210));
-}
-#endif
+ u32x r;
-#ifdef IS_AMD
-static inline u32 __bfe (const u32 a, const u32 b, const u32 c)
-{
- return amd_bfe (a, b, c);
-}
+ #if VECT_SIZE == 1
+ r = (u32) a;
+ #endif
-static inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
-{
- return amd_bytealign (a, b, c);
-}
+ #if VECT_SIZE >= 2
+ r.s0 = (u32) a.s0;
+ r.s1 = (u32) a.s1;
+ #endif
-#endif
+ #if VECT_SIZE >= 4
+ r.s2 = (u32) a.s2;
+ r.s3 = (u32) a.s3;
+ #endif
-#ifdef IS_NV
-static inline u32 __byte_perm_S (const u32 a, const u32 b, const u32 c)
-{
- u32 r;
+ #if VECT_SIZE >= 8
+ r.s4 = (u32) a.s4;
+ r.s5 = (u32) a.s5;
+ r.s6 = (u32) a.s6;
+ r.s7 = (u32) a.s7;
+ #endif
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+ #if VECT_SIZE >= 16
+ r.s8 = (u32) a.s8;
+ r.s9 = (u32) a.s9;
+ r.sa = (u32) a.sa;
+ r.sb = (u32) a.sb;
+ r.sc = (u32) a.sc;
+ r.sd = (u32) a.sd;
+ r.se = (u32) a.se;
+ r.sf = (u32) a.sf;
+ #endif
return r;
}
-static inline u32x __byte_perm (const u32x a, const u32x b, const u32x c)
+inline u64x hl32_to_64 (const u32x a, const u32x b)
{
- u32x r;
+ u64x r;
#if VECT_SIZE == 1
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c) );
+ r = as_ulong ((uint2) (b, a));
#endif
- #if VECT_SIZE == 2
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
+ #if VECT_SIZE >= 2
+ r.s0 = as_ulong ((uint2) (b.s0, a.s0));
+ r.s1 = as_ulong ((uint2) (b.s1, a.s1));
#endif
- #if VECT_SIZE == 4
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
+ #if VECT_SIZE >= 4
+ r.s2 = as_ulong ((uint2) (b.s2, a.s2));
+ r.s3 = as_ulong ((uint2) (b.s3, a.s3));
#endif
- #if VECT_SIZE == 8
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
+ #if VECT_SIZE >= 8
+ r.s4 = as_ulong ((uint2) (b.s4, a.s4));
+ r.s5 = as_ulong ((uint2) (b.s5, a.s5));
+ r.s6 = as_ulong ((uint2) (b.s6, a.s6));
+ r.s7 = as_ulong ((uint2) (b.s7, a.s7));
#endif
- return r;
-}
-
-static inline u32 __bfe (const u32 a, const u32 b, const u32 c)
-{
- u32 r;
-
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+ #if VECT_SIZE >= 16
+ r.s8 = as_ulong ((uint2) (b.s8, a.s8));
+ r.s9 = as_ulong ((uint2) (b.s9, a.s9));
+ r.sa = as_ulong ((uint2) (b.sa, a.sa));
+ r.sb = as_ulong ((uint2) (b.sb, a.sb));
+ r.sc = as_ulong ((uint2) (b.sc, a.sc));
+ r.sd = as_ulong ((uint2) (b.sd, a.sd));
+ r.se = as_ulong ((uint2) (b.se, a.se));
+ r.sf = as_ulong ((uint2) (b.sf, a.sf));
+ #endif
return r;
}
-#if CUDA_ARCH >= 350
-static inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
+#ifdef IS_AMD
+inline u32 swap32_S (const u32 v)
{
- u32 r;
-
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
-
- return r;
+ return (as_uint (as_uchar4 (v).s3210));
}
-#else
-static inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
+
+inline u64 swap64_S (const u64 v)
{
- return __byte_perm_S (b, a, (0x76543210 >> ((c & 3) * 4)) & 0xffff);
+ return (as_ulong (as_uchar8 (v).s76543210));
}
-#endif
-#endif
-#ifdef IS_GENERIC
-static inline u32 __bfe (const u32 a, const u32 b, const u32 c)
+inline u32 rotr32_S (const u32 a, const u32 n)
{
- #define BIT(x) (1 << (x))
- #define BIT_MASK(x) (BIT (x) - 1)
- #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
-
- return BFE (a, b, c);
+ return rotate (a, 32 - n);
}
-static inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
+inline u32 rotl32_S (const u32 a, const u32 n)
{
- const u64 tmp = ((((u64) a) << 32) | ((u64) b)) >> ((c & 3) * 8);
-
- return (u32) (tmp);
+ return rotate (a, n);
}
-static inline u32x amd_bytealign (const u32x a, const u32x b, const u32 c)
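+// 64-bit rotate right via amd_bitalign, which returns the low 32 bits of (((u64) hi << 32) | lo) >> (s & 31)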
+inline u64 rotr64_S (const u64 a, const u32 n)
{
- #if VECT_SIZE == 1
- const u64x tmp = ((((u64x) (a)) << 32) | ((u64x) (b))) >> ((c & 3) * 8);
-
- return (u32x) (tmp);
- #endif
+ const u32 a0 = h32_from_64_S (a);
+ const u32 a1 = l32_from_64_S (a);
- #if VECT_SIZE == 2
- const u64x tmp = ((((u64x) (a.s0, a.s1)) << 32) | ((u64x) (b.s0, b.s1))) >> ((c & 3) * 8);
+ const u32 t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
+ const u32 t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);
- return (u32x) (tmp.s0, tmp.s1);
- #endif
+ const u64 r = hl32_to_64_S (t0, t1);
- #if VECT_SIZE == 4
- const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3))) >> ((c & 3) * 8);
+ return r;
+}
- return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3);
- #endif
+inline u64 rotl64_S (const u64 a, const u32 n)
+{
+ return rotr64_S (a, 64 - n);
+}
- #if VECT_SIZE == 8
- const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3, a.s4, a.s5, a.s6, a.s7)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3, b.s4, b.s5, b.s6, b.s7))) >> ((c & 3) * 8);
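+// endian swap via shifts and masks; works unchanged for any vector width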
+inline u32x swap32 (const u32x v)
+{
+ return ((v >> 24) & 0x000000ff)
+ | ((v >> 8) & 0x0000ff00)
+ | ((v << 8) & 0x00ff0000)
+ | ((v << 24) & 0xff000000);
+}
- return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3, tmp.s4, tmp.s5, tmp.s6, tmp.s7);
- #endif
+inline u64x swap64 (const u64x v)
+{
+ return ((v >> 56) & 0x00000000000000ff)
+ | ((v >> 40) & 0x000000000000ff00)
+ | ((v >> 24) & 0x0000000000ff0000)
+ | ((v >> 8) & 0x00000000ff000000)
+ | ((v << 8) & 0x000000ff00000000)
+ | ((v << 24) & 0x0000ff0000000000)
+ | ((v << 40) & 0x00ff000000000000)
+ | ((v << 56) & 0xff00000000000000);
}
-#endif
-#ifdef IS_AMD
-static inline u32x rotr32 (const u32x a, const u32 n)
+inline u32x rotr32 (const u32x a, const u32 n)
{
return rotate (a, 32 - n);
}
-static inline u32x rotl32 (const u32x a, const u32 n)
+inline u32x rotl32 (const u32x a, const u32 n)
{
return rotate (a, n);
}
-static inline u64 rotr64 (const u64 a, const u32 n)
+inline u64x rotr64 (const u64x a, const u32 n)
{
- uint2 a2 = as_uint2 (a);
+ const u32x a0 = h32_from_64 (a);
+ const u32x a1 = l32_from_64 (a);
- uint2 t;
+ const u32x t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
+ const u32x t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);
- t.s0 = (n >= 32) ? amd_bitalign (a2.s0, a2.s1, n - 32)
- : amd_bitalign (a2.s1, a2.s0, n);
- t.s1 = (n >= 32) ? amd_bitalign (a2.s1, a2.s0, n - 32)
- : amd_bitalign (a2.s0, a2.s1, n);
+ const u64x r = hl32_to_64 (t0, t1);
- return as_ulong (t);
+ return r;
}
-static inline u64 rotl64 (const u64 a, const u32 n)
+inline u64x rotl64 (const u64x a, const u32 n)
{
return rotr64 (a, 64 - n);
}
-#endif
-#ifdef IS_NV
-static inline u32x rotr32 (const u32x a, const u32 n)
+inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
- return rotate (a, 32 - n);
+ return amd_bfe (a, b, c);
}
-static inline u32x rotl32 (const u32x a, const u32 n)
+inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
{
- return rotate (a, n);
+ return amd_bytealign (a, b, c);
}
+#endif
-#if CUDA_ARCH >= 350
-static inline u64 rotr64 (const u64 a, const u32 n)
+#ifdef IS_NV
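+// prmt with selector 0x0123 picks bytes 3,2,1,0 from the source, i.e. reverses the byte order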
+inline u32 swap32_S (const u32 v)
+{
+ u32 r;
+
+ asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+
+ return r;
+}
+
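+// unpack the u64 into 32-bit halves, byte-reverse each half with prmt, then repack with the halves exchanged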
+inline u64 swap64_S (const u64 v)
{
u32 il;
u32 ir;
- asm ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(a));
+ asm ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
u32 tl;
u32 tr;
- if (n >= 32)
- {
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tl) : "r"(ir), "r"(il), "r"(n - 32));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tr) : "r"(il), "r"(ir), "r"(n - 32));
- }
- else
- {
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tl) : "r"(il), "r"(ir), "r"(n));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(tr) : "r"(ir), "r"(il), "r"(n));
- }
+ asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
+ asm ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
u64 r;
- asm ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tl), "r"(tr));
+ asm ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
return r;
}
-#else
-static inline u64 rotr64 (const u64 a, const u32 n)
+
+inline u32 rotr32_S (const u32 a, const u32 n)
{
- return rotate (a, (u64) 64 - n);
+ return rotate (a, 32 - n);
}
-#endif
-static inline u64 rotl64 (const u64 a, const u32 n)
+inline u32 rotl32_S (const u32 a, const u32 n)
{
- return rotr64 (a, 64 - n);
+ return rotate (a, n);
}
-#endif
-#ifdef IS_GENERIC
+inline u64 rotr64_S (const u64 a, const u32 n)
+{
+ return rotate (a, (u64) 64 - n);
+}
-static inline u32x rotr32 (const u32x a, const u32x n)
+inline u64 rotl64_S (const u64 a, const u32 n)
{
- return rotate (a, 32 - n);
+ return rotr64_S (a, 64 - n);
}
-static inline u32x rotl32 (const u32x a, const u32x n)
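+// NVIDIA byte permute: builds a u32 from the 8-byte pair {b:a}, one byte per nibble index in c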
+inline u32 __byte_perm_S (const u32 a, const u32 b, const u32 c)
{
- return rotate (a, n);
+ u32 r;
+
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+
+ return r;
}
-static inline u64 rotr64 (const u64 a, const u32 n)
+inline u32x swap32 (const u32x v)
{
- return rotate (a, (u64) 64 - n);
+ return ((v >> 24) & 0x000000ff)
+ | ((v >> 8) & 0x0000ff00)
+ | ((v << 8) & 0x00ff0000)
+ | ((v << 24) & 0xff000000);
}
-static inline u64 rotl64 (const u64 a, const u32 n)
+inline u64x swap64 (const u64x v)
{
- return rotate (a, (u64) n);
+ return ((v >> 56) & 0x00000000000000ff)
+ | ((v >> 40) & 0x000000000000ff00)
+ | ((v >> 24) & 0x0000000000ff0000)
+ | ((v >> 8) & 0x00000000ff000000)
+ | ((v << 8) & 0x000000ff00000000)
+ | ((v << 24) & 0x0000ff0000000000)
+ | ((v << 40) & 0x00ff000000000000)
+ | ((v << 56) & 0xff00000000000000);
}
-#endif
-#ifdef IS_NV
-#if CUDA_ARCH >= 500
-static inline u32x lut3_2d (const u32x a, const u32x b, const u32x c)
+inline u32x rotr32 (const u32x a, const u32 n)
{
- u32x r;
+ #if CUDA_ARCH < 350
- #if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
- #endif
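+ // no funnel shift before sm_35; compose rotr as (a >> n) + (a << (32 - n)) in PTX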
+ u32x t;
+ u32x r;
#if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- #endif
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- #endif
+ asm ("\n"
+ "shl.b32 %4, %2, %6;\n"
+ "shl.b32 %5, %3, %6;\n"
+ "shr.b32 %0, %2, %7;\n"
+ "shr.b32 %1, %3, %7;\n"
+ "add.u32 %0, %0, %4;\n"
+ "add.u32 %1, %1, %5;\n"
+ : "=r"(r.s0),
+ "=r"(r.s1)
+ : "r"(a.s0),
+ "r"(a.s1),
+ "r"(t.s0),
+ "r"(t.s1),
+ "r"(32 - n),
+ "r"(n));
+
+ #elif VECT_SIZE == 4
+
+ asm ("\n"
+ "shl.b32 %8, %4, %12;\n"
+ "shl.b32 %9, %5, %12;\n"
+ "shl.b32 %10, %6, %12;\n"
+ "shl.b32 %11, %7, %12;\n"
+ "shr.b32 %0, %4, %13;\n"
+ "shr.b32 %1, %5, %13;\n"
+ "shr.b32 %2, %6, %13;\n"
+ "shr.b32 %3, %7, %13;\n"
+ "add.u32 %0, %0, %8;\n"
+ "add.u32 %1, %1, %9;\n"
+ "add.u32 %2, %2, %10;\n"
+ "add.u32 %3, %3, %11;\n"
+ : "=r"(r.s0),
+ "=r"(r.s1),
+ "=r"(r.s2),
+ "=r"(r.s3)
+ : "r"(a.s0),
+ "r"(a.s1),
+ "r"(a.s2),
+ "r"(a.s3),
+ "r"(t.s0),
+ "r"(t.s1),
+ "r"(t.s2),
+ "r"(t.s3),
+ "r"(32 - n),
+ "r"(n));
+
+ #else
+
+ r = rotate (a, 32 - n);
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0x2d;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
#endif
return r;
-}
-static inline u32x lut3_39 (const u32x a, const u32x b, const u32x c)
-{
- u32x r;
+ #else
- #if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
- #endif
+ return rotate (a, 32 - n);
- #if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
#endif
+}
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- #endif
+inline u32x rotl32 (const u32x a, const u32 n)
+{
+ return rotr32 (a, 32 - n);
+}
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0x39;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
- #endif
+inline u64x rotr64 (const u64x a, const u32 n)
+{
+ return rotate (a, (u64) 64 - n);
+}
- return r;
+inline u64x rotl64 (const u64x a, const u32 n)
+{
+ return rotate (a, (u64) n);
}
-static inline u32x lut3_59 (const u32x a, const u32x b, const u32x c)
+inline u32x __byte_perm (const u32x a, const u32x b, const u32x c)
{
u32x r;
#if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c) );
#endif
- #if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
+ #if VECT_SIZE >= 2
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
#endif
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
+ #if VECT_SIZE >= 4
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
#endif
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
+ #if VECT_SIZE >= 8
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
+ #endif
+
+ #if VECT_SIZE >= 16
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
+ asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
#endif
return r;
}
-static inline u32x lut3_96 (const u32x a, const u32x b, const u32x c)
+inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
- u32x r;
+ u32 r;
- #if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
- #endif
+ asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
- #if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- #endif
+ return r;
+}
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- #endif
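+// amd_bytealign emulation: extract 32 bits from the 64-bit pair {a:b} starting at byte offset (c & 3)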
+#if CUDA_ARCH >= 350
+inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
+{
+ u32 r;
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
- #endif
+ asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
return r;
}
+#else
+inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
+{
+ return __byte_perm_S (b, a, (0x76543210 >> ((c & 3) * 4)) & 0xffff);
+}
+#endif
-static inline u32x lut3_e4 (const u32x a, const u32x b, const u32x c)
+#endif
+
+#ifdef IS_GENERIC
+inline u32 swap32_S (const u32 v)
{
- u32x r;
+ return (as_uint (as_uchar4 (v).s3210));
+}
- #if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
- #endif
+inline u64 swap64_S (const u64 v)
+{
+ return (as_ulong (as_uchar8 (v).s76543210));
+}
- #if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- #endif
+inline u32 rotr32_S (const u32 a, const u32 n)
+{
+ return rotate (a, 32 - n);
+}
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- #endif
+inline u32 rotl32_S (const u32 a, const u32 n)
+{
+ return rotate (a, n);
+}
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe4;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
- #endif
+inline u64 rotr64_S (const u64 a, const u32 n)
+{
+ return rotate (a, (u64) 64 - n);
+}
- return r;
+inline u64 rotl64_S (const u64 a, const u32 n)
+{
+ return rotate (a, (u64) n);
}
-static inline u32x lut3_e8 (const u32x a, const u32x b, const u32x c)
+inline u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
{
- u32x r;
+ const u64 tmp = ((((u64) a) << 32) | ((u64) b)) >> ((c & 3) * 8);
- #if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
- #endif
+ return (u32) (tmp);
+}
- #if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- #endif
+inline u32x swap32 (const u32x v)
+{
+ return ((v >> 24) & 0x000000ff)
+ | ((v >> 8) & 0x0000ff00)
+ | ((v << 8) & 0x00ff0000)
+ | ((v << 24) & 0xff000000);
+}
- #if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- #endif
+inline u64x swap64 (const u64x v)
+{
+ return ((v >> 56) & 0x00000000000000ff)
+ | ((v >> 40) & 0x000000000000ff00)
+ | ((v >> 24) & 0x0000000000ff0000)
+ | ((v >> 8) & 0x00000000ff000000)
+ | ((v << 8) & 0x000000ff00000000)
+ | ((v << 24) & 0x0000ff0000000000)
+ | ((v << 40) & 0x00ff000000000000)
+ | ((v << 56) & 0xff00000000000000);
+}
- #if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0xe8;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
- #endif
+inline u32x rotr32 (const u32x a, const u32 n)
+{
+ return rotate (a, 32 - n);
+}
- return r;
+inline u32x rotl32 (const u32x a, const u32 n)
+{
+ return rotate (a, n);
}
-static inline u32x lut3_ca (const u32x a, const u32x b, const u32x c)
+inline u64x rotr64 (const u64x a, const u32 n)
{
- u32x r;
+ return rotate (a, (u64) 64 - n);
+}
+
+inline u64x rotl64 (const u64x a, const u32 n)
+{
+ return rotate (a, (u64) n);
+}
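+
+// software bitfield extract: c bits of a starting at bit position b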
+inline u32 __bfe (const u32 a, const u32 b, const u32 c)
+{
+ #define BIT(x) (1 << (x))
+ #define BIT_MASK(x) (BIT (x) - 1)
+ #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
+
+ return BFE (a, b, c);
+}
+
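+// widen each lane to 64 bits, shift by the byte offset, then truncate back to 32 bits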
+inline u32x amd_bytealign (const u32x a, const u32x b, const u32 c)
+{
#if VECT_SIZE == 1
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r) : "r" (a), "r" (b), "r" (c));
+ const u64x tmp = ((((u64x) (a)) << 32) | ((u64x) (b))) >> ((c & 3) * 8);
+
+ return (u32x) (tmp);
#endif
#if VECT_SIZE == 2
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
+ const u64x tmp = ((((u64x) (a.s0, a.s1)) << 32) | ((u64x) (b.s0, b.s1))) >> ((c & 3) * 8);
+
+ return (u32x) (tmp.s0, tmp.s1);
#endif
#if VECT_SIZE == 4
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
+ const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3))) >> ((c & 3) * 8);
+
+ return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3);
#endif
#if VECT_SIZE == 8
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s0) : "r" (a.s0), "r" (b.s0), "r" (c.s0));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s1) : "r" (a.s1), "r" (b.s1), "r" (c.s1));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s2) : "r" (a.s2), "r" (b.s2), "r" (c.s2));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s3) : "r" (a.s3), "r" (b.s3), "r" (c.s3));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s4) : "r" (a.s4), "r" (b.s4), "r" (c.s4));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s5) : "r" (a.s5), "r" (b.s5), "r" (c.s5));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s6) : "r" (a.s6), "r" (b.s6), "r" (c.s6));
- asm ("lop3.b32 %0, %1, %2, %3, 0xca;" : "=r" (r.s7) : "r" (a.s7), "r" (b.s7), "r" (c.s7));
+ const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3, a.s4, a.s5, a.s6, a.s7)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3, b.s4, b.s5, b.s6, b.s7))) >> ((c & 3) * 8);
+
+ return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3, tmp.s4, tmp.s5, tmp.s6, tmp.s7);
#endif
- return r;
-}
+ #if VECT_SIZE == 16
+ const u64x tmp = ((((u64x) (a.s0, a.s1, a.s2, a.s3, a.s4, a.s5, a.s6, a.s7, a.s8, a.s9, a.sa, a.sb, a.sc, a.sd, a.se, a.sf)) << 32) | ((u64x) (b.s0, b.s1, b.s2, b.s3, b.s4, b.s5, b.s6, b.s7, b.s8, b.s9, b.sa, b.sb, b.sc, b.sd, b.se, b.sf))) >> ((c & 3) * 8);
-#endif
+ return (u32x) (tmp.s0, tmp.s1, tmp.s2, tmp.s3, tmp.s4, tmp.s5, tmp.s6, tmp.s7, tmp.s8, tmp.s9, tmp.sa, tmp.sb, tmp.sc, tmp.sd, tmp.se, tmp.sf);
+ #endif
+}
#endif
typedef struct
u32 digest_buf[8];
#elif defined _RAR5_
u32 digest_buf[4];
+ #elif defined _KRB5TGS_
+ u32 digest_buf[4];
+ #elif defined _AXCRYPT_
+ u32 digest_buf[4];
+ #elif defined _KEEPASS_
+ u32 digest_buf[4];
#endif
} digest_t;
u32 eapol[64];
int eapol_size;
int keyver;
+ u8 orig_mac1[6];
+ u8 orig_mac2[6];
+ u8 orig_nonce1[32];
+ u8 orig_nonce2[32];
} wpa_t;
} krb5pa_t;
+typedef struct
+{
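+ /* Kerberos 5 TGS-REP etype 23 (RC4-HMAC): checksum = HMAC-MD5 of the ticket, edata2 = the encrypted ticket blob */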
+ u32 account_info[512];
+ u32 checksum[4];
+ u32 edata2[2560];
+ u32 edata2_len;
+
+} krb5tgs_t;
+
typedef struct
{
u32 salt_buf[16];
} oldoffice34_t;
+typedef struct
+{
+ u32 salt_buf[128];
+ u32 salt_len;
+
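+ /* pc_* presumably hold a precomputed SHA-1 state and byte offset for the fixed salt prefix */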
+ u32 pc_digest[5];
+ u32 pc_offset;
+
+} pstoken_t;
+
+typedef struct
+{
+ u32 version;
+ u32 algorithm;
+
+ /* key-file handling */
+ u32 keyfile_len;
+ u32 keyfile[8];
+
+ u32 final_random_seed[8];
+ u32 transf_random_seed[8];
+ u32 enc_iv[4];
+ u32 contents_hash[8];
+
+ /* specific to version 1 */
+ u32 contents_len;
+ u32 contents[75000];
+
+ /* specific to version 2 */
+ u32 expected_bytes[8];
+
+} keepass_t;
+
typedef struct
{
u32 digest[4];
} seven_zip_tmp_t;
+typedef struct
+{
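+ /* KEK = key-encrypting key for the AxCrypt key-wrap rounds */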
+ u32 KEK[5];
+
+ u32 lsb[4];
+ u32 cipher[4];
+
+} axcrypt_tmp_t;
+
+typedef struct
+{
+ u32 tmp_digest[8];
+
+} keepass_tmp_t;
+
typedef struct
{
u32 Kc[16];
typedef struct
{
- u32 i[64];
+ u32 i[16];
u32 pw_len;